* [dpdk-dev] [PATCH] crypto/qat: add data-path APIs
@ 2020-06-12 14:39 Fan Zhang
2020-06-18 17:50 ` Trahe, Fiona
` (2 more replies)
0 siblings, 3 replies; 39+ messages in thread
From: Fan Zhang @ 2020-06-12 14:39 UTC (permalink / raw)
To: dev; +Cc: akhil.goyal, fiona.trahe, roy.fan.zhang, Piotr Bronowski
This patch adds data-path APIs to the QAT symmetric crypto driver to
support raw data as input.

For applications/libraries that want to benefit from the data-path
encryption acceleration provided by QAT but do not necessarily depend
on DPDK data-path structures (such as VPP), some performance
degradation is unavoidable when converting between their specific data
structures and DPDK cryptodev operations as well as mbufs.

This patch takes advantage of existing QAT implementations to form
symmetric data-path enqueue and dequeue APIs that support raw data as
input, so that they have wider usability towards those
applications/libraries without the performance drop caused by the data
structure conversions. In the meantime the less performance-sensitive
cryptodev device and session management remains intact, so that DPDK
cryptodev remains the unified control-path library for QAT.
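To illustrate the intended usage, an application that keeps its own frame
representation could drive the APIs roughly as below. This is only a
sketch: the app_frame/app_elt layout, MAX_FRAME_ELTS, the status values
and the surrounding device/session/buffer setup (dev_id, qp_id, sess,
frame, data_len) are application-defined placeholders, not part of this
patch.

    struct app_elt {
            rte_iova_t data_iova;
            rte_iova_t tag_iova;
            rte_iova_t aad_iova;
            uint8_t iv[12];
            uint8_t status;
    };

    struct app_frame {
            uint32_t n_elts;
            struct app_elt elts[MAX_FRAME_ELTS];
    };

    static uint32_t
    app_frame_n_elts(void *f)
    {
            return ((struct app_frame *)f)->n_elts;
    }

    /* enqueue one frame, kicking the HW doorbell on the last element */
    void *qp = qat_sym_get_qp(dev_id, qp_id);
    uint32_t i;

    for (i = 0; i < frame->n_elts; i++)
            qat_sym_enqueue_frame_aead(qp, sess,
                    frame->elts[i].data_iova, 0, data_len,
                    NULL, 0, frame->elts[i].iv,
                    frame->elts[i].tag_iova, frame->elts[i].aad_iova,
                    i == 0, i == frame->n_elts - 1, frame);

    /* poll; one call fills in the status of every element of the frame */
    void *done_frame;

    if (qat_sym_dequeue_frame(qp, &done_frame, app_frame_n_elts,
                    offsetof(struct app_frame, elts[0].status),
                    sizeof(struct app_elt), 1, 0) == 0) {
            /* all elements of done_frame were processed successfully */
    }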
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
---
drivers/common/qat/Makefile | 4 +-
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/compress/qat/rte_pmd_qat_version.map | 11 +
drivers/crypto/qat/meson.build | 5 +
drivers/crypto/qat/qat_sym_frame.c | 294 +++++++++++++++++++
drivers/crypto/qat/qat_sym_frame.h | 237 +++++++++++++++
7 files changed, 555 insertions(+), 3 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_frame.c
create mode 100644 drivers/crypto/qat/qat_sym_frame.h
diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
index 28bd5668f..3874f75ab 100644
--- a/drivers/common/qat/Makefile
+++ b/drivers/common/qat/Makefile
@@ -39,6 +39,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
SRCS-y += qat_sym.c
SRCS-y += qat_sym_session.c
SRCS-y += qat_sym_pmd.c
+ SRCS-y += qat_sym_frame.c
+
build_qat = yes
endif
endif
@@ -62,7 +64,7 @@ ifdef build_qat
LDLIBS += -lrte_pci -lrte_bus_pci
# export include files
- SYMLINK-y-include +=
+ SYMLINK-y-include += qat_sym_frame.h
# versioning export map
EXPORT_MAP := ../../compress/qat/rte_pmd_qat_version.map
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 8e6dd04eb..06e2d8c8a 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,8 @@ txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
q->csr_tail = q->tail;
}
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
uint32_t old_head, new_head;
uint32_t max_head;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 575d69059..8add1b049 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -116,4 +116,7 @@ qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
void *op_cookie __rte_unused,
uint64_t *dequeue_err_count __rte_unused);
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q);
+
#endif /* _QAT_QP_H_ */
diff --git a/drivers/compress/qat/rte_pmd_qat_version.map b/drivers/compress/qat/rte_pmd_qat_version.map
index f9f17e4f6..a9160b157 100644
--- a/drivers/compress/qat/rte_pmd_qat_version.map
+++ b/drivers/compress/qat/rte_pmd_qat_version.map
@@ -1,3 +1,14 @@
DPDK_20.0 {
local: *;
};
+
+EXPERIMENTAL {
+ global:
+
+ qat_sym_get_qp;
+ qat_sym_enqueue_frame_aead;
+ qat_sym_enqueue_frame_cipher;
+ qat_sym_enqueue_frame_auth;
+ qat_sym_enqueue_frame_chain;
+ qat_sym_dequeue_frame;
+};
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index fc65923a7..8d53debcf 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -13,9 +13,14 @@ if dep.found()
qat_sources += files('qat_sym_pmd.c',
'qat_sym.c',
'qat_sym_session.c',
+ 'qat_sym_frame.c',
'qat_asym_pmd.c',
'qat_asym.c')
qat_ext_deps += dep
qat_cflags += '-DBUILD_QAT_SYM'
qat_cflags += '-DBUILD_QAT_ASYM'
+ headers = files(
+ 'qat_sym_frame.h',
+ )
+ use_function_versioning = true
endif
diff --git a/drivers/crypto/qat/qat_sym_frame.c b/drivers/crypto/qat/qat_sym_frame.c
new file mode 100644
index 000000000..27656c970
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_frame.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+#include "qat_sym_frame.h"
+#include "qat_qp.h"
+
+void *
+qat_sym_get_qp(uint8_t dev_id, uint16_t qp_id)
+{
+ struct rte_cryptodev *dev;
+ struct qat_qp *qp;
+ const char *drv_name;
+
+ /* make sure it is a QAT device */
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
+ return NULL;
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ drv_name = rte_cryptodev_driver_name_get(dev->driver_id);
+ if ((strncmp(drv_name, RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD),
+ sizeof(RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD))) != 0) ||
+ (qp_id > dev->data->nb_queue_pairs))
+ return NULL;
+
+ qp = dev->data->queue_pairs[qp_id];
+ if (qp->service_type != QAT_SERVICE_SYMMETRIC)
+ return NULL;
+
+ return (void *)qp;
+}
+
+int
+qat_sym_enqueue_frame_aead(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ rte_iova_t data_iova, uint32_t cipher_ofs, uint32_t cipher_len,
+ struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
+ uint8_t *iv, rte_iova_t tag_iova, rte_iova_t aad_iova,
+ uint8_t is_first, uint8_t is_last, void *frame)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ register uint8_t *msg = (uint8_t *)tx_queue->base_addr +
+ tx_queue->tail;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req =
+ (struct icp_qat_fw_la_bulk_req *)msg;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ rte_mov128(msg, (const uint8_t *)&(ctx->fw_req));
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr = data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length = cipher_ofs +
+ cipher_len;
+
+ /* TODO: add support for non-GCM algorithms */
+ if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_GALOIS_128 &&
+ ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_GALOIS_64)
+ return -1;
+
+ /* since we know it is GCM, iv has to be 12 bytes */
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+
+ if (unlikely(is_first != 0))
+ req->comn_mid.opaque_data = (uintptr_t)frame;
+
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array, iv,
+ ctx->cipher_iv.length);
+
+ if (ctx->is_single_pass) {
+ cipher_param->spc_aad_addr = aad_iova;
+ cipher_param->spc_auth_res_addr = tag_iova;
+ }
+
+ if (sgl) {
+ if (!n_sgl_vecs)
+ return -1;
+ /* TODO: sgl process */
+ } else {
+ cipher_param->cipher_offset = cipher_ofs;
+ cipher_param->cipher_length = cipher_len;
+ auth_param->auth_off = cipher_ofs;
+ auth_param->auth_len = cipher_len;
+ auth_param->u1.aad_adr = aad_iova;
+ auth_param->auth_res_addr = tag_iova;
+ }
+
+ tx_queue->tail = (tx_queue->tail + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+
+ if (unlikely(is_last != 0)) {
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ }
+
+ return 0;
+}
+
+int
+qat_sym_enqueue_frame_chain(__rte_unused void *qat_sym_qp,
+ __rte_unused struct rte_cryptodev_sym_session *session,
+ __rte_unused rte_iova_t data_iova,
+ __rte_unused uint32_t cipher_ofs,
+ __rte_unused uint32_t cipher_len,
+ __rte_unused uint32_t auth_ofs,
+ __rte_unused uint32_t auth_len,
+ __rte_unused struct rte_crypto_vec *sgl,
+ __rte_unused uint32_t n_sgl_vecs,
+ __rte_unused uint8_t *iv, __rte_unused rte_iova_t digest_iova,
+ __rte_unused uint8_t is_first,
+ __rte_unused uint8_t is_last, __rte_unused void *frame)
+{
+ /* TODO: implement the body */
+ return 0;
+}
+
+int
+qat_sym_enqueue_frame_cipher(__rte_unused void *qat_sym_qp,
+ __rte_unused struct rte_cryptodev_sym_session *session,
+ __rte_unused rte_iova_t data_iova,
+ __rte_unused uint32_t cipher_ofs,
+ __rte_unused uint32_t cipher_len,
+ __rte_unused struct rte_crypto_vec *sgl,
+ __rte_unused uint32_t n_sgl_vecs,
+ __rte_unused uint8_t *iv,
+ __rte_unused uint8_t is_first,
+ __rte_unused uint8_t is_last, __rte_unused void *frame)
+{
+ /* TODO: implement the body */
+ return 0;
+}
+
+int
+qat_sym_enqueue_frame_auth(__rte_unused void *qat_sym_qp,
+ __rte_unused struct rte_cryptodev_sym_session *session,
+ __rte_unused rte_iova_t data_iova,
+ __rte_unused uint32_t auth_ofs,
+ __rte_unused uint32_t auth_len,
+ __rte_unused struct rte_crypto_vec *sgl,
+ __rte_unused uint32_t n_sgl_vecs,
+ __rte_unused uint8_t *iv, __rte_unused rte_iova_t digest_iova,
+ __rte_unused uint8_t is_first,
+ __rte_unused uint8_t is_last, __rte_unused void *frame)
+{
+ /* TODO: implement the body */
+ return 0;
+}
+
+#define get_rx_queue_message_at_index(q, h, i) \
+ (void *)((uint8_t *)q->base_addr + ((h + q->msg_size * (i)) & \
+ q->modulo_mask))
+
+static __rte_always_inline int
+qat_is_rx_msg_ok(struct icp_qat_fw_comn_resp *resp_msg)
+{
+ return ICP_QAT_FW_COMN_STATUS_FLAG_OK ==
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status);
+}
+
+int
+qat_sym_dequeue_frame(void *qat_sym_qp, void **frame,
+ qat_qp_get_frame_n_element_t get_frame_n_elt,
+ uint32_t first_status_offset, uint32_t element_interval,
+ uint8_t element_status_success, uint8_t element_status_error)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp, *resp1, *resp2, *resp3;
+ void *f = NULL;
+ uint32_t n_elts, i;
+ uint8_t *status, *status1, *status2, *status3;
+ int n_fail = 0, n_fail1 = 0, n_fail2 = 0, n_fail3 = 0;
+ uint32_t head = rx_queue->head;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ /* if the message is not yet processed, set no frame and return -1 */
+ if (*(uint32_t *)resp == ADF_RING_EMPTY_SIG) {
+ *frame = NULL;
+ return -1;
+ }
+
+ f = (void *)(uintptr_t)resp->opaque_data;
+ if (unlikely(f == NULL)) {
+ *frame = NULL;
+ return -1;
+ }
+
+ *frame = f;
+ status = (uint8_t *)f + first_status_offset;
+
+ n_elts = (*get_frame_n_elt)(f);
+ if (unlikely(n_elts == 0))
+ return -1;
+
+ /* process the first message */
+ if (qat_is_rx_msg_ok(resp))
+ *status = element_status_success;
+ else {
+ *status = element_status_error;
+ n_fail--;
+ }
+
+ status += element_interval;
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+ /* fetch 4 messages in a loop */
+ for (i = 1; i + 4 <= n_elts; i += 4) {
+ resp = get_rx_queue_message_at_index(rx_queue, head, 0);
+ resp1 = get_rx_queue_message_at_index(rx_queue, head, 1);
+ resp2 = get_rx_queue_message_at_index(rx_queue, head, 2);
+ resp3 = get_rx_queue_message_at_index(rx_queue, head, 3);
+
+ status1 = status + element_interval;
+ status2 = status + element_interval * 2;
+ status3 = status + element_interval * 3;
+
+ if (qat_is_rx_msg_ok(resp))
+ *status = element_status_success;
+ else {
+ *status = element_status_error;
+ n_fail--;
+ }
+
+ if (qat_is_rx_msg_ok(resp1))
+ *status1 = element_status_success;
+ else {
+ *status1 = element_status_error;
+ n_fail1--;
+ }
+
+ if (qat_is_rx_msg_ok(resp2))
+ *status2 = element_status_success;
+ else {
+ *status2 = element_status_error;
+ n_fail2--;
+ }
+
+ if (qat_is_rx_msg_ok(resp3))
+ *status3 = element_status_success;
+ else {
+ *status3 = element_status_error;
+ n_fail3--;
+ }
+
+ status = status3 + element_interval;
+ head = (head + rx_queue->msg_size * 4) & rx_queue->modulo_mask;
+ }
+
+ for (; i < n_elts; i++) {
+ resp = get_rx_queue_message_at_index(rx_queue, head, 0);
+ if (qat_is_rx_msg_ok(resp))
+ *status = element_status_success;
+ else {
+ *status = element_status_error;
+ n_fail--;
+ }
+ status += element_interval;
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ }
+
+ /* update queue pair head */
+ rx_queue->head = (rx_queue->head + i * rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+ rx_queue->nb_processed_responses += i;
+ qp->dequeued += i;
+ qp->stats.dequeued_count += i;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+
+ return n_fail + n_fail1 + n_fail2 + n_fail3;
+}
diff --git a/drivers/crypto/qat/qat_sym_frame.h b/drivers/crypto/qat/qat_sym_frame.h
new file mode 100644
index 000000000..e378cacb8
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_frame.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_FRAME_H_
+#define _QAT_SYM_FRAME_H_
+
+#include <rte_common.h>
+
+/**
+ * Get the QAT queue pair based on device id and queue pair id.
+ * Checks if passed arguments are valid.
+ *
+ * @param dev_id
+ * cryptodev device id.
+ * @param qp_id
+ * queue pair id
+ * @return
+ * pointer to queue pair if passed parameters are valid.
+ * NULL pointer otherwise.
+ **/
+__rte_experimental
+void *
+qat_sym_get_qp(uint8_t dev_id, uint16_t qp_id);
+
+/**
+ * enqueue one AEAD operation into QAT queue
+ *
+ * @param qat_sym_qp
+ * queue pair data got from qat_sym_get_qp().
+ * @param session
+ * configured cryptodev symmetric session data.
+ * @param data_iova
+ * iova address of data.
+ * @param cipher_ofs
+ * cipher offset start from data_iova.
+ * @param cipher_len
+ * cipher total length.
+ * @param sgl
+ * in case of SGL data, pointer to an array of sgl structure.
+ * @param n_sgl_vecs
+ * number of SGL vectors in sgl array, 0 for non-sgl input.
+ * @param iv
+ * pointer to iv data.
+ * @param tag_iova
+ * Tag iova address.
+ * @param aad_iova
+ * AAD iova address.
+ * @param is_first
+ * 1 if it is the first operation in the frame.
+ * 0 otherwise.
+ * @param is_last
+ * 1 if the data is the last element in the frame.
+ * 0 otherwise.
+ * @param frame
+ * if is_first is set the frame pointer will be written into the message.
+ *
+ * @return
+ * 0 if operation is successful, negative value if otherwise.
+ **/
+
+__rte_experimental
+int
+qat_sym_enqueue_frame_aead(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ rte_iova_t data_iova, uint32_t cipher_ofs, uint32_t cipher_len,
+ struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
+ uint8_t *iv, rte_iova_t tag_iova, rte_iova_t aad_iova,
+ uint8_t is_first, uint8_t is_last, void *frame);
+
+/**
+ * enqueue one chaining operation (cipher and hash) into QAT queue
+ *
+ * @param qat_sym_qp
+ * queue pair data got from qat_sym_get_qp().
+ * @param session
+ * configured cryptodev symmetric session data.
+ * @param data_iova
+ * iova address of data.
+ * @param cipher_ofs
+ * cipher offset start from data_iova.
+ * @param cipher_len
+ * cipher total length.
+ * @param sgl
+ * in case of SGL data, pointer to an array of sgl structure.
+ * @param n_sgl_vecs
+ * number of SGL vectors in sgl array, 0 for non-sgl input.
+ * @param iv
+ * pointer to iv data.
+ * @param digest_iova
+ * Digest iova address.
+ * @param is_first
+ * 1 if it is the first operation in the frame, so that the frame pointer
+ * is written into the QAT queue message and can be retrieved upon dequeue.
+ * 0 otherwise.
+ * @param is_last
+ * 1 if the data is the last element in the frame, so that the QAT queue
+ * tail is kicked and the HW will start processing.
+ * 0 otherwise.
+ * @param frame
+ * if is_first is set the frame pointer will be written into the message.
+ *
+ * @return
+ * 0 if operation is successful, negative value if otherwise.
+ **/
+__rte_experimental
+int
+qat_sym_enqueue_frame_chain(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ rte_iova_t data_iova, uint32_t cipher_ofs, uint32_t cipher_len,
+ uint32_t auth_ofs, uint32_t auth_len,
+ struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
+ uint8_t *iv, rte_iova_t digest_iova,
+ uint8_t is_first, uint8_t is_last, void *frame);
+
+/**
+ * enqueue one cipher-only operation into QAT queue
+ *
+ * @param qat_sym_qp
+ * queue pair data got from qat_sym_get_qp().
+ * @param session
+ * configured cryptodev symmetric session data.
+ * @param data_iova
+ * iova address of data.
+ * @param cipher_ofs
+ * cipher offset start from data_iova.
+ * @param cipher_len
+ * cipher total length.
+ * @param sgl
+ * in case of SGL data, pointer to an array of sgl structure.
+ * @param n_sgl_vecs
+ * number of SGL vectors in sgl array, 0 for non-sgl input.
+ * @param iv
+ * pointer to iv data.
+ * @param is_first
+ * 1 if it is the first operation in the frame.
+ * 0 otherwise.
+ * @param is_last
+ * 1 if the data is the last element in the frame.
+ * 0 otherwise.
+ * @param frame
+ * if is_first is set the frame pointer will be written into the message.
+ *
+ * @return
+ * 0 if operation is successful, negative value if otherwise.
+ **/
+
+__rte_experimental
+int
+qat_sym_enqueue_frame_cipher(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ rte_iova_t data_iova, uint32_t cipher_ofs, uint32_t cipher_len,
+ struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
+ uint8_t *iv, uint8_t is_first, uint8_t is_last, void *frame);
+
+/**
+ * enqueue one auth-only operation into QAT queue
+ *
+ * @param qat_sym_qp
+ * queue pair data got from qat_sym_get_qp().
+ * @param session
+ * configured cryptodev symmetric session data.
+ * @param data_iova
+ * iova address of data.
+ * @param auth_ofs
+ * authentication offset start from data_iova.
+ * @param auth_len
+ * authentication total length.
+ * @param sgl
+ * in case of SGL data, pointer to an array of sgl structure.
+ * @param n_sgl_vecs
+ * number of SGL vectors in sgl array, 0 for non-sgl input.
+ * @param iv
+ * pointer to iv data.
+ * @param digest_iova
+ * digest iova address.
+ * @param is_first
+ * 1 if it is the first operation in the frame.
+ * 0 otherwise.
+ * @param is_last
+ * 1 if the data is the last element in the frame.
+ * 0 otherwise.
+ * @param frame
+ * if is_first is set the frame pointer will be written into the message.
+ *
+ * @return
+ * 0 if operation is successful, negative value if otherwise.
+ **/
+
+__rte_experimental
+int
+qat_sym_enqueue_frame_auth(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ rte_iova_t data_iova, uint32_t auth_ofs, uint32_t auth_len,
+ struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
+ uint8_t *iv, rte_iova_t digest_iova,
+ uint8_t is_first, uint8_t is_last, void *frame);
+
+/**
+ * Function prototype to get the number of elements in a frame in dequeue.
+ * This function should be provided by the user.
+ **/
+typedef uint32_t (*qat_qp_get_frame_n_element_t)(void *frame);
+
+/**
+ * Dequeue a frame from QAT queue
+ *
+ * @param qat_sym_qp
+ * queue pair data got from qat_sym_get_qp().
+ * @param frame
+ * return the frame dequeued.
+ * @param get_frame_n_elt
+ * callback that returns the number of elements in the dequeued frame.
+ * @param first_status_offset
+ * the offset to the status field of the first frame element.
+ * @param element_interval
+ * the size of frame element in the frame data, used to compute next
+ * status field.
+ * @param element_status_success
+ * value to set for successfully processed frame element.
+ * @param element_status_error
+ * value to set for unsuccessfully processed frame element.
+ *
+ * @return
+ * if a frame is retrieved from the queue pair it will be written
+ * into the "frame" parameter, otherwise "frame" will be set to NULL
+ * and -1 will be returned. If all elements were processed successfully
+ * 0 is returned, otherwise the negative number of failed elements.
+ **/
+__rte_experimental
+int
+qat_sym_dequeue_frame(void *qat_sym_qp, void **frame,
+ qat_qp_get_frame_n_element_t get_frame_n_elt,
+ uint32_t first_status_offset, uint32_t element_interval,
+ uint8_t element_status_success, uint8_t element_status_error);
+
+#endif /* _QAT_SYM_FRAME_H_ */
--
2.20.1
* Re: [dpdk-dev] [PATCH] crypto/qat: add data-path APIs
2020-06-12 14:39 [dpdk-dev] [PATCH] crypto/qat: add data-path APIs Fan Zhang
@ 2020-06-18 17:50 ` Trahe, Fiona
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 0/3] crypto/qat: add symmetric crypto " Fan Zhang
2020-06-26 6:55 ` [dpdk-dev] [PATCH] crypto/qat: add data-path APIs Jerin Jacob
2 siblings, 0 replies; 39+ messages in thread
From: Trahe, Fiona @ 2020-06-18 17:50 UTC (permalink / raw)
To: Zhang, Roy Fan, dev; +Cc: akhil.goyal, Bronowski, PiotrX, Trahe, Fiona
> -----Original Message-----
> From: Zhang, Roy Fan <roy.fan.zhang@intel.com>
> Sent: Friday, June 12, 2020 3:40 PM
> To: dev@dpdk.org
> Cc: akhil.goyal@nxp.com; Trahe, Fiona <fiona.trahe@intel.com>; Zhang, Roy Fan
> <roy.fan.zhang@intel.com>; Bronowski, PiotrX <piotrx.bronowski@intel.com>
> Subject: [PATCH] crypto/qat: add data-path APIs
>
> This patch adds data-path APIs to QAT symmetric dirver to support
> raw data as input.
>
> For applications/libraries that want to benefit from the data-path
> encryption acceleration provided by QAT but not necessarily depends
> on DPDK data-path structures (such as VPP), some performance
> degradation is unavoidable to convert between their specific data
> structure and DPDK cryptodev operation as well as mbufs.
>
> This patch takes advantage of existing QAT implementations to form
> symmetric data-path enqueue and dequeue APIs that support raw data
> as input so that they can have wider usability towards those
> applications/libraries without performance drop caused by the data
> structure conversions. In the meantime the less performance-sensitive
> cryptodev device and session management remains intact so that DPDK
> cryptodev remains to be unified control path library for QAT.
>
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
> ---
> drivers/common/qat/Makefile | 4 +-
> drivers/common/qat/qat_qp.c | 4 +-
> drivers/common/qat/qat_qp.h | 3 +
> drivers/compress/qat/rte_pmd_qat_version.map | 11 +
> drivers/crypto/qat/meson.build | 5 +
> drivers/crypto/qat/qat_sym_frame.c | 294 +++++++++++++++++++
> drivers/crypto/qat/qat_sym_frame.h | 237 +++++++++++++++
> 7 files changed, 555 insertions(+), 3 deletions(-)
> create mode 100644 drivers/crypto/qat/qat_sym_frame.c
> create mode 100644 drivers/crypto/qat/qat_sym_frame.h
>
> diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
> index 28bd5668f..3874f75ab 100644
> --- a/drivers/common/qat/Makefile
> +++ b/drivers/common/qat/Makefile
> @@ -39,6 +39,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
> SRCS-y += qat_sym.c
> SRCS-y += qat_sym_session.c
> SRCS-y += qat_sym_pmd.c
> + SRCS-y += qat_sym_frame.c
> +
> build_qat = yes
> endif
> endif
> @@ -62,7 +64,7 @@ ifdef build_qat
> LDLIBS += -lrte_pci -lrte_bus_pci
>
> # export include files
> - SYMLINK-y-include +=
> + SYMLINK-y-include += qat_sym_frame.h
>
> # versioning export map
> EXPORT_MAP := ../../compress/qat/rte_pmd_qat_version.map
> diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
> index 8e6dd04eb..06e2d8c8a 100644
> --- a/drivers/common/qat/qat_qp.c
> +++ b/drivers/common/qat/qat_qp.c
> @@ -547,8 +547,8 @@ txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
> q->csr_tail = q->tail;
> }
>
> -static inline
> -void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
> +void
> +rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
> {
> uint32_t old_head, new_head;
> uint32_t max_head;
> diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
> index 575d69059..8add1b049 100644
> --- a/drivers/common/qat/qat_qp.h
> +++ b/drivers/common/qat/qat_qp.h
> @@ -116,4 +116,7 @@ qat_comp_process_response(void **op __rte_unused, uint8_t *resp
> __rte_unused,
> void *op_cookie __rte_unused,
> uint64_t *dequeue_err_count __rte_unused);
>
> +void
> +rxq_free_desc(struct qat_qp *qp, struct qat_queue *q);
> +
> #endif /* _QAT_QP_H_ */
> diff --git a/drivers/compress/qat/rte_pmd_qat_version.map
> b/drivers/compress/qat/rte_pmd_qat_version.map
> index f9f17e4f6..a9160b157 100644
> --- a/drivers/compress/qat/rte_pmd_qat_version.map
> +++ b/drivers/compress/qat/rte_pmd_qat_version.map
> @@ -1,3 +1,14 @@
> DPDK_20.0 {
> local: *;
> };
> +
> +EXPERIMENTAL {
> + global:
> +
> + qat_sym_get_qp;
> + qat_sym_enqueue_frame_aead;
> + qat_sym_enqueue_frame_cipher;
> + qat_sym_enqueue_frame_auth;
> + qat_sym_enqueue_frame_chain;
> + qat_sym_dequeue_frame;
> +};
> diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
> index fc65923a7..8d53debcf 100644
> --- a/drivers/crypto/qat/meson.build
> +++ b/drivers/crypto/qat/meson.build
> @@ -13,9 +13,14 @@ if dep.found()
> qat_sources += files('qat_sym_pmd.c',
> 'qat_sym.c',
> 'qat_sym_session.c',
> + 'qat_sym_frame.c',
> 'qat_asym_pmd.c',
> 'qat_asym.c')
> qat_ext_deps += dep
> qat_cflags += '-DBUILD_QAT_SYM'
> qat_cflags += '-DBUILD_QAT_ASYM'
> + headers = files(
> + 'qat_sym_frame.h',
> + )
> + use_function_versioning = true
> endif
> diff --git a/drivers/crypto/qat/qat_sym_frame.c b/drivers/crypto/qat/qat_sym_frame.c
> new file mode 100644
> index 000000000..27656c970
> --- /dev/null
> +++ b/drivers/crypto/qat/qat_sym_frame.c
> @@ -0,0 +1,294 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2015-2019 Intel Corporation
> + */
> +
> +#include <rte_cryptodev_pmd.h>
> +
> +#include "adf_transport_access_macros.h"
> +#include "icp_qat_fw.h"
> +#include "icp_qat_fw_la.h"
> +
> +#include "qat_sym_pmd.h"
> +#include "qat_sym_session.h"
> +#include "qat_sym_frame.h"
> +#include "qat_qp.h"
> +
> +void *
> +qat_sym_get_qp(uint8_t dev_id, uint16_t qp_id)
> +{
> + struct rte_cryptodev *dev;
> + struct qat_qp *qp;
> + const char *drv_name;
> +
> + /* make sure it is a QAT device */
> + if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
> + return NULL;
[Fiona] I'd suggest using the new API just pushed here:
https://patches.dpdk.org/patch/71211/
Then you can leave out the above check and the nb_queue_pairs check below.
> + dev = rte_cryptodev_pmd_get_dev(dev_id);
> + drv_name = rte_cryptodev_driver_name_get(dev->driver_id);
> + if ((strncmp(drv_name, RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD),
> + sizeof(RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD))) != 0) ||
> + (qp_id > dev->data->nb_queue_pairs))
> + return NULL;
> +
> + qp = dev->data->queue_pairs[qp_id];
[Fiona] a null check would be useful before dereferencing qp below,
but if you use the fn above then no need.
> + if (qp->service_type != QAT_SERVICE_SYMMETRIC)
> + return NULL;
> +
> + return (void *)qp;
> +}
> +
[Fiona] Add function hdr with limitations/assumptions,
e.g. assumes in-place,
limit on how big the frame can be, relative to queue size,
the frame APIs must be used for both enqueue and dequeue and can't be
mixed with the other enqueue/dequeue APIs.
Also explanation of frame param, etc
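e.g. something along these lines (rough sketch of the wording only):
 /**
  * Enqueue one AEAD operation of a frame onto a QAT sym queue pair.
  *
  * Limitations: processing is in-place (the source buffer is overwritten);
  * the total number of elements in a frame must not exceed the number of
  * free descriptors on the queue pair; frames built with these APIs must
  * be dequeued with qat_sym_dequeue_frame() and must not be mixed with
  * rte_cryptodev_enqueue_burst()/rte_cryptodev_dequeue_burst() on the
  * same queue pair.
  */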
> +int
> +qat_sym_enqueue_frame_aead(void *qat_sym_qp,
> + struct rte_cryptodev_sym_session *session,
> + rte_iova_t data_iova, uint32_t cipher_ofs, uint32_t cipher_len,
> + struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
[Fiona] could pass in an rte_crypto_sgl instead of both of these.
> + uint8_t *iv, rte_iova_t tag_iova, rte_iova_t aad_iova,
> + uint8_t is_first, uint8_t is_last, void *frame)
> +{
> + struct qat_qp *qp = qat_sym_qp;
> + struct qat_queue *tx_queue = &qp->tx_q;
> + register uint8_t *msg = (uint8_t *)tx_queue->base_addr +
> + tx_queue->tail;
> + struct qat_sym_session *ctx;
> + struct icp_qat_fw_la_bulk_req *req =
> + (struct icp_qat_fw_la_bulk_req *)msg;
> + struct icp_qat_fw_la_cipher_req_params *cipher_param;
> + struct icp_qat_fw_la_auth_req_params *auth_param;
> +
> + ctx = (struct qat_sym_session *)get_sym_session_private_data(
> + session, cryptodev_qat_driver_id);
> + rte_mov128(msg, (const uint8_t *)&(ctx->fw_req));
> +
> + cipher_param = (void *)&req->serv_specif_rqpars;
> + auth_param = (void *)((uint8_t *)cipher_param +
> + ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
> + req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr = data_iova;
> + req->comn_mid.src_length = req->comn_mid.dst_length = cipher_ofs +
> + cipher_len;
> +
> + /* TODO: add support to non-gcm algorithms */
> + if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_GALOIS_128 &&
> + ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_GALOIS_64)
> + return -1;
> +
> + /* since we know it is GCM, iv has to be 12 bytes */
> + ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
> + req->comn_hdr.serv_specif_flags,
> + ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
> +
> + if (unlikely(is_first != 0))
> + req->comn_mid.opaque_data = (uintptr_t)frame;
> +
> + rte_memcpy_generic(cipher_param->u.cipher_IV_array, iv,
> + ctx->auth_iv.length);
> +
> + if (ctx->is_single_pass) {
> + cipher_param->spc_aad_addr = aad_iova;
> + cipher_param->spc_auth_res_addr = tag_iova;
> + }
> +
> + if (sgl) {
> + if (!n_sgl_vecs)
> + return -1;
> + /* TODO: sgl process */
> + } else {
> + cipher_param->cipher_offset = cipher_ofs;
> + cipher_param->cipher_length = cipher_len;
> + auth_param->auth_off = cipher_ofs;
> + auth_param->auth_len = cipher_len;
> + auth_param->u1.aad_adr = aad_iova;
> + auth_param->auth_res_addr = tag_iova;
> + }
> +
> + tx_queue->tail = (tx_queue->tail + tx_queue->msg_size) &
> + tx_queue->modulo_mask;
> +
> + if (unlikely(is_last != 0)) {
> + qp->enqueued++;
> + qp->stats.enqueued_count++;
[Fiona] This needs to be incremented by the number of descriptors sent, i.e. the frame size.
Either keep track of this or calculate it here based on the tail and the original tail.
Should also make sure the ring is not filled and wrapped over if is_last is never set,
but avoid an extra check on the data path - at least document it in the fn hdr.
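E.g. something like the below (untested sketch, assuming the frame APIs are
the only users of this queue so csr_tail still holds the tail written at the
previous doorbell):
	uint32_t nb_descs = ((tx_queue->tail - tx_queue->csr_tail) &
			tx_queue->modulo_mask) / tx_queue->msg_size;
	qp->enqueued += nb_descs;
	qp->stats.enqueued_count += nb_descs;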
> + WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
> + tx_queue->hw_bundle_number,
> + tx_queue->hw_queue_number,
> + tx_queue->tail);
> + tx_queue->csr_tail = tx_queue->tail;
> + }
> +
> + return 0;
> +}
> +
> +int
> +qat_sym_enqueue_frame_chain(__rte_unused void *qat_sym_qp,
> + __rte_unused struct rte_cryptodev_sym_session *session,
> + __rte_unused rte_iova_t data_iova,
> + __rte_unused uint32_t cipher_ofs,
> + __rte_unused uint32_t cipher_len,
> + __rte_unused uint32_t auth_ofs,
> + __rte_unused uint32_t auth_len,
> + __rte_unused struct rte_crypto_vec *sgl,
> + __rte_unused uint32_t n_sgl_vecs,
> + __rte_unused uint8_t *iv, __rte_unused rte_iova_t digest_iova,
> + __rte_unused uint8_t is_first,
> + __rte_unused uint8_t is_last, __rte_unused void *frame)
> +{
> + /* TODO: implement the body */
> + return 0;
> +}
> +
> +int
> +qat_sym_enqueue_frame_cipher(__rte_unused void *qat_sym_qp,
> + __rte_unused struct rte_cryptodev_sym_session *session,
> + __rte_unused rte_iova_t data_iova,
> + __rte_unused uint32_t cipher_ofs,
> + __rte_unused uint32_t cipher_len,
> + __rte_unused struct rte_crypto_vec *sgl,
> + __rte_unused uint32_t n_sgl_vecs,
> + __rte_unused uint8_t *iv,
> + __rte_unused uint8_t is_first,
> + __rte_unused uint8_t is_last, __rte_unused void *frame)
> +{
> + /* TODO: implement the body */
> + return 0;
> +}
> +
> +int
> +qat_sym_enqueue_frame_auth(__rte_unused void *qat_sym_qp,
> + __rte_unused struct rte_cryptodev_sym_session *session,
> + __rte_unused rte_iova_t data_iova,
> + __rte_unused uint32_t auth_ofs,
> + __rte_unused uint32_t auth_len,
> + __rte_unused struct rte_crypto_vec *sgl,
> + __rte_unused uint32_t n_sgl_vecs,
> + __rte_unused uint8_t *iv, __rte_unused rte_iova_t digest_iova,
> + __rte_unused uint8_t is_first,
> + __rte_unused uint8_t is_last, __rte_unused void *frame)
> +{
> + /* TODO: implement the body */
> + return 0;
> +}
> +
> +#define get_rx_queue_message_at_index(q, h, i) \
> + (void *)((uint8_t *)q->base_addr + ((h + q->msg_size * (i)) & \
> + q->modulo_mask))
> +
> +static __rte_always_inline int
> +qat_is_rx_msg_ok(struct icp_qat_fw_comn_resp *resp_msg)
> +{
> + return ICP_QAT_FW_COMN_STATUS_FLAG_OK ==
> + ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
> + resp_msg->comn_hdr.comn_status);
> +}
> +
> +int
> +qat_sym_dequeue_frame(void *qat_sym_qp, void **frame,
> + qat_qp_get_frame_n_element_t get_frame_n_elt,
> + uint32_t first_status_offset, uint32_t element_interval,
> + uint8_t element_status_success, uint8_t element_status_error)
> +{
> + struct qat_qp *qp = qat_sym_qp;
> + struct qat_queue *rx_queue = &qp->rx_q;
> + struct icp_qat_fw_comn_resp *resp, *resp1, *resp2, *resp3;
> + void *f = NULL;
> + uint32_t n_elts, i;
> + uint8_t *status, *status1, *status2, *status3;
> + int n_fail = 0, n_fail1 = 0, n_fail2 = 0, n_fail3 = 0;
> + uint32_t head = rx_queue->head;
> +
> + resp = (struct icp_qat_fw_comn_resp *)(
> + (uint8_t *)rx_queue->base_addr + head);
> +
> + /* if message is not processed, return 0 */
> + if (*(uint32_t *)resp == ADF_RING_EMPTY_SIG) {
> + *frame = NULL;
> + return -1;
> + }
> +
> + f = (void *)(uintptr_t)resp->opaque_data;
> + if (unlikely(f == NULL)) {
> + *frame = NULL;
> + return -1;
> + }
> +
> + *frame = f;
> + status = (uint8_t *)f + first_status_offset;
> +
> + n_elts = (*get_frame_n_elt)(f);
> + if (unlikely(n_elts == 0))
> + return -1;
[Fiona] I'd move this check up before the previous 2 lines.
But is there ever a case of 0? Surely this can be caught at enqueue, so no check is needed here?
> +
> + /* process the first message */
> + if (qat_is_rx_msg_ok(resp))
> + *status = element_status_success;
> + else {
> + *status = element_status_error;
> + n_fail--;
> + }
> +
[Fiona] Isn't there a step missing? Need to make sure the whole frame is available before processing any responses.
> + status += element_interval;
> +
> + /* fetch 4 messages in a loop */
> + for (i = 1; i < n_elts - 4; i += 4) {
> + resp = get_rx_queue_message_at_index(rx_queue, head, 0);
> + resp1 = get_rx_queue_message_at_index(rx_queue, head, 1);
> + resp2 = get_rx_queue_message_at_index(rx_queue, head, 2);
> + resp3 = get_rx_queue_message_at_index(rx_queue, head, 3);
> +
> + status1 = status + element_interval;
> + status2 = status + element_interval * 2;
> + status3 = status + element_interval * 3;
> +
> + if (qat_is_rx_msg_ok(resp))
> + *status = element_status_success;
> + else {
> + *status = element_status_error;
> + n_fail--;
> + }
> +
> + if (qat_is_rx_msg_ok(resp1))
> + *status1 = element_status_success;
> + else {
> + *status1 = element_status_error;
> + n_fail1--;
> + }
> +
> + if (qat_is_rx_msg_ok(resp2))
> + *status2 = element_status_success;
> + else {
> + *status2 = element_status_error;
> + n_fail2--;
> + }
> +
> + if (qat_is_rx_msg_ok(resp3))
> + *status3 = element_status_success;
> + else {
> + *status3 = element_status_error;
> + n_fail3--;
> + }
> +
> + i += 4;
> + status = status3 + element_interval;
> + head = (head + rx_queue->msg_size * 4) & rx_queue->modulo_mask;
> + }
> +
> + for (; i < n_elts; i++) {
> + resp = get_rx_queue_message_at_index(rx_queue, head, 0);
> + if (qat_is_rx_msg_ok(resp))
> + *status = element_status_success;
> + else {
> + *status = element_status_error;
> + n_fail--;
> + }
> + status += element_interval;
> + head = (head + rx_queue->msg_size * 4) & rx_queue->modulo_mask;
> + }
> +
> + /* update queue pair head */
> + rx_queue->head = (rx_queue->head + i * rx_queue->msg_size) &
> + rx_queue->modulo_mask;
[Fiona] Is this necessary? Can head be different from the local var?
It may be on the other path due to the head coalescing, but I think
you can leave that feature out here.
> + rx_queue->nb_processed_responses += i;
> + qp->dequeued += i;
> + qp->stats.dequeued_count += i;
> + if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
[Fiona] No if needed here. Shouldn't be necessary in frame case.
> + rxq_free_desc(qp, rx_queue);
> +
> + return n_fail + n_fail1 + n_fail2 + n_fail3;
> +}
> diff --git a/drivers/crypto/qat/qat_sym_frame.h b/drivers/crypto/qat/qat_sym_frame.h
> new file mode 100644
> index 000000000..e378cacb8
> --- /dev/null
> +++ b/drivers/crypto/qat/qat_sym_frame.h
> @@ -0,0 +1,237 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2015-2018 Intel Corporation
> + */
> +
> +#ifndef _QAT_SYM_FRAME_H_
> +#define _QAT_SYM_FRAME_H_
> +
> +#include <rte_common.h>
> +
> +/**
> + * Get the QAT queue pair based on device id and queue pair id.
> + * Checks if passed arguments are valid.
> + *
> + * @param dev_id
> + * cryptodev device id.
> + * @param qp_id
> + * queue pair id
> + * @return
> + * pointer to queue pair if passed parameters are valid.
> + * NULL pointer otherwise.
> + **/
> +__rte_experimental
> +void *
> +qat_sym_get_qp(uint8_t devi_id, uint16_t qp_id);
> +
> +/**
> + * enqueue one AEAD operation into QAT queue
> + *
> + * @param qat_sym_qp
> + * queue pair data got from qat_sym_get_qp().
> + * @param session
> + * configured cryptodev symmetric session data.
> + * @param data_iova
> + * iova address of data.
> + * @param cipher_ofs
> + * cipher offset start from data_iova.
> + * @param cipher_len
> + * cipher total length.
> + * @param sgl
> + * in case of SGL data, pointer to an array of sgl structure.
> + * @param n_sgl_vecs
> + * number of SGL vectors in sgl array, 0 for non-sgl input.
> + * @param iv
> + * pointer to iv data.
> + * @param tag_iova
> + * Tag iova address.
> + * @param aad_iova
> + * AAD iova address.
> + * @param is_first
> + * 1 if it is the first operation in the frame.
> + * 0 otherwise.
> + * @param is_last
> + * 1 if the data is the last element in the frame.
> + * 0 otherwise.
> + * @param frame
> + * if is_first is set the frame pointer will be written in to the message.
> + *
> + * @return
> + * 0 if operation is successful, negative value if otherwise.
> + **/
> +
> +__rte_experimental
> +int
> +qat_sym_enqueue_frame_aead(void *qat_sym_qp,
> + struct rte_cryptodev_sym_session *session,
> + rte_iova_t data_iova, uint32_t cipher_ofs, uint32_t cipher_len,
> + struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
> + uint8_t *iv, rte_iova_t tag_iova, rte_iova_t aad_iova,
> + uint8_t is_first, uint8_t is_last, void *frame);
> +
> +/**
> + * enqueue one chaining operation (cipher and hash) into QAT queue
> + *
> + * @param qat_sym_qp
> + * queue pair data got from qat_sym_get_qp().
> + * @param session
> + * configured cryptodev symmetric session data.
> + * @param data_iova
> + * iova address of data.
> + * @param cipher_ofs
> + * cipher offset start from data_iova.
> + * @param cipher_len
> + * cipher total length.
> + * @param sgl
> + * in case of SGL data, pointer to an array of sgl structure.
> + * @param n_sgl_vecs
> + * number of SGL vectors in sgl array, 0 for non-sgl input.
> + * @param iv
> + * pointer to iv data.
> + * @param digest_iova
> + * Digest iova address.
> + * @param is_first
> + * 1 if it is the first operation in the frame so that opaque is to written
> + * into QAT queue message that can be retrieved upon dequeue.
> + * 0 otherwise.
> + * @param is_last
> + * 1 if the data is the last element in the frame, so that QAT queue tail
> + * is kicked and the HW will start processing
> + * 0 otherwise.
> + * @param opaque
> + * if is_first is set opaque will be written in to the message.
> + *
> + * @return
> + * 0 if operation is successful, negative value if otherwise.
> + **/
> +__rte_experimental
> +int
> +qat_sym_enqueue_frame_chain(void *qat_sym_qp,
> + struct rte_cryptodev_sym_session *session,
> + rte_iova_t data_iova, uint32_t cipher_ofs, uint32_t cipher_len,
> + uint32_t auth_ofs, uint32_t auth_len,
> + struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
> + uint8_t *iv, rte_iova_t digest_iova,
> + uint8_t is_first, uint8_t is_last, void *frame);
> +
> +/**
> + * enqueue one cipher-only operation into QAT queue
> + *
> + * @param qat_sym_qp
> + * queue pair data got from qat_sym_get_qp().
> + * @param session
> + * configured cryptodev symmetric session data.
> + * @param data_iova
> + * iova address of data.
> + * @param cipher_ofs
> + * cipher offset start from data_iova.
> + * @param cipher_len
> + * cipher total length.
> + * @param sgl
> + * in case of SGL data, pointer to an array of sgl structure.
> + * @param n_sgl_vecs
> + * number of SGL vectors in sgl array, 0 for non-sgl input.
> + * @param iv
> + * pointer to iv data.
> + * @param is_first
> + * 1 if it is the first operation in the frame.
> + * 0 otherwise.
> + * @param is_last
> + * 1 if the data is the last element in the frame.
> + * 0 otherwise.
> + * @param frame
> + * if is_first is set the frame pointer will be written in to the message.
> + *
> + * @return
> + * 0 if operation is successful, negative value if otherwise.
> + **/
> +
> +__rte_experimental
> +int
> +qat_sym_enqueue_frame_cipher(void *qat_sym_qp,
> + struct rte_cryptodev_sym_session *session,
> + rte_iova_t data_iova, uint32_t cipher_ofs, uint32_t cipher_len,
> + struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
> + uint8_t *iv, uint8_t is_first, uint8_t is_last, void *frame);
> +
> +/**
> + * enqueue one auth-only operation into QAT queue
> + *
> + * @param qat_sym_qp
> + * queue pair data got from qat_sym_get_qp().
> + * @param session
> + * configured cryptodev symmetric session data.
> + * @param data_iova
> + * iova address of data.
> + * @param auth_ofs
> + * authentication offset start from data_iova.
> + * @param auth_len
> + * authentication total length.
> + * @param sgl
> + * in case of SGL data, pointer to an array of sgl structure.
> + * @param n_sgl_vecs
> + * number of SGL vectors in sgl array, 0 for non-sgl input.
> + * @param iv
> + * pointer to iv data.
> + * @param digest_iova
> + * digest iova address.
> + * @param is_first
> + * 1 if it is the first operation in the frame.
> + * 0 otherwise.
> + * @param is_last
> + * 1 if the data is the last element in the frame.
> + * 0 otherwise.
> + * @param frame
> + * if is_first is set the frame pointer will be written in to the message.
> + *
> + * @return
> + * 0 if operation is successful, negative value if otherwise.
> + **/
> +
> +__rte_experimental
> +int
> +qat_sym_enqueue_frame_auth(void *qat_sym_qp,
> + struct rte_cryptodev_sym_session *session,
> + rte_iova_t data_iova, uint32_t auth_ofs, uint32_t auth_len,
> + struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
> + uint8_t *iv, rte_iova_t digest_iova,
> + uint8_t is_first, uint8_t is_last, void *frame);
> +
> +/**
> + * Function prototype to get the number of elements in a frame in dequeue.
> + * This function should be provided by the user.
> + **/
> +typedef uint32_t (*qat_qp_get_frame_n_element_t)(void *frame);
> +
> +/**
> + * Dequeue a frame from QAT queue
> + *
> + * @param qat_sym_qp
> + * queue pair data got from qat_sym_get_qp().
> + * @param frame
> + * return the frame dequeued.
> + * @param get_frame_n_elt
> + * callback function that gets opaque_data from the first processed message.
> + * @param first_status_offset
> + * the offset to status field of first frame element..
> + * @param element_interval
> + * the size of frame element in the frame data, used to compute next
> + * status field.
> + * @param element_status_success
> + * value to set for successfully processed frame element.
> + * @param element_status_error
> + * value to set for unsuccessfully processed frame element.
> + *
> + * @return
> + * if a frame is retrieved from the queue pair it will be written
> + * into "frame" parameter, otherwise "frame" will be written as NULL and
> + * -1 will be returned. If all elements are successful 0 will be returned.
> + * Negative number of failed elements will be returned.
> + **/
> +__rte_experimental
> +int
> +qat_sym_dequeue_frame(void *qat_sym_qp, void **frame,
> + qat_qp_get_frame_n_element_t get_frame_n_elt,
> + uint32_t first_status_offset, uint32_t element_interval,
> + uint8_t element_status_success, uint8_t element_status_error);
> +
> +#endif /* _QAT_SYM_FRAME_H_ */
> --
> 2.20.1
* [dpdk-dev] [dpdk-dev v2 0/3] crypto/qat: add symmetric crypto data-path APIs
2020-06-12 14:39 [dpdk-dev] [PATCH] crypto/qat: add data-path APIs Fan Zhang
2020-06-18 17:50 ` Trahe, Fiona
@ 2020-06-25 13:31 ` Fan Zhang
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 1/3] crypto/qat: add " Fan Zhang
` (4 more replies)
2020-06-26 6:55 ` [dpdk-dev] [PATCH] crypto/qat: add data-path APIs Jerin Jacob
2 siblings, 5 replies; 39+ messages in thread
From: Fan Zhang @ 2020-06-25 13:31 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang, Piotr Bronowski
This patch adds symmetric crypto data-path APIs for the QAT PMD. QAT direct
symmetric crypto data-path APIs are a set of APIs that provide
more HW-friendly enqueue/dequeue data-path functions as an alternative
approach to ``rte_cryptodev_enqueue_burst`` and
``rte_cryptodev_dequeue_burst``. The APIs are designed for external
libraries/applications that want to use QAT as a symmetric crypto data-path
accelerator but with their own data structures. With the APIs the
cycle cost spent on converting their data structures to DPDK cryptodev
operations can be reduced, and the dependency on the DPDK crypto operation
mempool can be relieved.

It is expected that the user can develop close-to-native performance
symmetric crypto data-path implementations with the functions provided
in this patchset.
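For illustration, a caller could build and enqueue one frame of AEAD
operations roughly as below (sketch only; qp, sess, frame and the
per-element data in elt[] are application-provided, and the frame layout
itself is application-defined):

    uint32_t i, tail = 0;
    struct qat_sym_job job = { 0 };

    for (i = 0; i < n_elts; i++) {
            job.data_iova = elt[i].data_iova;
            job.iv = elt[i].iv;
            job.aead.aead_ofs = 0;
            job.aead.aead_len = elt[i].len;
            job.aead.tag_iova = elt[i].tag_iova;
            job.aead.aad_iova = elt[i].aad_iova;
            if (qat_sym_enqueue_frame_aead(qp, sess, &job, &tail,
                            i == 0, i == n_elts - 1, frame) < 0)
                    break;
    }

The "tail" argument is only a shadow copy of the queue tail that the PMD
maintains between the first and the last element of a frame; the hardware
doorbell is rung once, when is_last is set.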
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
v2:
- Used a structure to simplify parameters.
- Added unit tests.
- Added documentation.
Fan Zhang (3):
crypto/qat: add data-path APIs
test/crypto: add unit-test for QAT direct APIs
doc: add QAT direct APIs guide
app/test/Makefile | 4 +
app/test/meson.build | 1 +
app/test/test_cryptodev.c | 371 +++++++++-
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 +-
doc/guides/prog_guide/cryptodev_lib.rst | 272 +++++++
drivers/common/qat/Makefile | 8 +-
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/compress/qat/rte_pmd_qat_version.map | 11 +
drivers/crypto/qat/meson.build | 5 +
drivers/crypto/qat/qat_sym.c | 1 -
drivers/crypto/qat/qat_sym_frame.c | 701 +++++++++++++++++++
drivers/crypto/qat/qat_sym_frame.h | 242 +++++++
14 files changed, 1636 insertions(+), 43 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_frame.c
create mode 100644 drivers/crypto/qat/qat_sym_frame.h
--
2.20.1
* [dpdk-dev] [dpdk-dev v2 1/3] crypto/qat: add data-path APIs
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 0/3] crypto/qat: add symmetric crypto " Fan Zhang
@ 2020-06-25 13:31 ` Fan Zhang
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 2/3] test/crypto: add unit-test for QAT direct APIs Fan Zhang
` (3 subsequent siblings)
4 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-06-25 13:31 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang, Piotr Bronowski
This patch adds data-path APIs to the QAT symmetric crypto driver to
support raw data as input.

This patch depends on patch-72157 ("cryptodev: add function to check
if qp was setup").
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
---
drivers/common/qat/Makefile | 8 +-
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/compress/qat/rte_pmd_qat_version.map | 11 +
drivers/crypto/qat/meson.build | 5 +
drivers/crypto/qat/qat_sym.c | 1 -
drivers/crypto/qat/qat_sym_frame.c | 701 +++++++++++++++++++
drivers/crypto/qat/qat_sym_frame.h | 242 +++++++
8 files changed, 969 insertions(+), 6 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_frame.c
create mode 100644 drivers/crypto/qat/qat_sym_frame.h
diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
index 28bd5668f..f030f1084 100644
--- a/drivers/common/qat/Makefile
+++ b/drivers/common/qat/Makefile
@@ -39,6 +39,11 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
SRCS-y += qat_sym.c
SRCS-y += qat_sym_session.c
SRCS-y += qat_sym_pmd.c
+ SRCS-y += qat_sym_frame.c
+
+ # export include files
+ SYMLINK-y-include += qat_sym_frame.h
+
build_qat = yes
endif
endif
@@ -61,9 +66,6 @@ ifdef build_qat
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
LDLIBS += -lrte_pci -lrte_bus_pci
- # export include files
- SYMLINK-y-include +=
-
# versioning export map
EXPORT_MAP := ../../compress/qat/rte_pmd_qat_version.map
endif
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 8e6dd04eb..06e2d8c8a 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,8 @@ txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
q->csr_tail = q->tail;
}
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
uint32_t old_head, new_head;
uint32_t max_head;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 575d69059..8add1b049 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -116,4 +116,7 @@ qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
void *op_cookie __rte_unused,
uint64_t *dequeue_err_count __rte_unused);
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q);
+
#endif /* _QAT_QP_H_ */
diff --git a/drivers/compress/qat/rte_pmd_qat_version.map b/drivers/compress/qat/rte_pmd_qat_version.map
index f9f17e4f6..a9160b157 100644
--- a/drivers/compress/qat/rte_pmd_qat_version.map
+++ b/drivers/compress/qat/rte_pmd_qat_version.map
@@ -1,3 +1,14 @@
DPDK_20.0 {
local: *;
};
+
+EXPERIMENTAL {
+ global:
+
+ qat_sym_get_qp;
+ qat_sym_enqueue_frame_aead;
+ qat_sym_enqueue_frame_cipher;
+ qat_sym_enqueue_frame_auth;
+ qat_sym_enqueue_frame_chain;
+ qat_sym_dequeue_frame;
+};
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index fc65923a7..8d53debcf 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -13,9 +13,14 @@ if dep.found()
qat_sources += files('qat_sym_pmd.c',
'qat_sym.c',
'qat_sym_session.c',
+ 'qat_sym_frame.c',
'qat_asym_pmd.c',
'qat_asym.c')
qat_ext_deps += dep
qat_cflags += '-DBUILD_QAT_SYM'
qat_cflags += '-DBUILD_QAT_ASYM'
+ headers = files(
+ 'qat_sym_frame.h',
+ )
+ use_function_versioning = true
endif
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 25b6dd5f4..609180d3f 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -336,7 +336,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
set_cipher_iv(ctx->cipher_iv.length,
ctx->cipher_iv.offset,
cipher_param, op, qat_req);
-
} else if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
diff --git a/drivers/crypto/qat/qat_sym_frame.c b/drivers/crypto/qat/qat_sym_frame.c
new file mode 100644
index 000000000..282bfbed7
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_frame.c
@@ -0,0 +1,701 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym.h"
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+#include "qat_sym_frame.h"
+#include "qat_qp.h"
+
+void *
+qat_sym_get_qp(uint8_t dev_id, uint16_t qp_id)
+{
+ struct rte_cryptodev *dev;
+ struct qat_qp *qp;
+ const char *drv_name;
+
+ /* make sure it is a QAT device */
+ if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
+ return NULL;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ drv_name = rte_cryptodev_driver_name_get(dev->driver_id);
+ if ((strncmp(drv_name, RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD),
+ sizeof(RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD))) != 0) ||
+ (qp_id > dev->data->nb_queue_pairs))
+ return NULL;
+
+ qp = dev->data->queue_pairs[qp_id];
+ if (qp->service_type != QAT_SERVICE_SYMMETRIC)
+ return NULL;
+
+ return (void *)qp;
+}
+
+static __rte_always_inline int
+qat_sym_frame_fill_sgl(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_sgl *sgl, uint32_t max_len)
+{
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sgl *list;
+ int64_t len = max_len;
+ uint32_t i;
+
+ if (!sgl)
+ return -EINVAL;
+ if (sgl->num < 2 || sgl->num > QAT_SYM_SGL_MAX_NUMBER || !sgl->vec)
+ return -EINVAL;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
+ list = (struct qat_sgl *)&cookie->qat_sgl_src;
+
+ for (i = 0; i < sgl->num && len > 0; i++) {
+ list->buffers[i].len = RTE_MIN(sgl->vec[i].len, len);
+ list->buffers[i].resrvd = 0;
+ list->buffers[i].addr = sgl->vec[i].iova;
+ len -= list->buffers[i].len;
+ }
+
+ if (unlikely(len > 0))
+ return -1;
+
+ list->num_bufs = i;
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ req->comn_mid.src_length = req->comn_mid.dst_length = 0;
+ return 0;
+}
+
+static __rte_always_inline void
+qat_sym_set_cipher_param(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ uint32_t cipher_ofs, uint32_t cipher_len)
+{
+ cipher_param->cipher_offset = cipher_ofs;
+ cipher_param->cipher_length = cipher_len;
+}
+
+static __rte_always_inline void
+qat_sym_set_auth_param(struct icp_qat_fw_la_auth_req_params *auth_param,
+ uint32_t auth_ofs, uint32_t auth_len,
+ rte_iova_t digest_iova, rte_iova_t aad_iova)
+{
+ auth_param->auth_off = auth_ofs;
+ auth_param->auth_len = auth_len;
+ auth_param->auth_res_addr = digest_iova;
+ auth_param->u1.aad_adr = aad_iova;
+}
+
+int
+qat_sym_enqueue_frame_aead(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct qat_sym_job *job, uint32_t *tail,
+ uint8_t is_first, uint8_t is_last, void *frame)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+ /* In case of AES-CCM this may point to user selected
+ * memory or the iv offset in the crypto op
+ */
+ uint8_t *aad_data;
+ /* This is the true AAD length, it does not include the 18 bytes of
+ * preceding data
+ */
+ uint8_t aad_ccm_real_len;
+ uint8_t aad_len_field_sz;
+ uint32_t msg_len_be;
+ rte_iova_t aad_iova;
+ uint8_t q;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely(is_first != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)frame;
+ } else {
+ t = *tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->aead.aead_ofs + job->aead.aead_len;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ aad_iova = job->aead.aad_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ aad_data = job->aead.aad;
+ aad_iova = job->aead.aad_iova;
+ aad_ccm_real_len = 0;
+ aad_len_field_sz = 0;
+ msg_len_be = rte_bswap32(job->aead.aead_len);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ aad_data = job->iv;
+ aad_iova = job->iv_iova;
+ }
+
+ q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+ ctx->digest_length, q);
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET +
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+ rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx], 0, pad_len);
+ }
+
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+ + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(job->aead.aad +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ qat_sym_set_cipher_param(cipher_param, job->aead.aead_ofs,
+ job->aead.aead_len);
+ qat_sym_set_auth_param(auth_param, job->aead.aead_ofs,
+ job->aead.aead_len, job->aead.tag_iova, aad_iova);
+
+ if (unlikely((job->flags & QAT_SYM_DESC_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->aead.aead_ofs + job->aead.aead_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ if (ctx->is_single_pass) {
+ cipher_param->spc_aad_addr = aad_iova;
+ cipher_param->spc_auth_res_addr = job->aead.tag_iova;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely(is_last != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *tail = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+
+int
+qat_sym_enqueue_frame_cipher(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct qat_sym_job *job, uint32_t *tail,
+ uint8_t is_first, uint8_t is_last, void *frame)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely(is_first != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)frame;
+ } else {
+ t = *tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->cipher_only.cipher_ofs,
+ job->cipher_only.cipher_len);
+
+ if (unlikely((job->flags & QAT_SYM_DESC_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely(is_last != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *tail = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+int
+qat_sym_enqueue_frame_auth(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct qat_sym_job *job, uint32_t *tail,
+ uint8_t is_first, uint8_t is_last, void *frame)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely(is_first != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)frame;
+ } else {
+ t = *tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ auth_param = (void *)((uint8_t *)&req->serv_specif_rqpars +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->auth_only.auth_ofs + job->auth_only.auth_len;
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->auth_only.auth_ofs,
+ job->auth_only.auth_len, job->auth_only.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((job->flags & QAT_SYM_DESC_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->auth_only.auth_ofs +
+ job->auth_only.auth_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely(is_last != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *tail = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+int
+qat_sym_enqueue_frame_chain(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct qat_sym_job *job, uint32_t *tail,
+ uint8_t is_first, uint8_t is_last, void *frame)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t min_ofs = RTE_MIN(job->chain.cipher_ofs, job->chain.auth_ofs);
+ uint32_t max_len = RTE_MAX(job->chain.cipher_len, job->chain.auth_len);
+ rte_iova_t auth_iova_end;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely(is_first != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)frame;
+ } else {
+ t = *tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr =
+ req->comn_mid.dest_data_addr = job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length = min_ofs + max_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->chain.cipher_ofs,
+ job->chain.cipher_len);
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->chain.auth_ofs,
+ job->chain.auth_len, job->chain.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+
+ if (unlikely(job->flags & QAT_SYM_DESC_FLAG_IS_SGL)) {
+ uint32_t len = job->chain.auth_ofs +
+ job->chain.auth_len;
+ struct rte_crypto_vec *vec = job->sgl->vec;
+ int auth_end_get = 0;
+ while (len) {
+ if (len <= vec->len) {
+ auth_iova_end = vec->iova + len;
+ auth_end_get = 1;
+ break;
+ }
+ len -= vec->len;
+ vec++;
+ }
+ if (!auth_end_get) {
+ QAT_DP_LOG(ERR, "Failed to get auth end");
+ return -1;
+ }
+ } else
+ auth_iova_end = job->data_iova + job->chain.auth_ofs +
+ job->chain.auth_len;
+
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_param->auth_off + auth_param->auth_len <
+ cipher_param->cipher_offset +
+ cipher_param->cipher_length) &&
+ (job->chain.digest_iova == auth_iova_end)) {
+ /* Handle partial digest encryption */
+ if (cipher_param->cipher_offset +
+ cipher_param->cipher_length <
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length)
+ req->comn_mid.dst_length =
+ req->comn_mid.src_length =
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length;
+ struct icp_qat_fw_comn_req_hdr *header =
+ &req->comn_hdr;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((job->flags & QAT_SYM_DESC_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ min_ofs + max_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely(is_last != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *tail = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+#define get_rx_queue_message_at_index(q, h, i) \
+ (void *)((uint8_t *)q->base_addr + ((h + q->msg_size * (i)) & \
+ q->modulo_mask))
+
+static __rte_always_inline int
+qat_is_rx_msg_ok(struct icp_qat_fw_comn_resp *resp_msg)
+{
+ return ICP_QAT_FW_COMN_STATUS_FLAG_OK ==
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status);
+}
+
+int
+qat_sym_dequeue_frame(void *qat_sym_qp, void **frame,
+ qat_qp_get_frame_n_element_t get_frame_n_elt,
+ uint32_t first_status_offset, uint32_t element_interval,
+ uint8_t element_status_success, uint8_t element_status_error)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp, *resp1, *resp2, *resp3, *respl;
+ void *f = NULL;
+ uint32_t n_elts, i;
+ uint8_t *status, *status1, *status2, *status3;
+ int n_fail = 0, n_fail1 = 0, n_fail2 = 0, n_fail3 = 0;
+ uint32_t head = rx_queue->head;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ /* if the message is not yet processed, return -1 */
+ if (*(uint32_t *)resp == ADF_RING_EMPTY_SIG) {
+ *frame = NULL;
+ return -1;
+ }
+
+ f = (void *)(uintptr_t)resp->opaque_data;
+ if (unlikely(f == NULL)) {
+ *frame = NULL;
+ return -1;
+ }
+
+ *frame = f;
+ status = (uint8_t *)f + first_status_offset;
+
+ n_elts = (*get_frame_n_elt)(f);
+ if (unlikely(n_elts == 0))
+ return -1;
+
+ /* process the first message */
+ if (qat_is_rx_msg_ok(resp))
+ *status = element_status_success;
+ else {
+ *status = element_status_error;
+ n_fail--;
+ }
+
+ /* get the last message */
+ respl = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + ((head +
+ rx_queue->msg_size * (n_elts - 1)) &
+ rx_queue->modulo_mask));
+ if (*(uint32_t *)respl == ADF_RING_EMPTY_SIG) {
+ *frame = NULL;
+ return -1;
+ }
+
+ status += element_interval;
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+ /* fetch 4 messages in a loop */
+ for (i = 1; n_elts > 4 && i < n_elts - 4; i += 4) {
+ resp = get_rx_queue_message_at_index(rx_queue, head, 0);
+ resp1 = get_rx_queue_message_at_index(rx_queue, head, 1);
+ resp2 = get_rx_queue_message_at_index(rx_queue, head, 2);
+ resp3 = get_rx_queue_message_at_index(rx_queue, head, 3);
+
+ status1 = status + element_interval;
+ status2 = status + element_interval * 2;
+ status3 = status + element_interval * 3;
+
+ if (qat_is_rx_msg_ok(resp))
+ *status = element_status_success;
+ else {
+ *status = element_status_error;
+ n_fail--;
+ }
+
+ if (qat_is_rx_msg_ok(resp1))
+ *status1 = element_status_success;
+ else {
+ *status1 = element_status_error;
+ n_fail1--;
+ }
+
+ if (qat_is_rx_msg_ok(resp2))
+ *status2 = element_status_success;
+ else {
+ *status2 = element_status_error;
+ n_fail2--;
+ }
+
+ if (qat_is_rx_msg_ok(resp3))
+ *status3 = element_status_success;
+ else {
+ *status3 = element_status_error;
+ n_fail3--;
+ }
+
+ status = status3 + element_interval;
+ head = (head + rx_queue->msg_size * 4) & rx_queue->modulo_mask;
+ }
+
+ for (; i < n_elts - 1; i++) {
+ resp = get_rx_queue_message_at_index(rx_queue, head, 0);
+ if (qat_is_rx_msg_ok(resp))
+ *status = element_status_success;
+ else {
+ *status = element_status_error;
+ n_fail--;
+ }
+ status += element_interval;
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ }
+
+ /* fill the last status field */
+ if (qat_is_rx_msg_ok(respl))
+ *status = element_status_success;
+ else {
+ *status = element_status_error;
+ n_fail--;
+ }
+
+ /* update queue pair head */
+ rx_queue->head = head;
+ rx_queue->nb_processed_responses += i;
+ qp->dequeued += i;
+ qp->stats.dequeued_count += i;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+
+ return n_fail + n_fail1 + n_fail2 + n_fail3;
+}
diff --git a/drivers/crypto/qat/qat_sym_frame.h b/drivers/crypto/qat/qat_sym_frame.h
new file mode 100644
index 000000000..5397799e3
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_frame.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_FRAME_H_
+#define _QAT_SYM_FRAME_H_
+
+/* Structure to fill QAT tx queue. */
+struct qat_sym_job {
+ union {
+ /**
+ * When QAT_SYM_DESC_FLAG_IS_SGL bit is set in flags, sgl
+ * field is used as input data. Otherwise data_iova is
+ * used.
+ **/
+ rte_iova_t data_iova;
+ struct rte_crypto_sgl *sgl;
+ };
+ union {
+ /**
+ * Unlike cryptodev, all ofs and len fields are in the unit of
+ * bytes (including SNOW 3G, KASUMI and ZUC).
+ **/
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ } cipher_only;
+ struct {
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } auth_only;
+ struct {
+ uint32_t aead_ofs;
+ uint32_t aead_len;
+ rte_iova_t tag_iova;
+ /* The aad pointer is required to be filled only for CCM; for GCM
+ * only aad_iova is mandatory.
+ * Also for CCM the first byte of the aad buffer is used to
+ * construct the B0 block.
+ */
+ uint8_t *aad;
+ rte_iova_t aad_iova;
+ } aead;
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } chain;
+ };
+ uint8_t *iv;
+ rte_iova_t iv_iova;
+
+#define QAT_SYM_DESC_FLAG_IS_SGL (1 << 0)
+ uint32_t flags;
+};
+
+/**
+ * Get the QAT queue pair based on device id and queue pair id.
+ * Checks if passed arguments are valid.
+ *
+ * @param dev_id
+ * cryptodev device id.
+ * @param qp_id
+ * queue pair id
+ * @return
+ * pointer to queue pair if passed parameters are valid.
+ * NULL pointer otherwise.
+ **/
+__rte_experimental
+void *
+qat_sym_get_qp(uint8_t dev_id, uint16_t qp_id);
+
+/**
+ * enqueue one AEAD job into QAT queue
+ *
+ * @param qat_sym_qp
+ * queue pair data got from qat_sym_get_qp().
+ * @param session
+ * configured cryptodev symmetric session data.
+ * @param job
+ * job data to be filled into QAT tx queue message.
+ * @param tail
+ * Pointer to the queue tail index variable. Its value is updated only by
+ * this function; the caller only provides the storage.
+ * @param is_first
+ * 1 if it is the first operation in the frame.
+ * 0 otherwise.
+ * @param is_last
+ * 1 if the data is the last element in the frame.
+ * 0 otherwise.
+ * @param frame
+ * If is_first is set, the frame pointer will be written into the message.
+ *
+ * @return
+ * 0 if operation is successful, negative value if otherwise.
+ **/
+__rte_experimental
+int
+qat_sym_enqueue_frame_aead(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct qat_sym_job *job, uint32_t *tail,
+ uint8_t is_first, uint8_t is_last, void *frame);
+
+/**
+ * enqueue one chaining operation (cipher and hash) into QAT queue
+ *
+ * @param qat_sym_qp
+ * queue pair data got from qat_sym_get_qp().
+ * @param session
+ * configured cryptodev symmetric session data.
+ * @param job
+ * job data to be filled into QAT tx queue message.
+ * @param tail
+ * Pointer to the queue tail index variable. Its value is updated only by
+ * this function; the caller only provides the storage.
+ * @param is_first
+ * 1 if it is the first operation in the frame.
+ * 0 otherwise.
+ * @param is_last
+ * 1 if the data is the last element in the frame.
+ * 0 otherwise.
+ * @param frame
+ * If is_first is set, the frame pointer will be written into the message.
+ *
+ * @return
+ * 0 if operation is successful, negative value if otherwise.
+ **/
+__rte_experimental
+int
+qat_sym_enqueue_frame_chain(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct qat_sym_job *job, uint32_t *tail,
+ uint8_t is_first, uint8_t is_last, void *frame);
+
+/**
+ * enqueue one cipher-only operation into QAT queue
+ *
+ * @param qat_sym_qp
+ * queue pair data got from qat_sym_get_qp().
+ * @param session
+ * configured cryptodev symmetric session data.
+ * @param job
+ * job data to be filled into QAT tx queue message.
+ * @param tail
+ * Pointer to the queue tail index variable. Its value is updated only by
+ * this function; the caller only provides the storage.
+ * @param is_first
+ * 1 if it is the first operation in the frame.
+ * 0 otherwise.
+ * @param is_last
+ * 1 if the data is the last element in the frame.
+ * 0 otherwise.
+ * @param frame
+ * If is_first is set, the frame pointer will be written into the message.
+ *
+ * @return
+ * 0 if operation is successful, negative value if otherwise.
+ **/
+__rte_experimental
+int
+qat_sym_enqueue_frame_cipher(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct qat_sym_job *job, uint32_t *tail,
+ uint8_t is_first, uint8_t is_last, void *frame);
+
+/**
+ * enqueue one auth-only operation into QAT queue
+ *
+ * @param qat_sym_qp
+ * queue pair data got from qat_sym_get_qp().
+ * @param session
+ * configured cryptodev symmetric session data.
+ * @param job
+ * job data to be filled into QAT tx queue message.
+ * @param tail
+ * Pointer to the queue tail index variable. Its value is updated only by
+ * this function; the caller only provides the storage.
+ * @param is_first
+ * 1 if it is the first operation in the frame.
+ * 0 otherwise.
+ * @param is_last
+ * 1 if the data is the last element in the frame.
+ * 0 otherwise.
+ * @param frame
+ * If is_first is set, the frame pointer will be written into the message.
+ *
+ * @return
+ * 0 if operation is successful, negative value if otherwise.
+ **/
+__rte_experimental
+int
+qat_sym_enqueue_frame_auth(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct qat_sym_job *job, uint32_t *tail,
+ uint8_t is_first, uint8_t is_last, void *frame);
+
+/**
+ * Function prototype used during dequeue to get the number of elements in
+ * a frame. The function shall be provided by the user.
+ **/
+typedef uint32_t (*qat_qp_get_frame_n_element_t)(void *frame);
+
+/**
+ * Dequeue a frame from QAT queue
+ *
+ * @param qat_sym_qp
+ * queue pair data got from qat_sym_get_qp().
+ * @param frame
+ * return the frame dequeued.
+ * @param get_frame_n_elt
+ * user-provided callback that returns the number of elements in the frame;
+ * it is called with the frame pointer taken from the opaque data of the
+ * first processed message.
+ * @param first_status_offset
+ * the offset of the status field of the first frame element.
+ * @param element_interval
+ * the size of one frame element, used as the interval between consecutive
+ * status fields.
+ * @param element_status_success
+ * value to set for successfully processed frame element.
+ * @param element_status_error
+ * value to set for unsuccessfully processed frame element.
+ *
+ * @return
+ * If a frame is retrieved from the queue pair it is written to the "frame"
+ * parameter, otherwise "frame" is set to NULL and -1 is returned. If all
+ * elements were processed successfully 0 is returned; otherwise the
+ * negative of the number of failed elements is returned.
+ **/
+__rte_experimental
+int
+qat_sym_dequeue_frame(void *qat_sym_qp, void **frame,
+ qat_qp_get_frame_n_element_t get_frame_n_elt,
+ uint32_t first_status_offset, uint32_t element_interval,
+ uint8_t element_status_success, uint8_t element_status_error);
+
+#endif /* _QAT_SYM_FRAME_H_ */
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v2 2/3] test/crypto: add unit-test for QAT direct APIs
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 0/3] crypto/qat: add symmetric crypto " Fan Zhang
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 1/3] crypto/qat: add " Fan Zhang
@ 2020-06-25 13:31 ` Fan Zhang
2020-06-30 17:47 ` Trahe, Fiona
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 3/3] doc: add QAT direct APIs guide Fan Zhang
` (2 subsequent siblings)
4 siblings, 1 reply; 39+ messages in thread
From: Fan Zhang @ 2020-06-25 13:31 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang
This patch adds the test to use QAT symmetric crypto direct APIs.
The test will be enabled only when QAT Sym PMD is built.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
app/test/Makefile | 4 +
app/test/meson.build | 1 +
app/test/test_cryptodev.c | 371 ++++++++++++++++++++++++--
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 ++--
5 files changed, 395 insertions(+), 37 deletions(-)
diff --git a/app/test/Makefile b/app/test/Makefile
index 5b119aa61..da5c7648b 100644
--- a/app/test/Makefile
+++ b/app/test/Makefile
@@ -296,6 +296,10 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER),y)
LDLIBS += -lrte_pmd_crypto_scheduler
endif
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
+LDLIBS += -lrte_pmd_qat
+endif
+
endif
ifeq ($(CONFIG_RTE_APP_TEST_RESOURCE_TAR),y)
diff --git a/app/test/meson.build b/app/test/meson.build
index 1715ddbcb..f99395b6b 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -310,6 +310,7 @@ driver_test_names = [
'cryptodev_sw_mvsam_autotest',
'cryptodev_sw_snow3g_autotest',
'cryptodev_sw_zuc_autotest',
+ 'cryptodev_qat_sym_api_autotest',
'eventdev_selftest_octeontx',
'eventdev_selftest_sw',
'rawdev_autotest',
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 8f631468b..9ac97e777 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -17,6 +17,10 @@
#include <rte_cryptodev_pmd.h>
#include <rte_string_fns.h>
+#ifdef RTE_LIBRTE_PMD_QAT_SYM
+#include <qat_sym_frame.h>
+#endif
+
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#include <rte_cryptodev_scheduler_operations.h>
@@ -55,6 +59,8 @@ static int gbl_driver_id;
static enum rte_security_session_action_type gbl_action_type =
RTE_SECURITY_ACTION_TYPE_NONE;
+int qat_api_test;
+
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
struct rte_mempool *large_mbuf_pool;
@@ -142,6 +148,164 @@ ceil_byte_length(uint32_t num_bits)
return (num_bits >> 3);
}
+#ifdef RTE_LIBRTE_PMD_QAT_SYM
+static uint32_t
+get_qat_api_n_elts(void *frame __rte_unused)
+{
+ return 1;
+}
+
+void
+process_qat_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits)
+{
+ int32_t n;
+ struct rte_crypto_op *op_ret;
+ void *qp = qat_sym_get_qp(dev_id, qp_id);
+ struct rte_crypto_sym_op *sop;
+ struct qat_sym_job job;
+ struct rte_crypto_sgl sgl;
+ struct rte_crypto_vec vec[UINT8_MAX] = { {0} };
+ int ret;
+ uint32_t min_ofs = 0, max_len = 0;
+ uint32_t tail;
+ enum {
+ cipher = 0,
+ auth,
+ chain,
+ aead
+ } qat_api_test_type;
+ uint32_t count = 0;
+
+ if (!qp) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ return;
+ }
+
+ memset(&job, 0, sizeof(job));
+
+ sop = op->sym;
+
+ if (is_cipher && is_auth) {
+ qat_api_test_type = chain;
+ min_ofs = RTE_MIN(sop->cipher.data.offset,
+ sop->auth.data.offset);
+ max_len = RTE_MAX(sop->cipher.data.length,
+ sop->auth.data.length);
+ } else if (is_cipher) {
+ qat_api_test_type = cipher;
+ min_ofs = sop->cipher.data.offset;
+ max_len = sop->cipher.data.length;
+ } else if (is_auth) {
+ qat_api_test_type = auth;
+ min_ofs = sop->auth.data.offset;
+ max_len = sop->auth.data.length;
+ } else { /* aead */
+ qat_api_test_type = aead;
+ min_ofs = sop->aead.data.offset;
+ max_len = sop->aead.data.length;
+ }
+
+ if (len_in_bits) {
+ max_len = max_len >> 3;
+ min_ofs = min_ofs >> 3;
+ }
+
+ n = rte_crypto_mbuf_to_vec(sop->m_src, min_ofs, max_len,
+ vec, RTE_DIM(vec));
+ if (n < 0 || n != sop->m_src->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ if (n > 1) {
+ sgl.vec = vec;
+ sgl.num = n;
+ job.flags |= QAT_SYM_DESC_FLAG_IS_SGL;
+ job.sgl = &sgl;
+ } else
+ job.data_iova = rte_pktmbuf_iova(sop->m_src);
+
+
+ switch (qat_api_test_type) {
+ case aead:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+ job.aead.aead_ofs = min_ofs;
+ job.aead.aead_len = max_len;
+ job.aead.aad = sop->aead.aad.data;
+ job.aead.aad_iova = sop->aead.aad.phys_addr;
+ job.aead.tag_iova = sop->aead.digest.phys_addr;
+ ret = qat_sym_enqueue_frame_aead(qp, sop->session, &job,
+ &tail, 1, 1, (void *)op);
+ break;
+ case cipher:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.cipher_only.cipher_ofs = min_ofs;
+ job.cipher_only.cipher_len = max_len;
+ ret = qat_sym_enqueue_frame_cipher(qp, sop->session, &job,
+ &tail, 1, 1, (void *)op);
+ break;
+ case auth:
+ job.auth_only.auth_ofs = min_ofs;
+ job.auth_only.auth_len = max_len;
+ job.auth_only.digest_iova = sop->auth.digest.phys_addr;
+ ret = qat_sym_enqueue_frame_auth(qp, sop->session, &job,
+ &tail, 1, 1, (void *)op);
+ break;
+ case chain:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+ job.chain.cipher_ofs = sop->cipher.data.offset;
+ job.chain.cipher_len = sop->cipher.data.length;
+ if (len_in_bits) {
+ job.chain.cipher_len = job.chain.cipher_len >> 3;
+ job.chain.cipher_ofs = job.chain.cipher_ofs >> 3;
+ }
+ job.chain.auth_ofs = sop->auth.data.offset;
+ job.chain.auth_len = sop->auth.data.length;
+ if (len_in_bits) {
+ job.chain.auth_len = job.chain.auth_len >> 3;
+ job.chain.auth_ofs = job.chain.auth_ofs >> 3;
+ }
+ job.chain.digest_iova = sop->auth.digest.phys_addr;
+ ret = qat_sym_enqueue_frame_chain(qp, sop->session, &job,
+ &tail, 1, 1, (void *)op);
+ break;
+ }
+
+ if (ret < 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ ret = -1;
+ while (ret != 0 && count++ < 1024) {
+ ret = qat_sym_dequeue_frame(qp, (void **)&op_ret,
+ get_qat_api_n_elts,
+ offsetof(struct rte_crypto_op, status),
+ sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op),
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ RTE_CRYPTO_OP_STATUS_ERROR);
+ if (!op_ret)
+ rte_delay_ms(1);
+ }
+ if (ret < 0 || count >= 1024)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+#else
+void
+process_qat_api_op(__rte_unused uint8_t dev_id,
+ __rte_unused uint16_t qp_id, struct rte_crypto_op *op,
+ __rte_unused uint8_t is_cipher, __rte_unused uint8_t is_auth,
+ __rte_unused uint8_t len_in_bits)
+{
+ RTE_LOG(ERR, USER1, "QAT SYM is not enabled\n");
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+#endif
+
static void
process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
{
@@ -2451,7 +2615,11 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -2530,7 +2698,11 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2600,6 +2772,9 @@ test_kasumi_authentication(const struct kasumi_hash_test_data *tdata)
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_crypt_auth_op(ts_params->valid_devs[0],
ut_params->op);
+ else if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 0, 1, 1);
else
ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
@@ -2671,7 +2846,11 @@ test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2878,8 +3057,12 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
- ut_params->op);
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -2964,7 +3147,11 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3287,7 +3474,11 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3362,7 +3553,11 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3737,7 +3932,11 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -3905,7 +4104,11 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4000,7 +4203,11 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4136,7 +4343,11 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4325,7 +4536,11 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4507,7 +4722,11 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4697,7 +4916,11 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4838,7 +5061,11 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4925,7 +5152,11 @@ test_zuc_encryption(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5012,7 +5243,11 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5100,7 +5335,11 @@ test_zuc_authentication(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5232,7 +5471,11 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5418,7 +5661,11 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -7024,6 +7271,9 @@ test_authenticated_encryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (qat_api_test)
+ process_qat_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -7993,6 +8243,9 @@ test_authenticated_decryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (qat_api_test == 1)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -11284,6 +11537,9 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
if (oop == IN_PLACE &&
gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (oop == IN_PLACE && qat_api_test == 1)
+ process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
+ 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -13241,6 +13497,79 @@ test_cryptodev_nitrox(void)
return unit_test_suite_runner(&cryptodev_nitrox_testsuite);
}
+#ifdef RTE_LIBRTE_PMD_QAT_SYM
+static struct unit_test_suite qat_direct_api_testsuite = {
+ .suite_name = "Crypto QAT direct API Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_cipheronly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_chain_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_qat_sym_direct_api(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ int ret;
+
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check that both "
+ "CONFIG_RTE_LIBRTE_PMD_QAT and CONFIG_RTE_LIBRTE_PMD_QAT_SYM "
+ "are enabled in config file to run this testsuite.\n");
+ return TEST_SKIPPED;
+ }
+
+ qat_api_test = 1;
+ ret = unit_test_suite_runner(&qat_direct_api_testsuite);
+ qat_api_test = 0;
+
+ return ret;
+}
+
+REGISTER_TEST_COMMAND(cryptodev_qat_sym_api_autotest, test_qat_sym_direct_api);
+
+#endif /* RTE_LIBRTE_PMD_QAT_SYM */
+
REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_mb_autotest,
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 41542e055..6acc3f4ee 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -71,6 +71,8 @@
#define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
+extern int qat_api_test;
+
/**
* Write (spread) data from buffer to mbuf data
*
@@ -209,4 +211,8 @@ create_segmented_mbuf(struct rte_mempool *mbuf_pool, int pkt_len,
return NULL;
}
+void
+process_qat_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits);
+
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_blockcipher.c b/app/test/test_cryptodev_blockcipher.c
index 642b54971..0fa6895eb 100644
--- a/app/test/test_cryptodev_blockcipher.c
+++ b/app/test/test_cryptodev_blockcipher.c
@@ -461,25 +461,43 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
/* Process crypto operation */
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Error sending packet for encryption");
- status = TEST_FAILED;
- goto error_exit;
- }
+ if (qat_api_test) {
+ uint8_t is_cipher = 0, is_auth = 0;
+
+ if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
+ RTE_LOG(DEBUG, USER1,
+ "QAT direct API does not support OOP, Test Skipped.\n");
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "SKIPPED");
+ status = TEST_SUCCESS;
+ goto error_exit;
+ }
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
+ is_cipher = 1;
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
+ is_auth = 1;
+
+ process_qat_api_op(dev_id, 0, op, is_cipher, is_auth, 0);
+ } else {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for encryption");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
- op = NULL;
+ op = NULL;
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
- rte_pause();
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
+ rte_pause();
- if (!op) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Failed to process sym crypto op");
- status = TEST_FAILED;
- goto error_exit;
+ if (!op) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process sym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
}
debug_hexdump(stdout, "m_src(after):",
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v2 3/3] doc: add QAT direct APIs guide
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 0/3] crypto/qat: add symmetric crypto " Fan Zhang
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 1/3] crypto/qat: add " Fan Zhang
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 2/3] test/crypto: add unit-test for QAT direct APIs Fan Zhang
@ 2020-06-25 13:31 ` Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
4 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-06-25 13:31 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang
This patch updates programmer's guide to demonstrate the usage
and limitations of QAT symmetric crypto data-path APIs.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
doc/guides/prog_guide/cryptodev_lib.rst | 272 ++++++++++++++++++++++++
1 file changed, 272 insertions(+)
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index c14f750fa..abc66a679 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -861,6 +861,278 @@ using one of the crypto PMDs available in DPDK.
num_dequeued_ops);
} while (total_num_dequeued_ops < num_enqueued_ops);
+QAT Direct Symmetric Crypto Data-path APIs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The QAT direct symmetric crypto data-path APIs are a set of APIs specific to
+the QAT symmetric PMD that provide fast data-path enqueue/dequeue operations.
+The direct data-path APIs take advantage of the existing Cryptodev APIs for
+device, queue pair, and session management. In addition, the user is required
+to obtain the QAT queue pair pointer before calling the direct data-path APIs.
+The APIs are an advanced-feature alternative to
+``rte_cryptodev_enqueue_burst`` and ``rte_cryptodev_dequeue_burst``, designed
+for users who want close-to-native performance in their symmetric crypto
+data path without depending on cryptodev operations, cryptodev operation
+mempools, or mbufs.
+
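+For example, a minimal sketch of obtaining the queue pair pointer could look
+as below. The ``dev_id`` and ``qp_id`` values are assumed to identify a QAT
+cryptodev and a queue pair that have already been configured and started
+through the standard Cryptodev API:
+
+.. code-block:: c
+
+  #include <qat_sym_frame.h>
+
+  /* dev_id/qp_id are assumed to be valid, already configured identifiers */
+  void *qp = qat_sym_get_qp(dev_id, qp_id);
+  if (qp == NULL) {
+      /* Not a QAT device, or the queue pair is not initialized */
+      return -1;
+  }
+  /* qp can now be passed to qat_sym_enqueue_frame_*() and
+   * qat_sym_dequeue_frame().
+   */
+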
+The fast data-path enqueue/dequeue is achieved by minimizing the data needed
+to issue a QAT symmetric crypto request and by reducing the branches inside
+the enqueue functions. To minimize reads and writes of opaque data in and out
+of the QAT descriptors, the APIs also adopt the idea of a crypto workload
+frame, which is essentially a data structure describing a burst of crypto
+jobs. Only the first QAT descriptor is written with the frame pointer upon
+enqueue, and the user provides a way for the driver to learn the number of
+elements in the dequeued frame and how to write the status field of each
+frame element. Upon dequeue, only a finished frame or NULL is returned to the
+caller application.
+
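+As an illustration only, a hypothetical user frame with one status byte per
+element could map onto the dequeue parameters as follows (the structure and
+function names below are examples, not part of the API):
+
+.. code-block:: c
+
+  struct my_elt {
+      uint8_t status;   /* written by qat_sym_dequeue_frame() */
+      /* ... application data for this element ... */
+  };
+
+  struct my_frame {
+      struct my_elt elts[32];
+      uint32_t n_elts;
+  };
+
+  /* User-provided callback telling the driver how many elements are in the
+   * frame taken from the first descriptor's opaque data.
+   */
+  static uint32_t
+  my_frame_n_elts(void *f)
+  {
+      return ((struct my_frame *)f)->n_elts;
+  }
+
+  /* first_status_offset would be offsetof(struct my_frame, elts[0].status)
+   * and element_interval would be sizeof(struct my_elt).
+   */
+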
+To simplify the enqueue APIs, a QAT symmetric job is defined:
+
+.. code-block:: c
+
+ /* Structure to fill QAT tx queue. */
+ struct qat_sym_job {
+ union {
+ /**
+ * When QAT_SYM_DESC_FLAG_IS_SGL bit is set in flags, sgl
+ * field is used as input data. Otherwise data_iova is
+ * used.
+ **/
+ rte_iova_t data_iova;
+ struct rte_crypto_sgl *sgl;
+ };
+ union {
+ /**
+ * Depends on the calling API one of the following fields in
+ * the following structures are used.
+ *
+ * Different than cryptodev, all ofs and len fields have the
+ * unit of bytes, including Snow3G/Kasumi/Zuc.
+ **/
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ } cipher_only;
+
+ struct {
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } auth_only;
+
+ struct {
+ uint32_t aead_ofs;
+ uint32_t aead_len;
+ rte_iova_t tag_iova;
+ /* The aad is required to be filled only for CCM, for GCM
+ * only aad_iova is mandatory.
+ *
+ * Also for CCM the first byte of aad data will be
+ * used to construct B0 data
+ */
+ uint8_t *aad;
+ rte_iova_t aad_iova;
+ } aead;
+
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } chain;
+ };
+ uint8_t *iv;
+ rte_iova_t iv_iova;
+
+ #define QAT_SYM_DESC_FLAG_IS_SGL (1 << 0)
+ uint32_t flags;
+ };
+
+Unlike the Cryptodev operation, the ``qat_sym_job`` structure contains only
+the data fields required for QAT to execute a single job, and it is not
+stored as opaque data in the QAT request descriptor. The user can therefore
+allocate the structure on the stack and reuse it to fill all jobs.
+
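+For instance, the same job structure could be filled for an in-place AES-GCM
+(AEAD) element roughly as below and submitted with
+``qat_sym_enqueue_frame_aead()``. The buffer addresses, lengths and the
+``qp``/``sess``/``frame`` handles are placeholders owned by the application:
+
+.. code-block:: c
+
+  struct qat_sym_job job = { 0 };  /* may live on the stack and be reused */
+
+  job.data_iova = buf_iova;        /* IO address of the in-place data buffer */
+  job.aead.aead_ofs = 0;           /* offset of the data to process, in bytes */
+  job.aead.aead_len = buf_len;     /* length of the data to process, in bytes */
+  job.aead.aad_iova = aad_iova;    /* AAD; the aad pointer is only needed for CCM */
+  job.aead.tag_iova = tag_iova;    /* tag written on encryption, read on decryption */
+  job.iv = iv_ptr;                 /* virtual address of the IV */
+  job.iv_iova = iv_iova;
+
+  ret = qat_sym_enqueue_frame_aead(qp, sess, &job, &tail,
+          is_first, is_last, (void *)frame);
+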
+To use the QAT direct symmetric crypto APIs safely, the user has to carefully
+set the correct fields in the ``qat_sym_job`` structure, otherwise the
+application or the system may crash. There are also a few limitations to the
+QAT direct symmetric crypto APIs:
+
+* Only in-place operations are supported.
+* DOCSIS is NOT supported.
+* GMAC-only digest generation or verification is NOT supported.
+* The APIs are NOT thread-safe.
+* The direct API's enqueue CANNOT be mixed with
+  ``rte_cryptodev_enqueue_burst``, or vice versa.
+
+The following sample code shows how to use the QAT direct API to process a
+user-defined frame of up to 32 buffers with a chained AES-CBC and HMAC-SHA
+algorithm.
+
+See *DPDK API Reference* for details on each API definitions.
+
+.. code-block:: c
+
+ #include <qat_sym_frame.h>
+
+ #define FRAME_ELT_OK 0
+ #define FRAME_ELT_FAIL 1
+ #define FRAME_OK 0
+ #define FRAME_SOME_ELT_ERROR 1
+ #define FRAME_SIZE 32
+
+ /* User created frame element struct */
+ struct sample_frame_elt {
+ /* The status field of frame element */
+ uint8_t status;
+ /* Pre-created and initialized cryptodev session */
+ struct rte_cryptodev_sym_session *session;
+ union {
+ rte_iova_t data;
+ struct rte_crypto_sgl sgl;
+ };
+ uint32_t data_len;
+ rte_iova_t digest;
+ uint8_t *iv;
+ uint8_t is_sgl;
+ };
+
+ /* User created frame struct */
+ struct sample_frame {
+ struct sample_frame_elt elts[FRAME_SIZE]; /**< All frame elements */
+ uint32_t n_elts; /**< Number of elements */
+ uint8_t state; /**< Frame status, FRAME_OK or FRAME_SOME_ELT_ERROR */
+ };
+
+ /* Frame enqueue function use QAT direct AES-CBC-* + HMAC-SHA* API */
+ static int
+ enqueue_frame_to_qat_direct_api(
+ uint8_t qat_cryptodev_id, /**< Initialized QAT cryptodev ID */
+ uint16_t qat_qp_id, /**< Initialized QAT queue pair ID */
+ struct sample_frame *frame /**< Initialized user frame struct */)
+ {
+ /* Get QAT queue pair data, provided as one of the QAT direct APIs. */
+ void *qp = qat_sym_get_qp(qat_cryptodev_id, qat_qp_id);
+ struct qat_sym_job job;
+ uint32_t i, tail;
+
+ /**
+ * If qat_cryptodev_id does not represent an initialized QAT device, or
+ * the qat_qp_id is not valid or initialized, qat_sym_get_qp will
+ * return NULL.
+ **/
+ if (qp == NULL)
+ return -1;
+
+ for (i = 0; i < frame->n_elts; i++) {
+ struct sample_frame_elt *fe = &frame->elts[i];
+ int ret;
+
+ /* Fill the job data with frame element data */
+ if (fe->is_sgl != 0) {
+ /* The buffer is a SGL buffer */
+ job.sgl = &fe->sgl;
+ /* Set SGL flag in the job */
+ job.flags |= QAT_SYM_DESC_FLAG_IS_SGL;
+ } else {
+ job.data_iova = fe->data;
+ /* Unset SGL flag in the job */
+ job.flags &= ~QAT_SYM_DESC_FLAG_IS_SGL;
+ }
+
+ job.chain.cipher_ofs = job.chain.auth_ofs = 0;
+ job.chain.cipher_len = job.chain.auth_len = fe->data_len;
+ job.chain.digest_iova = fe->digest;
+
+ job.iv = fe->iv;
+
+ /* Call QAT direct data-path enqueue chaining op API */
+ ret = qat_sym_enqueue_frame_chain(qp, fe->session, &job,
+ &tail, /**< Tail should be updated only by the function */
+ i == 0 ? 1 : 0, /**< Set 1 for first job in the frame */
+ i == frame->n_elts - 1, /**< Set 1 for last job */
+ (void *)frame /**< Frame will be written to the first job's opaque */);
+
+ /**
+ * In case one element fails to be enqueued, simply abandon
+ * enqueuing the whole frame.
+ **/
+ if (ret < 0)
+ return -1;
+
+ /**
+ * At this point the element is enqueued. The job buffer can be
+ * safely reused for enqueuing the next frame element.
+ **/
+ }
+
+ return 0;
+ }
+
+ /**
+ * User created function to return the number of elements in a frame.
+ * The function return and parameter should follow the prototype
+ * qat_qp_get_frame_n_element_t() in qat_sym_frame.h
+ **/
+ static uint32_t
+ get_frame_nb_elts(void *f)
+ {
+ struct sample_frame *frame = f;
+ return frame->n_elts;
+ }
+
+ /* Frame dequeue function use QAT direct dequeue API */
+ static struct sample_frame *
+ dequeue_frame_with_qat_direct_api(
+ uint8_t qat_cryptodev_id, /**< Initialized QAT cryptodev ID */
+ uint16_t qat_qp_id /**< Initialized QAT queue pair ID */)
+ {
+ void *qp = qat_sym_get_qp(qat_cryptodev_id, qat_qp_id);
+ struct sample_frame *ret_frame;
+ int ret;
+
+ /**
+ * If qat_cryptodev_id does not represent an initialized QAT device, or
+ * the qat_qp_id is not valid or initialized, qat_sym_get_qp will
+ * return NULL.
+ **/
+ if (qp == NULL)
+ return NULL;
+
+ ret = qat_sym_dequeue_frame(qp,
+ (void **)&ret_frame, /**< Valid frame or NULL is written to ret_frame */
+ get_frame_nb_elts, /**< Function to get frame element size */
+ /* Offset from the start of the frame to the first status field */
+ offsetof(struct sample_frame, elts[0].status),
+ sizeof(struct sample_frame_elt), /**< Interval between status fields */
+ FRAME_ELT_OK, /**< Value to write to status when elt successful */
+ FRAME_ELT_FAIL /**< Value to write to status when elt failed */);
+
+ if (ret == 0) {
+ /**
+ * Return 0 means the frame is successfully retrieved, and all
+ * elements in the frame are successfully processed.
+ **/
+ ret_frame->state = FRAME_OK;
+ return ret_frame;
+ } else {
+ /**
+ * Return negative number but ret_frame is not NULL means the frame
+ * is successfully retrieved, but 1 or more elements are failed.
+ **/
+ if (ret_frame) {
+ ret_frame->state = FRAME_SOME_ELT_ERROR;
+ return ret_frame;
+ } else {
+ /**
+ * Return negative number and ret_frame is NULL means QAT is
+ * yet to process all elements in the frame.
+ **/
+ return NULL;
+ }
+ }
+ }
+
Asymmetric Cryptography
-----------------------
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [dpdk-dev] [PATCH] crypto/qat: add data-path APIs
2020-06-12 14:39 [dpdk-dev] [PATCH] crypto/qat: add data-path APIs Fan Zhang
2020-06-18 17:50 ` Trahe, Fiona
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 0/3] crypto/qat: add symmetric crypto " Fan Zhang
@ 2020-06-26 6:55 ` Jerin Jacob
2020-06-26 10:38 ` [dpdk-dev] [dpdk-techboard] " Thomas Monjalon
2 siblings, 1 reply; 39+ messages in thread
From: Jerin Jacob @ 2020-06-26 6:55 UTC (permalink / raw)
To: Fan Zhang, techboard, Anoob Joseph
Cc: dpdk-dev, Akhil Goyal, Fiona Trahe, Piotr Bronowski
On Fri, Jun 12, 2020 at 8:10 PM Fan Zhang <roy.fan.zhang@intel.com> wrote:
>
> This patch adds data-path APIs to QAT symmetric dirver to support
> raw data as input.
>
> For applications/libraries that want to benefit from the data-path
> encryption acceleration provided by QAT but not necessarily depends
> on DPDK data-path structures (such as VPP), some performance
> degradation is unavoidable to convert between their specific data
> structure and DPDK cryptodev operation as well as mbufs.
>
> This patch takes advantage of existing QAT implementations to form
> symmetric data-path enqueue and dequeue APIs that support raw data
> as input so that they can have wider usability towards those
> applications/libraries without performance drop caused by the data
> structure conversions. In the meantime the less performance-sensitive
> cryptodev device and session management remains intact so that DPDK
> cryptodev remains to be unified control path library for QAT.
>
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
> ---
+ Techboard,
I think, this problem is not specific to QAT nor the crypto subsystem.
If we are planning to expose the PMD-specific descriptors, it would be good to get
general agreement from everyone. Probably we can/need to extend ethdev
PMDs as well based on the need.
If we are taking this path, at minimum, we need a generic control path
API with cryptodev,
to query such capability. (Probably API to register descriptor and
query supported descriptor as PMD
can support multiple descriptors)
> + SYMLINK-y-include += qat_sym_frame.h
I think the exposed file name should be rte_pmd_qat_sym_frame.h
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [dpdk-dev] [dpdk-techboard] [PATCH] crypto/qat: add data-path APIs
2020-06-26 6:55 ` [dpdk-dev] [PATCH] crypto/qat: add data-path APIs Jerin Jacob
@ 2020-06-26 10:38 ` Thomas Monjalon
2020-06-30 20:33 ` Honnappa Nagarahalli
0 siblings, 1 reply; 39+ messages in thread
From: Thomas Monjalon @ 2020-06-26 10:38 UTC (permalink / raw)
To: Fan Zhang, techboard, Jerin Jacob
Cc: Anoob Joseph, dpdk-dev, Akhil Goyal, Fiona Trahe,
Piotr Bronowski, honnappa.nagarahalli
26/06/2020 08:55, Jerin Jacob:
> On Fri, Jun 12, 2020 at 8:10 PM Fan Zhang <roy.fan.zhang@intel.com> wrote:
> >
> > This patch adds data-path APIs to QAT symmetric dirver to support
> > raw data as input.
> >
> > For applications/libraries that want to benefit from the data-path
> > encryption acceleration provided by QAT but not necessarily depends
> > on DPDK data-path structures (such as VPP), some performance
> > degradation is unavoidable to convert between their specific data
> > structure and DPDK cryptodev operation as well as mbufs.
> >
> > This patch takes advantage of existing QAT implementations to form
> > symmetric data-path enqueue and dequeue APIs that support raw data
> > as input so that they can have wider usability towards those
> > applications/libraries without performance drop caused by the data
> > structure conversions. In the meantime the less performance-sensitive
> > cryptodev device and session management remains intact so that DPDK
> > cryptodev remains to be unified control path library for QAT.
> >
> > Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> > Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
> > ---
>
> + Techboard,
>
> I think, this problem is not specific to QAT nor the crypto subsystem.
> If we are planning to expose the PMD specific descriptors, It would good to get
> general agreement from everyone. Probably we can/need to extend ethdev
> PMDs as well based on the need.
>
> If we are taking this path, at minimum, we need a generic control path
> API with cryptodev,
> to query such capability. (Probably API to register descriptor and
> query supported descriptor as PMD
> can support multiple descriptors)
I fully agree, it needs to be a community decision.
Today, if an application wants to use DPDK, either it adopts mbuf,
or it pays the cost of mbuf conversion.
The question is: can DPDK provide helpers for a non-mbuf datapath?
The benefit is clear for applications which are not mbuf-centric.
The disadvantages I can think about:
- Opening a new API layer is adding more work for everybody
(development, test, maintenance).
- Applications must duplicate a part of the DPDK datapath.
- Lack of consistency between the configuration APIs
and the datapath implemented by the application.
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [dpdk-dev] [dpdk-dev v2 2/3] test/crypto: add unit-test for QAT direct APIs
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 2/3] test/crypto: add unit-test for QAT direct APIs Fan Zhang
@ 2020-06-30 17:47 ` Trahe, Fiona
0 siblings, 0 replies; 39+ messages in thread
From: Trahe, Fiona @ 2020-06-30 17:47 UTC (permalink / raw)
To: Zhang, Roy Fan, dev; +Cc: akhil.goyal, Trahe, Fiona
Hi Fan,
> -----Original Message-----
> From: Zhang, Roy Fan <roy.fan.zhang@intel.com>
> Sent: Thursday, June 25, 2020 2:32 PM
> To: dev@dpdk.org
> Cc: Trahe, Fiona <fiona.trahe@intel.com>; akhil.goyal@nxp.com; Zhang, Roy Fan
> <roy.fan.zhang@intel.com>
> Subject: [dpdk-dev v2 2/3] test/crypto: add unit-test for QAT direct APIs
>
> This patch adds the test to use QAT symmetric crypto direct APIs.
> The test will be enabled only when QAT Sym PMD is built.
>
> @@ -2451,7 +2615,11 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
> if (retval < 0)
> return retval;
>
> - ut_params->op = process_crypto_request(ts_params->valid_devs[0],
> + if (qat_api_test)
> + process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
> + 0, 1, 1);
[Fiona] It would be helpful, wherever this is called, to use local params with useful names, enums, or comments for the last 3 params, e.g.:
process_qat_api_op(ts_params->valid_devs[0], 0, ut_params->op,
0, /* is_cipher */
1, /* is_auth */
1); /* len_in_bits */
It would be better to generalise the tests too in light of Jerin's comments.
By passing the xform to process_crypto_request(), you should be able to avoid all the changes
to individual tests, e.g.:
in process_crypto_request(dev, op, xform )
if (direct_pmd_datapath)
switch (pmd) {
qat: process_qat_api_op();
other PMD:/*future*/
default: break;
}
else
rest of process_crypto_op fn
Unfortunately the xform is usually not available where process_crypto_request is called; it would have to be returned from the session create function. And though the 3 params are enough for the moment, it's likely other session params will be needed for other tests, or by other PMDs.
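A rough shape of that dispatcher could be the following (sketch only: process_crypto_request_ex is a hypothetical wrapper name, direct_pmd_datapath and process_qat_api_op are the names used in this thread, and deriving is_cipher/is_auth from the xform while hard-coding len_in_bits is a simplification):

    static struct rte_crypto_op *
    process_crypto_request_ex(uint8_t dev_id, struct rte_crypto_op *op,
            const struct rte_crypto_sym_xform *xform)
    {
        if (direct_pmd_datapath && gbl_driver_id ==
                rte_cryptodev_driver_id_get(
                    RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD))) {
            uint8_t is_cipher = 0, is_auth = 0;

            /* Walk the xform chain to see what the session does. */
            for (; xform != NULL; xform = xform->next) {
                if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
                    is_cipher = 1;
                else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
                    is_auth = 1;
            }
            /* len_in_bits hard-coded here; a real version would derive
             * it from the algorithm carried in the xform. */
            process_qat_api_op(dev_id, 0, op, is_cipher, is_auth, 1);
            return op;
        }
        /* Non-direct PMDs keep using the existing path unchanged. */
        return process_crypto_request(dev_id, op);
    }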
> +static int
> +test_qat_sym_direct_api(void /*argv __rte_unused, int argc __rte_unused*/)
> +{
> + int ret;
> +
> + gbl_driver_id = rte_cryptodev_driver_id_get(
> + RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
> +
> + if (gbl_driver_id == -1) {
> + RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check that both "
> + "CONFIG_RTE_LIBRTE_PMD_QAT and CONFIG_RTE_LIBRTE_PMD_QAT_SYM "
> + "are enabled in config file to run this testsuite.\n");
> + return TEST_SKIPPED;
> + }
> +
> + qat_api_test = 1;
[Fiona] I'd suggest renaming this to direct_pmd_datapath, as above.
This would be a good place to check that the pmd has the direct_pmd_datapath capability.
Checks for a finer granularity capability may be needed in process_qat_api_op()
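Such a check could look something like this (sketch only: dev_id stands for the QAT device under test, and RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API is the feature flag the v3 series below ends up adding):

    struct rte_cryptodev_info dev_info;

    /* Skip the suite if the device does not expose the direct data-path API. */
    rte_cryptodev_info_get(dev_id, &dev_info);
    if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API))
        return TEST_SKIPPED;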
> + ret = unit_test_suite_runner(&qat_direct_api_testsuite);
> + qat_api_test = 0;
> +
> + return ret;
> +}
> +
[Fiona] It would be good to add tests that validate the frame concept - enqueueing
and dequeueing only 1 descriptor at a time doesn't cover it.
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [dpdk-dev] [dpdk-techboard] [PATCH] crypto/qat: add data-path APIs
2020-06-26 10:38 ` [dpdk-dev] [dpdk-techboard] " Thomas Monjalon
@ 2020-06-30 20:33 ` Honnappa Nagarahalli
2020-06-30 21:00 ` Thomas Monjalon
0 siblings, 1 reply; 39+ messages in thread
From: Honnappa Nagarahalli @ 2020-06-30 20:33 UTC (permalink / raw)
To: thomas, Fan Zhang, techboard, Jerin Jacob
Cc: Anoob Joseph, dpdk-dev, Akhil.goyal@nxp.com, Fiona Trahe,
Piotr Bronowski, nd, Honnappa Nagarahalli, nd
<snip>
>
> 26/06/2020 08:55, Jerin Jacob:
> > On Fri, Jun 12, 2020 at 8:10 PM Fan Zhang <roy.fan.zhang@intel.com> wrote:
> > >
> > > This patch adds data-path APIs to QAT symmetric dirver to support
> > > raw data as input.
> > >
> > > For applications/libraries that want to benefit from the data-path
> > > encryption acceleration provided by QAT but not necessarily depends
> > > on DPDK data-path structures (such as VPP), some performance
> > > degradation is unavoidable to convert between their specific data
> > > structure and DPDK cryptodev operation as well as mbufs.
> > >
> > > This patch takes advantage of existing QAT implementations to form
> > > symmetric data-path enqueue and dequeue APIs that support raw data
> > > as input so that they can have wider usability towards those
> > > applications/libraries without performance drop caused by the data
> > > structure conversions. In the meantime the less
> > > performance-sensitive cryptodev device and session management
> > > remains intact so that DPDK cryptodev remains to be unified control path
> library for QAT.
> > >
> > > Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> > > Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
> > > ---
> >
> > + Techboard,
> >
> > I think, this problem is not specific to QAT nor the crypto subsystem.
> > If we are planning to expose the PMD specific descriptors, It would
> > good to get general agreement from everyone. Probably we can/need to
> > extend ethdev PMDs as well based on the need.
> >
> > If we are taking this path, at minimum, we need a generic control path
> > API with cryptodev, to query such capability. (Probably API to
> > register descriptor and query supported descriptor as PMD can support
> > multiple descriptors)
>
> I fully agree, it needs to be a community decision.
+1
>
> Today, if an application wants to use DPDK, either it adopts mbuf, or it pays
> the cost of mbuf conversion.
>
> The question is: can DPDK provides helpers for a non-mbuf datapath?
>
> The benefit is clear for applications which are not mbuf-centric.
Agree, this was captured in [1]
[1] https://dpdkna2019.sched.com/event/WYBw/custom-meta-data-in-pmds-honnappa-nagarahalli-arm
The other benefit is that projects like VPP do not have to maintain their own driver code. So, at a big-picture level, we (humanity 😊) save on effort.
>
> The disadvantages I can think about:
> - Opening a new API layer is adding more work for everybody
> (development, test, maintenance).
Documentation to capture descriptor format.
> - Applications must duplicate a part of the DPDK datapath.
> - Lack of consistency between the configuration APIs
> and the datapath implemented by the application.
I did not understand this, can you please elaborate?
Since the datapath is completely implemented in the application, the responsibility of keeping it updated with the features added by the configuration APIs remains with the application.
>
>
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [dpdk-dev] [dpdk-techboard] [PATCH] crypto/qat: add data-path APIs
2020-06-30 20:33 ` Honnappa Nagarahalli
@ 2020-06-30 21:00 ` Thomas Monjalon
0 siblings, 0 replies; 39+ messages in thread
From: Thomas Monjalon @ 2020-06-30 21:00 UTC (permalink / raw)
To: Honnappa Nagarahalli
Cc: Fan Zhang, techboard, Jerin Jacob, Anoob Joseph, dpdk-dev,
Akhil.goyal@nxp.com, Fiona Trahe, Piotr Bronowski, nd
30/06/2020 22:33, Honnappa Nagarahalli:
> 26/06/2020 12:38, Thomas Monjalon:
> > 26/06/2020 08:55, Jerin Jacob:
> > > On Fri, Jun 12, 2020 at 8:10 PM Fan Zhang <roy.fan.zhang@intel.com> wrote:
> > > >
> > > > This patch adds data-path APIs to QAT symmetric dirver to support
> > > > raw data as input.
> > > >
> > > > For applications/libraries that want to benefit from the data-path
> > > > encryption acceleration provided by QAT but not necessarily depends
> > > > on DPDK data-path structures (such as VPP), some performance
> > > > degradation is unavoidable to convert between their specific data
> > > > structure and DPDK cryptodev operation as well as mbufs.
> > > >
> > > > This patch takes advantage of existing QAT implementations to form
> > > > symmetric data-path enqueue and dequeue APIs that support raw data
> > > > as input so that they can have wider usability towards those
> > > > applications/libraries without performance drop caused by the data
> > > > structure conversions. In the meantime the less
> > > > performance-sensitive cryptodev device and session management
> > > > remains intact so that DPDK cryptodev remains to be unified control path
> > library for QAT.
> > > >
> > > > Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> > > > Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
> > > > ---
> > >
> > > + Techboard,
> > >
> > > I think, this problem is not specific to QAT nor the crypto subsystem.
> > > If we are planning to expose the PMD specific descriptors, It would
> > > good to get general agreement from everyone. Probably we can/need to
> > > extend ethdev PMDs as well based on the need.
> > >
> > > If we are taking this path, at minimum, we need a generic control path
> > > API with cryptodev, to query such capability. (Probably API to
> > > register descriptor and query supported descriptor as PMD can support
> > > multiple descriptors)
> >
> > I fully agree, it needs to be a community decision.
> +1
>
> >
> > Today, if an application wants to use DPDK, either it adopts mbuf, or it pays
> > the cost of mbuf conversion.
> >
> > The question is: can DPDK provides helpers for a non-mbuf datapath?
> >
> > The benefit is clear for applications which are not mbuf-centric.
> Agree, this was captured in [1]
>
> [1] https://dpdkna2019.sched.com/event/WYBw/custom-meta-data-in-pmds-honnappa-nagarahalli-arm
>
> The other benefit is that, projects like VPP do not have to maintain their own driver code. So, at a big picture level, we (the humanity 😊) save on effort.
>
> >
> > The disadvantages I can think about:
> > - Opening a new API layer is adding more work for everybody
> > (development, test, maintenance).
> Documentation to capture descriptor format.
>
> > - Applications must duplicate a part of the DPDK datapath.
> > - Lack of consistency between the configuration APIs
> > and the datapath implemented by the application.
>
> I did not understand this, can you please elaborate?
> Since the datapath is completely implemented in the application, the responsibility of keeping it updated with the features added by the configuration APIs remains with the application.
If you update a PMD strategy in a configuration step,
the app datapath can become out of sync.
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 0/3] crypto/qat: add symmetric crypto " Fan Zhang
` (2 preceding siblings ...)
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 3/3] doc: add QAT direct APIs guide Fan Zhang
@ 2020-07-03 10:14 ` Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
` (2 more replies)
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
4 siblings, 3 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 10:14 UTC (permalink / raw)
To: dev
Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang,
Piotr Bronowski
This patch adds symmetric crypto data-path APIs for Cryptodev. Direct
symmetric crypto data-path APIs are a set of APIs that provide
more HW-friendly enqueue/dequeue data-path functions as an alternative
approach to ``rte_cryptodev_enqueue_burst`` and
``rte_cryptodev_dequeue_burst``. The APIs are designed for external
libraries/applications that want to use Cryptodev as a symmetric crypto
data-path accelerator but are not necessarily mbuf-centric in their data
path. With these APIs, the cycle cost spent on converting their data
structures to DPDK cryptodev operations/mbufs can be reduced, and the
dependency on the DPDK crypto operation mempool can be relieved.
It is expected that the user can develop close-to-native performance
symmetric crypto data-path implementations with the functions provided
in this patchset.
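For illustration only (not part of the patchset), a single cipher-only operation through these APIs follows the same pattern as the unit test in patch 2/3; dev_id, qp_id, sess, buf_iova, iv, buf_len and user_data below are placeholder names:

    struct rte_crypto_hw_ops hw_ops;
    struct rte_crypto_sym_job job = { 0 };
    uint64_t drv_data;
    uint64_t flags = RTE_CRYPTO_HW_ENQ_FLAG_START |
            RTE_CRYPTO_HW_ENQ_FLAG_END |
            RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE;
    int status;
    void *done;

    /* Fetch the PMD's direct data-path function pointers once. */
    if (rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops) != 0)
        return -1;

    /* Describe one cipher-only operation on a flat buffer. */
    job.data_iova = buf_iova;
    job.iv = iv;
    job.cipher_only.cipher_ofs = 0;
    job.cipher_only.cipher_len = buf_len;

    /* Enqueue one job; START|END also rings the doorbell. */
    if (hw_ops.enqueue_cipher(hw_ops.qp, sess, &job, user_data,
            &drv_data, flags) != 0)
        return -1;

    /* Poll for completion, then dequeue the opaque user data back. */
    while (hw_ops.query_processed(hw_ops.qp, 1) == 0)
        rte_pause();
    done = hw_ops.dequeue_one(hw_ops.qp, &drv_data,
            RTE_CRYPTO_HW_DEQ_FLAG_START, &status);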
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
v3:
- Instead of a QAT-only API, moved the API to cryptodev.
- Added cryptodev feature flags.
v2:
- Used a structure to simplify parameters.
- Added unit tests.
- Added documentation.
Fan Zhang (3):
crypto/qat: add support to direct data-path APIs
test/crypto: add unit-test for cryptodev direct APIs
doc: add cryptodev direct APIs guide
app/test/test_cryptodev.c | 353 ++++++++++++-
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 +-
doc/guides/prog_guide/cryptodev_lib.rst | 266 ++++++++++
doc/guides/rel_notes/release_20_08.rst | 8 +
drivers/common/qat/Makefile | 2 +
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/crypto/qat/meson.build | 1 +
drivers/crypto/qat/qat_sym.c | 1 -
drivers/crypto/qat/qat_sym_job.c | 661 ++++++++++++++++++++++++
drivers/crypto/qat/qat_sym_job.h | 12 +
drivers/crypto/qat/qat_sym_pmd.c | 7 +-
13 files changed, 1332 insertions(+), 42 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_job.c
create mode 100644 drivers/crypto/qat/qat_sym_job.h
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct data-path APIs
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs Fan Zhang
@ 2020-07-03 10:14 ` Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide Fan Zhang
2 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 10:14 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch adds symmetric crypto data-path API support to the QAT-SYM PMD.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
drivers/common/qat/Makefile | 2 +
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/crypto/qat/meson.build | 1 +
drivers/crypto/qat/qat_sym.c | 1 -
drivers/crypto/qat/qat_sym_job.c | 661 +++++++++++++++++++++++++++++++
drivers/crypto/qat/qat_sym_job.h | 12 +
drivers/crypto/qat/qat_sym_pmd.c | 7 +-
8 files changed, 686 insertions(+), 5 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_job.c
create mode 100644 drivers/crypto/qat/qat_sym_job.h
diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
index 28bd5668f..6655fd2bc 100644
--- a/drivers/common/qat/Makefile
+++ b/drivers/common/qat/Makefile
@@ -39,6 +39,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
SRCS-y += qat_sym.c
SRCS-y += qat_sym_session.c
SRCS-y += qat_sym_pmd.c
+ SRCS-y += qat_sym_job.c
+
build_qat = yes
endif
endif
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 8e6dd04eb..06e2d8c8a 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,8 @@ txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
q->csr_tail = q->tail;
}
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
uint32_t old_head, new_head;
uint32_t max_head;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 575d69059..8add1b049 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -116,4 +116,7 @@ qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
void *op_cookie __rte_unused,
uint64_t *dequeue_err_count __rte_unused);
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q);
+
#endif /* _QAT_QP_H_ */
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index fc65923a7..8a3921293 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -13,6 +13,7 @@ if dep.found()
qat_sources += files('qat_sym_pmd.c',
'qat_sym.c',
'qat_sym_session.c',
+ 'qat_sym_job.c',
'qat_asym_pmd.c',
'qat_asym.c')
qat_ext_deps += dep
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 25b6dd5f4..609180d3f 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -336,7 +336,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
set_cipher_iv(ctx->cipher_iv.length,
ctx->cipher_iv.offset,
cipher_param, op, qat_req);
-
} else if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
diff --git a/drivers/crypto/qat/qat_sym_job.c b/drivers/crypto/qat/qat_sym_job.c
new file mode 100644
index 000000000..7c0913459
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.c
@@ -0,0 +1,661 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym.h"
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+#include "qat_qp.h"
+#include "qat_sym_job.h"
+
+static __rte_always_inline int
+qat_sym_frame_fill_sgl(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_sgl *sgl, uint32_t max_len)
+{
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sgl *list;
+ int64_t len = max_len;
+ uint32_t i;
+
+ if (!sgl)
+ return -EINVAL;
+ if (sgl->num < 2 || sgl->num > QAT_SYM_SGL_MAX_NUMBER || !sgl->vec)
+ return -EINVAL;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
+ list = (struct qat_sgl *)&cookie->qat_sgl_src;
+
+ for (i = 0; i < sgl->num && len > 0; i++) {
+ list->buffers[i].len = RTE_MIN(sgl->vec[i].len, len);
+ list->buffers[i].resrvd = 0;
+ list->buffers[i].addr = sgl->vec[i].iova;
+ len -= list->buffers[i].len;
+ }
+
+ if (unlikely(len > 0))
+ return -1;
+
+ list->num_bufs = i;
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ req->comn_mid.src_length = req->comn_mid.dst_length = 0;
+ return 0;
+}
+
+static __rte_always_inline void
+qat_sym_set_cipher_param(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ uint32_t cipher_ofs, uint32_t cipher_len)
+{
+ cipher_param->cipher_offset = cipher_ofs;
+ cipher_param->cipher_length = cipher_len;
+}
+
+static __rte_always_inline void
+qat_sym_set_auth_param(struct icp_qat_fw_la_auth_req_params *auth_param,
+ uint32_t auth_ofs, uint32_t auth_len,
+ rte_iova_t digest_iova, rte_iova_t aad_iova)
+{
+ auth_param->auth_off = auth_ofs;
+ auth_param->auth_len = auth_len;
+ auth_param->auth_res_addr = digest_iova;
+ auth_param->u1.aad_adr = aad_iova;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_aead(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+ /* In case of AES-CCM this may point to user-selected
+ * memory or to the IV offset in the crypto op.
+ */
+ uint8_t *aad_data;
+ /* This is the true AAD length; it does not include the 18 bytes of
+ * preceding data.
+ */
+ uint8_t aad_ccm_real_len;
+ uint8_t aad_len_field_sz;
+ uint32_t msg_len_be;
+ rte_iova_t aad_iova;
+ uint8_t q;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->aead.aead_ofs + job->aead.aead_len;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ aad_iova = job->aead.aad_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ aad_data = job->aead.aad;
+ aad_iova = job->aead.aad_iova;
+ aad_ccm_real_len = 0;
+ aad_len_field_sz = 0;
+ msg_len_be = rte_bswap32(job->aead.aead_len);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ aad_data = job->iv;
+ aad_iova = job->iv_iova;
+ }
+
+ q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+ ctx->digest_length, q);
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET +
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+ rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx], 0, pad_len);
+ }
+
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+ + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(job->aead.aad +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ qat_sym_set_cipher_param(cipher_param, job->aead.aead_ofs,
+ job->aead.aead_len);
+ qat_sym_set_auth_param(auth_param, job->aead.aead_ofs,
+ job->aead.aead_len, job->aead.tag_iova, aad_iova);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->aead.aead_ofs + job->aead.aead_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ if (ctx->is_single_pass) {
+ cipher_param->spc_aad_addr = aad_iova;
+ cipher_param->spc_auth_res_addr = job->aead.tag_iova;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+
+static __rte_always_inline int
+qat_sym_job_enqueue_cipher(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->cipher_only.cipher_ofs,
+ job->cipher_only.cipher_len);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_auth(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ auth_param = (void *)((uint8_t *)&req->serv_specif_rqpars +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->auth_only.auth_ofs + job->auth_only.auth_len;
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->auth_only.auth_ofs,
+ job->auth_only.auth_len, job->auth_only.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->auth_only.auth_ofs +
+ job->auth_only.auth_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_chain(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t min_ofs = RTE_MIN(job->chain.cipher_ofs, job->chain.auth_ofs);
+ uint32_t max_len = RTE_MAX(job->chain.cipher_len, job->chain.auth_len);
+ rte_iova_t auth_iova_end;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr =
+ req->comn_mid.dest_data_addr = job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length = min_ofs + max_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->chain.cipher_ofs,
+ job->chain.cipher_len);
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->chain.auth_ofs,
+ job->chain.auth_len, job->chain.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ uint32_t len = job->chain.auth_ofs +
+ job->chain.auth_len;
+ struct rte_crypto_vec *vec = job->sgl->vec;
+ int auth_end_get = 0;
+ while (len) {
+ if (len <= vec->len) {
+ auth_iova_end = vec->iova + len;
+ auth_end_get = 1;
+ break;
+ }
+ len -= vec->len;
+ vec++;
+ }
+ if (!auth_end_get) {
+ QAT_DP_LOG(ERR, "Failed to get auth end");
+ return -1;
+ }
+ } else
+ auth_iova_end = job->data_iova + job->chain.auth_ofs +
+ job->chain.auth_len;
+
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_param->auth_off + auth_param->auth_len <
+ cipher_param->cipher_offset +
+ cipher_param->cipher_length) &&
+ (job->chain.digest_iova == auth_iova_end)) {
+ /* Handle partial digest encryption */
+ if (cipher_param->cipher_offset +
+ cipher_param->cipher_length <
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length)
+ req->comn_mid.dst_length =
+ req->comn_mid.src_length =
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length;
+ struct icp_qat_fw_comn_req_hdr *header =
+ &req->comn_hdr;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ min_ofs + max_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+#define get_rx_queue_message_at_index(q, h, i) \
+ (void *)((uint8_t *)q->base_addr + ((h + q->msg_size * (i)) & \
+ q->modulo_mask))
+
+static __rte_always_inline int
+qat_is_rx_msg_ok(struct icp_qat_fw_comn_resp *resp_msg)
+{
+ return ICP_QAT_FW_COMN_STATUS_FLAG_OK ==
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status);
+}
+
+static __rte_always_inline int
+qat_sym_query_processed_jobs(void *qat_sym_qp, uint32_t nb_jobs)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head = (rx_queue->head + (nb_jobs - 1) * rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ if (*(uint32_t *)resp == ADF_RING_EMPTY_SIG)
+ return 0;
+
+ return 1;
+}
+
+static __rte_always_inline void *
+qat_sym_job_dequeue_one(void *qat_sym_qp, uint64_t *drv_data, uint64_t flags,
+ int *is_op_success)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head;
+ void *opaque;
+
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+ head = rx_queue->head;
+ else
+ head = (uint32_t)*drv_data;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+ *is_op_success = 0;
+ return NULL;
+ }
+
+ if (unlikely(qat_is_rx_msg_ok(resp) == 0))
+ *is_op_success = -1;
+ else
+ *is_op_success = 1;
+
+ opaque = (void *)(uintptr_t)resp->opaque_data;
+
+ rx_queue->head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ rx_queue->nb_processed_responses++;
+ qp->dequeued++;
+ qp->stats.dequeued_count++;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+
+ return opaque;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_job_dequeue_n(void *qat_sym_qp, uint64_t *drv_data,
+ void *user_data, rte_crpyto_hw_user_post_deq_cb_fn cb,
+ uint32_t n, uint64_t flags, uint32_t *n_failed_jobs)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head, i;
+ uint32_t status, total_fail = 0;
+
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+ head = rx_queue->head;
+ else
+ head = (uint32_t)*drv_data;
+
+ for (i = 0; i < n; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_EXHAUST)
+ break;
+ return -i;
+ }
+
+ status = qat_is_rx_msg_ok(resp);
+ total_fail += status;
+ cb(user_data, i, status);
+
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ }
+
+ rx_queue->head = head;
+ rx_queue->nb_processed_responses += i;
+ qp->dequeued += i;
+ qp->stats.dequeued_count += i;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+ *n_failed_jobs = total_fail;
+
+ return i;
+}
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops)
+{
+ struct qat_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp->service_type != QAT_SERVICE_SYMMETRIC)
+ return -EINVAL;
+
+ hw_ops->qp = (void *)qp;
+ hw_ops->enqueue_aead = qat_sym_job_enqueue_aead;
+ hw_ops->enqueue_cipher = qat_sym_job_enqueue_cipher;
+ hw_ops->enqueue_auth = qat_sym_job_enqueue_auth;
+ hw_ops->enqueue_chain = qat_sym_job_enqueue_chain;
+ hw_ops->dequeue_one = qat_sym_job_dequeue_one;
+ hw_ops->dequeue_many = qat_sym_job_dequeue_n;
+ hw_ops->query_processed = qat_sym_query_processed_jobs;
+
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_sym_job.h b/drivers/crypto/qat/qat_sym_job.h
new file mode 100644
index 000000000..b11aeb841
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_JOB_H_
+#define _QAT_SYM_JOB_H_
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops);
+
+#endif /* _QAT_SYM_JOB_H_ */
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index e887c880f..be9d73c0a 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -13,6 +13,7 @@
#include "qat_sym.h"
#include "qat_sym_session.h"
#include "qat_sym_pmd.h"
+#include "qat_sym_job.h"
#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
@@ -234,7 +235,8 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
/* Crypto related operations */
.sym_session_get_size = qat_sym_session_get_private_size,
.sym_session_configure = qat_sym_session_configure,
- .sym_session_clear = qat_sym_session_clear
+ .sym_session_clear = qat_sym_session_clear,
+ .sym_get_hw_ops = qat_sym_get_ops,
};
static uint16_t
@@ -308,7 +310,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
- RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
+ RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
+ RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API;
internals = cryptodev->data->dev_private;
internals->qat_dev = qat_pci_dev;
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
@ 2020-07-03 10:14 ` Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide Fan Zhang
2 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 10:14 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch adds the QAT test to use cryptodev symmetric crypto
direct APIs.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
app/test/test_cryptodev.c | 353 ++++++++++++++++++++++++--
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 ++--
3 files changed, 372 insertions(+), 37 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 8f631468b..9fbbe1d6c 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -55,6 +55,8 @@ static int gbl_driver_id;
static enum rte_security_session_action_type gbl_action_type =
RTE_SECURITY_ACTION_TYPE_NONE;
+int qat_api_test;
+
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
struct rte_mempool *large_mbuf_pool;
@@ -142,6 +144,154 @@ ceil_byte_length(uint32_t num_bits)
return (num_bits >> 3);
}
+void
+process_sym_hw_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits)
+{
+ int32_t n;
+ struct rte_crypto_hw_ops hw_ops;
+ struct rte_crypto_op *op_ret;
+ struct rte_crypto_sym_op *sop;
+ struct rte_crypto_sym_job job;
+ struct rte_crypto_sgl sgl;
+ struct rte_crypto_vec vec[UINT8_MAX] = { {0} };
+ int ret;
+ uint32_t min_ofs = 0, max_len = 0;
+ uint64_t drv_data;
+ uint64_t flags = RTE_CRYPTO_HW_ENQ_FLAG_START |
+ RTE_CRYPTO_HW_ENQ_FLAG_END |
+ RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE;
+ enum {
+ cipher = 0,
+ auth,
+ chain,
+ aead
+ } qat_api_test_type;
+ uint32_t count = 0;
+
+ memset(&job, 0, sizeof(job));
+
+ ret = rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops);
+ if (ret) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ return;
+ }
+
+ sop = op->sym;
+
+ if (is_cipher && is_auth) {
+ qat_api_test_type = chain;
+ min_ofs = RTE_MIN(sop->cipher.data.offset,
+ sop->auth.data.offset);
+ max_len = RTE_MAX(sop->cipher.data.length,
+ sop->auth.data.length);
+ } else if (is_cipher) {
+ qat_api_test_type = cipher;
+ min_ofs = sop->cipher.data.offset;
+ max_len = sop->cipher.data.length;
+ } else if (is_auth) {
+ qat_api_test_type = auth;
+ min_ofs = sop->auth.data.offset;
+ max_len = sop->auth.data.length;
+ } else { /* aead */
+ qat_api_test_type = aead;
+ min_ofs = sop->aead.data.offset;
+ max_len = sop->aead.data.length;
+ }
+
+ if (len_in_bits) {
+ max_len = max_len >> 3;
+ min_ofs = min_ofs >> 3;
+ }
+
+ n = rte_crypto_mbuf_to_vec(sop->m_src, min_ofs, max_len,
+ vec, RTE_DIM(vec));
+ if (n < 0 || n != sop->m_src->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ if (n > 1) {
+ sgl.vec = vec;
+ sgl.num = n;
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ job.sgl = &sgl;
+ } else
+ job.data_iova = rte_pktmbuf_iova(sop->m_src);
+
+
+ switch (qat_api_test_type) {
+ case aead:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+ job.aead.aead_ofs = min_ofs;
+ job.aead.aead_len = max_len;
+ job.aead.aad = sop->aead.aad.data;
+ job.aead.aad_iova = sop->aead.aad.phys_addr;
+ job.aead.tag_iova = sop->aead.digest.phys_addr;
+ ret = hw_ops.enqueue_aead(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case cipher:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.cipher_only.cipher_ofs = min_ofs;
+ job.cipher_only.cipher_len = max_len;
+ ret = hw_ops.enqueue_cipher(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case auth:
+ job.auth_only.auth_ofs = min_ofs;
+ job.auth_only.auth_len = max_len;
+ job.auth_only.digest_iova = sop->auth.digest.phys_addr;
+ ret = hw_ops.enqueue_auth(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case chain:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+ job.chain.cipher_ofs = sop->cipher.data.offset;
+ job.chain.cipher_len = sop->cipher.data.length;
+ if (len_in_bits) {
+ job.chain.cipher_len = job.chain.cipher_len >> 3;
+ job.chain.cipher_ofs = job.chain.cipher_ofs >> 3;
+ }
+ job.chain.auth_ofs = sop->auth.data.offset;
+ job.chain.auth_len = sop->auth.data.length;
+ if (len_in_bits) {
+ job.chain.auth_len = job.chain.auth_len >> 3;
+ job.chain.auth_ofs = job.chain.auth_ofs >> 3;
+ }
+ job.chain.digest_iova = sop->auth.digest.phys_addr;
+ ret = hw_ops.enqueue_chain(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ }
+
+ if (ret < 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ ret = 0;
+
+ while (ret == 0 && count++ < 1024) {
+ ret = hw_ops.query_processed(hw_ops.qp, 1);
+ if (!ret)
+ rte_delay_ms(1);
+ }
+ if (ret < 0 || count >= 1024) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ flags = RTE_CRYPTO_HW_DEQ_FLAG_START;
+ op_ret = hw_ops.dequeue_one(hw_ops.qp, &drv_data, flags, &ret);
+ if (op_ret != op || ret != 1)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+}
+
static void
process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
{
@@ -2451,7 +2601,11 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -2530,7 +2684,11 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2600,6 +2758,9 @@ test_kasumi_authentication(const struct kasumi_hash_test_data *tdata)
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_crypt_auth_op(ts_params->valid_devs[0],
ut_params->op);
+ else if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
else
ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
@@ -2671,7 +2832,11 @@ test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2878,8 +3043,12 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
- ut_params->op);
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -2964,7 +3133,11 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3287,7 +3460,11 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3362,7 +3539,11 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3737,7 +3918,11 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -3905,7 +4090,11 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4000,7 +4189,11 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4136,7 +4329,11 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4325,7 +4522,11 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4507,7 +4708,11 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4697,7 +4902,11 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4838,7 +5047,11 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4925,7 +5138,11 @@ test_zuc_encryption(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5012,7 +5229,11 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5100,7 +5321,11 @@ test_zuc_authentication(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5232,7 +5457,11 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5418,7 +5647,11 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -7024,6 +7257,9 @@ test_authenticated_encryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -7993,6 +8229,9 @@ test_authenticated_decryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (qat_api_test == 1)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -11284,6 +11523,9 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
if (oop == IN_PLACE &&
gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (oop == IN_PLACE && qat_api_test == 1)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -13241,6 +13483,75 @@ test_cryptodev_nitrox(void)
return unit_test_suite_runner(&cryptodev_nitrox_testsuite);
}
+static struct unit_test_suite cryptodev_sym_direct_api_testsuite = {
+ .suite_name = "Crypto Sym direct API Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_cipheronly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_chain_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_qat_sym_direct_api(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ int ret;
+
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check that both "
+ "CONFIG_RTE_LIBRTE_PMD_QAT and CONFIG_RTE_LIBRTE_PMD_QAT_SYM "
+ "are enabled in config file to run this testsuite.\n");
+ return TEST_SKIPPED;
+ }
+
+ qat_api_test = 1;
+ ret = unit_test_suite_runner(&cryptodev_sym_direct_api_testsuite);
+ qat_api_test = 0;
+
+ return ret;
+}
+
+REGISTER_TEST_COMMAND(cryptodev_qat_sym_api_autotest, test_qat_sym_direct_api);
REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_mb_autotest,
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 41542e055..2854115aa 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -71,6 +71,8 @@
#define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
+extern int qat_api_test;
+
/**
* Write (spread) data from buffer to mbuf data
*
@@ -209,4 +211,8 @@ create_segmented_mbuf(struct rte_mempool *mbuf_pool, int pkt_len,
return NULL;
}
+void
+process_sym_hw_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits);
+
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_blockcipher.c b/app/test/test_cryptodev_blockcipher.c
index 642b54971..dfa74a449 100644
--- a/app/test/test_cryptodev_blockcipher.c
+++ b/app/test/test_cryptodev_blockcipher.c
@@ -461,25 +461,43 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
/* Process crypto operation */
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Error sending packet for encryption");
- status = TEST_FAILED;
- goto error_exit;
- }
+ if (qat_api_test) {
+ uint8_t is_cipher = 0, is_auth = 0;
+
+ if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
+ RTE_LOG(DEBUG, USER1,
+ "QAT direct API does not support OOP, Test Skipped.\n");
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "SKIPPED");
+ status = TEST_SUCCESS;
+ goto error_exit;
+ }
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
+ is_cipher = 1;
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
+ is_auth = 1;
+
+ process_sym_hw_api_op(dev_id, 0, op, is_cipher, is_auth, 0);
+ } else {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for encryption");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
- op = NULL;
+ op = NULL;
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
- rte_pause();
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
+ rte_pause();
- if (!op) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Failed to process sym crypto op");
- status = TEST_FAILED;
- goto error_exit;
+ if (!op) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process sym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
}
debug_hexdump(stdout, "m_src(after):",
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
@ 2020-07-03 10:14 ` Fan Zhang
2 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 10:14 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch updates the programmer's guide to demonstrate the usage
and limitations of cryptodev symmetric crypto data-path APIs.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
doc/guides/prog_guide/cryptodev_lib.rst | 266 ++++++++++++++++++++++++
doc/guides/rel_notes/release_20_08.rst | 8 +
2 files changed, 274 insertions(+)
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index c14f750fa..9900a593a 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -861,6 +861,272 @@ using one of the crypto PMDs available in DPDK.
num_dequeued_ops);
} while (total_num_dequeued_ops < num_enqueued_ops);
+Cryptodev Direct Symmetric Crypto Data-path APIs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Direct symmetric crypto data-path APIs are a set of APIs provided especially
+for symmetric HW crypto PMDs that support fast data-path
+enqueue/dequeue operations. The direct data-path APIs take advantage of the
+existing Cryptodev APIs for device, queue pair, and session management. In
+addition, the user is required to get the queue pair pointer data and function
+pointers. The APIs are provided as an advanced feature, as an alternative
+to ``rte_cryptodev_enqueue_burst`` and ``rte_cryptodev_dequeue_burst``. The
+APIs are designed for the user to develop close-to-native performance symmetric
+crypto data-path implementations for applications that do not necessarily
+depend on cryptodev operations, cryptodev operation mempools, or mbufs.
+
+Cryptodev PMDs that support this feature present the
+``RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API`` feature flag. The user calls the
+``rte_cryptodev_sym_get_hw_ops`` function to get all the function pointers
+for the different enqueue and dequeue operations, plus the device specific
+queue pair data. After the ``rte_crypto_hw_ops`` structure is properly set by
+the driver, the user can use the function pointers and the queue data pointer
+in the structure to enqueue and dequeue crypto jobs.
+
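+A minimal sketch, assuming ``dev_id`` and ``qp_id`` refer to an already
+configured and started device and queue pair:
+
+.. code-block:: c
+
+ struct rte_crypto_hw_ops hw_ops;
+ struct rte_cryptodev_info info;
+
+ /* Check the direct data-path capability first */
+ rte_cryptodev_info_get(dev_id, &info);
+ if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API))
+ return -1;
+
+ /* Retrieve the queue pair data and enqueue/dequeue function pointers */
+ if (rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops) < 0)
+ return -1;
+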
+To simplify the enqueue APIs, a symmetric job structure is defined:
+
+.. code-block:: c
+
+ /**
+ * Asynchronous operation job descriptor.
+ * Used by HW crypto devices direct API call that supports such activity
+ **/
+ struct rte_crypto_sym_job {
+ union {
+ /**
+ * When RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL bit is set in flags, sgl
+ * field is used as input data. Otherwise data_iova is
+ * used.
+ **/
+ rte_iova_t data_iova;
+ struct rte_crypto_sgl *sgl;
+ };
+ union {
+ /**
+ * Unlike cryptodev ops, all ofs and len fields are in
+ * bytes (including Snow3G/Kasumi/ZUC).
+ **/
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ } cipher_only;
+ struct {
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } auth_only;
+ struct {
+ uint32_t aead_ofs;
+ uint32_t aead_len;
+ rte_iova_t tag_iova;
+ uint8_t *aad;
+ rte_iova_t aad_iova;
+ } aead;
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } chain;
+ };
+ uint8_t *iv;
+ rte_iova_t iv_iova;
+ };
+
+Unlike the Cryptodev operation, the ``rte_crypto_sym_job`` structure
+focuses only on the data fields required for the crypto PMD to execute a
+single job, and is not supposed to be stored as opaque data. The user can
+freely allocate the structure on the stack and reuse it to fill all jobs.
+
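+For example, a minimal cipher-only job may be filled on the stack as in the
+sketch below (``hw_ops`` is assumed to be already obtained via
+``rte_cryptodev_sym_get_hw_ops``; ``sess``, ``buf_iova``, ``buf_len``, ``iv``
+and ``iv_iova`` are assumed to be prepared by the caller):
+
+.. code-block:: c
+
+ struct rte_crypto_sym_job job; /* no mempool allocation required */
+ uint64_t drv_data;
+ int ret;
+
+ job.data_iova = buf_iova; /* flat (non-SGL) input buffer */
+ job.cipher_only.cipher_ofs = 0; /* offsets and lengths are in bytes */
+ job.cipher_only.cipher_len = buf_len;
+ job.iv = iv;
+ job.iv_iova = iv_iova;
+
+ /* single-job "burst": both START and END flags are set */
+ ret = hw_ops.enqueue_cipher(hw_ops.qp, sess, &job, NULL, &drv_data,
+ RTE_CRYPTO_HW_ENQ_FLAG_START | RTE_CRYPTO_HW_ENQ_FLAG_END);
+ if (ret < 0)
+ return -1;
+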
+To use the direct symmetric crypto APIs safely, the user has to carefully
+set the correct fields in the ``rte_crypto_sym_job`` structure, otherwise the
+application or the system may crash. Also, there are a few limitations to the
+direct symmetric crypto APIs:
+
+* Only in-place operations are supported.
+* The APIs are NOT thread-safe.
+* The direct API's enqueue CANNOT be mixed with ``rte_cryptodev_enqueue_burst``,
+ or vice versa.
+
+The following sample code shows how to use the Cryptodev direct APIs to
+process a user defined frame of up to 32 buffers with the AES-CBC and
+HMAC-SHA chained algorithm.
+
+See the *DPDK API Reference* for details on each API definition.
+
+.. code-block:: c
+
+ #include <rte_cryptodev.h>
+
+ #define FRAME_ELT_OK 0
+ #define FRAME_ELT_FAIL 1
+ #define FRAME_OK 0
+ #define FRAME_SOME_ELT_ERROR 1
+ #define FRAME_SIZE 32
+
+ /* Sample frame element struct */
+ struct sample_frame_elt {
+ /* The status field of frame element */
+ uint8_t status;
+ /* Pre-created and initialized cryptodev session */
+ struct rte_cryptodev_sym_session *session;
+ union {
+ rte_iova_t data;
+ struct rte_crypto_sgl sgl;
+ };
+ uint32_t data_len;
+ rte_iova_t digest;
+ uint8_t *iv;
+ uint8_t is_sgl;
+ };
+
+ /* Sample frame struct to describe up to 32 crypto jobs */
+ struct sample_frame {
+ struct sample_frame_elt elts[FRAME_SIZE]; /**< All frame elements */
+ uint32_t n_elts; /**< Number of elements */
+ uint8_t status; /**< Frame status: FRAME_OK or FRAME_SOME_ELT_ERROR */
+ };
+
+ /* Global Cryptodev Direct API structure */
+ static struct rte_crypto_hw_ops hw_ops;
+
+ /* Initialization */
+ static int
+ frame_operation_init(
+ uint8_t cryptodev_id, /**< Initialized cryptodev ID */
+ uint16_t qp_id /**< Initialized queue pair ID */)
+ {
+ int ret;
+
+ /* Get APIs */
+ ret = rte_cryptodev_sym_get_hw_ops(cryptodev_id, qp_id, &hw_ops);
+ /* If the device does not support this feature or the queue pair is
+ not initialized, a negative value is returned */
+ if (ret < 0)
+ return -1;
+ return 0;
+ }
+
+ /* Frame enqueue function use direct AES-CBC-* + HMAC-SHA* API */
+ static int
+ enqueue_frame_to_direct_api(
+ struct sample_frame *frame /**< Initialized user frame struct */)
+ {
+ struct rte_crypto_sym_job job;
+ uint64_t drv_data, flags = 0;
+ uint32_t i;
+
+ /* Fill all sample frame element data into HW queue pair */
+ for (i = 0; i < frame->n_elts; i++) {
+ struct sample_frame_elt *fe = &frame->elts[i];
+ int ret;
+
+ /* if it is the first element in the frame, set the START flag to
+ let the driver know it is the first job and to fill drv_data. */
+ if (i == 0)
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_START;
+ else
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_START;
+
+ /* if it is the last element in the frame, set the END flag to
+ kick the HW queue */
+ if (i == frame->n_elts - 1)
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_END;
+ else
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_END;
+
+ /* Fill the job data with frame element data */
+ if (fe->is_sgl != 0) {
+ /* The buffer is a SGL buffer */
+ job.sgl = &fe->sgl;
+ /* Set SGL flag */
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ } else {
+ job.data_iova = fe->data;
+ /* Unset SGL flag in the job */
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ }
+
+ job.chain.cipher_ofs = job.chain.auth_ofs = 0;
+ job.chain.cipher_len = job.chain.auth_len = fe->data_len;
+ job.chain.digest_iova = fe->digest;
+
+ job.iv = fe->iv;
+
+ /* Call direct data-path enqueue chaining op API */
+ ret = hw_ops.enqueue_chain(hw_ops.qp, fe->session, &job,
+ (void *)frame, &drv_data, flags);
+ /**
+ * In case one element fails to be enqueued, simply abandon
+ * enqueuing the whole frame.
+ **/
+ if (ret < 0)
+ return -1;
+
+ /**
+ * At this point the element is enqueued. The job buffer can be
+ * safely reused for enqueuing the next frame element.
+ **/
+ }
+
+ return 0;
+ }
+
+ /**
+ * Sample function to write frame element status field based on
+ * driver returned operation result. The function return and parameter
+ * should follow the prototype rte_crpyto_hw_user_post_deq_cb_fn() in
+ * rte_cryptodev.h
+ **/
+ static __rte_always_inline void
+ write_frame_elt_status(void *data, uint32_t index, uint8_t is_op_success)
+ {
+ struct sample_frame *frame = data;
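+ /* Element 0 is dequeued separately by hw_ops.dequeue_one() in
+ * dequeue_frame_with_direct_api(), so this callback fills the
+ * status of elements starting at index + 1.
+ */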
+ frame->elts[index + 1].status = is_op_success ? FRAME_ELT_OK :
+ FRAME_ELT_FAIL;
+ }
+
+ /* Frame dequeue function use direct dequeue API */
+ static struct sample_frame *
+ dequeue_frame_with_direct_api(void)
+ {
+ struct sample_frame *ret_frame;
+ uint64_t flags, drv_data;
+ uint32_t n_fail, n_fail_first = 0;
+ int ret;
+
+ /* Dequeue first job, which should have frame data stored in opaque */
+ flags = RTE_CRYPTO_HW_DEQ_FLAG_START;
+ ret_frame = hw_ops.dequeue_one(hw_ops.qp, &drv_data, flags, &ret);
+ if (ret == 0) {
+ /* ret == 0, means it is still under processing */
+ return NULL;
+ } else if (ret == 1) {
+ /* ret_frame is successfully retrieved, the ret stores the
+ operation result */
+ ret_frame->elts[0].status = FRAME_ELT_OK;
+ } else {
+ ret_frame->elts[0].status = FRAME_ELT_FAIL;
+ n_fail_first = 1;
+ }
+
+ /* Query if the remaining n_elts - 1 jobs have been processed; if not,
+ return NULL */
+ if (!hw_ops.query_processed(hw_ops.qp, ret_frame->n_elts - 1))
+ return NULL;
+
+ /* We are sure all elements have been processed, dequeue them all */
+ flags = 0;
+ ret = hw_ops.dequeue_many(hw_ops.qp, &drv_data, (void *)ret_frame,
+ write_frame_elt_status, ret_frame->n_elts - 1, flags, &n_fail);
+
+ if (n_fail + n_fail_first > 0)
+ ret_frame->status = FRAME_SOME_ELT_ERROR;
+ else
+ ret_frame->status = FRAME_OK;
+
+ return ret_frame;
+ }
+
Asymmetric Cryptography
-----------------------
diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
index 39064afbe..eb973693d 100644
--- a/doc/guides/rel_notes/release_20_08.rst
+++ b/doc/guides/rel_notes/release_20_08.rst
@@ -56,6 +56,14 @@ New Features
Also, make sure to start the actual text at the margin.
=========================================================
+ * **Added Cryptodev data-path APIs for non mbuf-centric data-paths.**
+
+ A set of data-path APIs that are not based on cryptodev operations has
+ been added to cryptodev. The APIs are designed for external applications
+ or libraries that want to use cryptodev but whose data-path
+ implementations are not mbuf-centric. The QAT symmetric PMD is also
+ updated to support these APIs.
+
Removed Items
-------------
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 0/3] crypto/qat: add symmetric crypto " Fan Zhang
` (3 preceding siblings ...)
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs Fan Zhang
@ 2020-07-03 11:09 ` Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 1/4] " Fan Zhang
` (7 more replies)
4 siblings, 8 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 11:09 UTC (permalink / raw)
To: dev
Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang,
Piotr Bronowski
This patch adds symmetric crypto data-path APIs for Cryptodev. Direct
symmetric crypto data-path APIs are a set of APIs that provide
more HW friendly enqueue/dequeue data-path functions as an alternative
approach to ``rte_cryptodev_enqueue_burst`` and
``rte_cryptodev_dequeue_burst``. The APIs are designed for external
libraries/applications that want to use Cryptodev as a symmetric crypto
data-path accelerator but are not necessarily mbuf data-path centric. With
these APIs the cycle cost spent on converting their data structures to
DPDK cryptodev operations/mbufs can be reduced, and the dependency on the DPDK
crypto operation mempool can be relieved.
It is expected that the user can develop close-to-native performance
symmetric crypto data-path implementations with the functions provided
in this patchset.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
v3:
- Instead of QAT only API, moved the API to cryptodev
- Added cryptodev feature flags.
v2:
- Used a structure to simplify parameters.
- Added unit tests.
- Added documentation.
Fan Zhang (4):
cryptodev: add symmetric crypto data-path APIs
crypto/qat: add support to direct data-path APIs
test/crypto: add unit-test for cryptodev direct APIs
doc: add cryptodev direct APIs guide
app/test/test_cryptodev.c | 353 +++++++++-
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 +-
doc/guides/prog_guide/cryptodev_lib.rst | 266 +++++++
doc/guides/rel_notes/release_20_08.rst | 8 +
drivers/common/qat/Makefile | 2 +
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/crypto/qat/meson.build | 1 +
drivers/crypto/qat/qat_sym.c | 1 -
drivers/crypto/qat/qat_sym_job.c | 661 ++++++++++++++++++
drivers/crypto/qat/qat_sym_job.h | 12 +
drivers/crypto/qat/qat_sym_pmd.c | 7 +-
lib/librte_cryptodev/rte_crypto_sym.h | 48 ++
lib/librte_cryptodev/rte_cryptodev.c | 22 +
lib/librte_cryptodev/rte_cryptodev.h | 173 ++++-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 12 +-
.../rte_cryptodev_version.map | 4 +
18 files changed, 1587 insertions(+), 46 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_job.c
create mode 100644 drivers/crypto/qat/qat_sym_job.h
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v3 1/4] cryptodev: add symmetric crypto data-path APIs
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
@ 2020-07-03 11:09 ` Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
` (6 subsequent siblings)
7 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 11:09 UTC (permalink / raw)
To: dev
Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang,
Piotr Bronowski
This patch adds data-path APIs to cryptodev. The APIs are organized as
a data structure containing function pointers for different enqueue and
dequeue operations. A public API is added to obtain the function
pointers and the necessary queue pair data from the device queue pair.
This patch depends on patch-72157 ("cryptodev: add function to check
if qp was setup")
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
---
lib/librte_cryptodev/rte_crypto_sym.h | 48 +++++
lib/librte_cryptodev/rte_cryptodev.c | 22 +++
lib/librte_cryptodev/rte_cryptodev.h | 173 +++++++++++++++++-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 12 +-
.../rte_cryptodev_version.map | 4 +
5 files changed, 255 insertions(+), 4 deletions(-)
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index da961a19d..e237e3cfa 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -87,6 +87,54 @@ union rte_crypto_sym_ofs {
} ofs;
};
+
+/**
+ * Asynchronous operation job descriptor.
+ * Used by HW crypto devices direct API call that supports such activity
+ **/
+struct rte_crypto_sym_job {
+ union {
+ /**
+ * When RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL bit is set in flags, sgl
+ * field is used as input data. Otherwise data_iova is
+ * used.
+ **/
+ rte_iova_t data_iova;
+ struct rte_crypto_sgl *sgl;
+ };
+ union {
+ /**
+ * Unlike cryptodev ops, all ofs and len fields are in
+ * bytes (including Snow3G/Kasumi/ZUC).
+ **/
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ } cipher_only;
+ struct {
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } auth_only;
+ struct {
+ uint32_t aead_ofs;
+ uint32_t aead_len;
+ rte_iova_t tag_iova;
+ uint8_t *aad;
+ rte_iova_t aad_iova;
+ } aead;
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } chain;
+ };
+ uint8_t *iv;
+ rte_iova_t iv_iova;
+};
+
/** Symmetric Cipher Algorithms */
enum rte_crypto_cipher_algorithm {
RTE_CRYPTO_CIPHER_NULL = 1,
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 705387b8b..5d5f84e27 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1866,6 +1866,28 @@ rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}
+int
+rte_cryptodev_sym_get_hw_ops(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_hw_ops *hw_ops)
+{
+ struct rte_cryptodev *dev;
+
+ if (!hw_ops)
+ return -EINVAL;
+
+ memset(hw_ops, 0, sizeof(*hw_ops));
+
+ if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
+ return -EINVAL;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API) ||
+ *dev->dev_ops->sym_get_hw_ops == NULL)
+ return -ENOTSUP;
+
+ return dev->dev_ops->sym_get_hw_ops(dev, qp_id, hw_ops);
+}
+
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index d01a65825..7cd2095d7 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -466,7 +466,8 @@ rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
/**< Support symmetric session-less operations */
#define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23)
/**< Support operations on data which is not byte aligned */
-
+#define RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API (1ULL << 24)
+/**< Support hardware accelerator specific raw data as input */
/**
* Get the name of a crypto device feature flag
@@ -737,7 +738,7 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
* - 1: qp was configured
* - -ENODEV: device was not configured
*/
-int
+__rte_experimental int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
/**
@@ -1348,6 +1349,174 @@ rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
struct rte_crypto_sym_vec *vec);
+
+/* HW direct symmetric crypto data-path APIs */
+
+/* Bit-masks used for enqueuing job */
+#define RTE_CRYPTO_HW_ENQ_FLAG_START (1ULL << 0)
+/**< Bit-mask to indicate the first job in a burst. With this bit set the
+ * driver may write but not read the drv_data buffer, otherwise the driver
+ * shall read and update the drv_data.
+ */
+#define RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE (1ULL << 1)
+/**< Bit-mask to indicate write opaque pointer into HW crypto descriptor. */
+#define RTE_CRYPTO_HW_ENQ_FLAG_END (1ULL << 2)
+/**< Bit-mask to indicate the last job in a burst. With this bit set the
+ * driver may read but not write the drv_data buffer, and kick the HW to
+ * start processing all jobs written.
+ */
+#define RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL (1ULL << 3)
+/**< Bit-mask to indicate the input job is an SGL buffer */
+
+/* Bit-masks used for dequeuing job */
+#define RTE_CRYPTO_HW_DEQ_FLAG_START (1ULL << 0)
+/**< Bit-mask to indicate the first job to be dequeued. With this bit set the
+ * driver may write but not read the drv_data buffer, otherwise the driver
+ * shall read and update the drv_data.
+ */
+#define RTE_CRYPTO_HW_DEQ_FLAG_EXHAUST (1ULL << 1)
+/**< Bit-mask to indicate dequeuing as many as n jobs in dequeue-many function.
+ * Without this bit, once the driver finds that fewer than n jobs are ready
+ * to dequeue, it shall stop immediately, leave all processed jobs in the
+ * queue, and return the number of ready jobs as a negative value. With this
+ * bit set the function shall continue dequeuing all completed jobs and
+ * return the dequeued job count as a positive value.
+ */
+
+/**
+ * Typedef for HW direct data-path enqueue callback function.
+ *
+ * @param qp Queue pair data.
+ * @param sess Cryptodev session.
+ * @param job Job data.
+ * @param opaque Opaque data to be written to queue descriptor
+ * when RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE is
+ * set.
+ * @param drv_data User created temporary driver data for the
+ * driver to store and update data used between
+ * adjacent enqueue operations.
+ * @param flags Bitmask of RTE_CRYPTO_HW_ENQ_* flags
+ * @return
+ * - On success return 0
+ * - On fail return -1
+ **/
+typedef int (*rte_crypto_hw_enq_cb_fn)(void *qp,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags);
+
+/**
+ * Typedef for HW direct data-path dequeue one job callback function.
+ *
+ * @param qp Queue pair data.
+ * @param drv_data User created temporary driver data for the
+ * driver to store and update data used between
+ * adjacent dequeue operations.
+ * @param flags Bitmask of RTE_CRYPTO_HW_DEQ_* flags
+ * @param status The buffer for the driver to write operation
+ * status.
+ * @return
+ * - On success return the opaque data the user wrote at enqueue (if any) and
+ * - status written as 1 when the operation is successful.
+ * - status written as -1 when the operation failed (e.g. bad MAC).
+ * - On fail return NULL and write status as 0 when the operation is still
+ * under processing.
+ **/
+typedef void * (*rte_crypto_hw_deq_one_cb_fn)(void *qp, uint64_t *drv_data,
+ uint64_t flags, int *status);
+
+/**
+ * Typedef that the user provided to deal with jobs' status when
+ * dequeue in a bulk.
+ *
+ * @param data User provided data.
+ * @param index Index number of the processed job.
+ * @param is_op_success Driver filled operation status.
+ **/
+typedef void (*rte_crpyto_hw_user_post_deq_cb_fn)(void *data, uint32_t index,
+ uint8_t is_op_success);
+
+/**
+ * Typedef for HW direct data-path dequeue bulk jobs callback function.
+ *
+ * @param qp Queue pair data.
+ * @param drv_data User created temporary driver data for the
+ * driver to store and update data used between
+ * adjacent dequeue operations.
+ * @param user_data User provided data to be passed into cb
+ * function.
+ * @param cb User provided callback functions to deal with
+ * driver returned job status.
+ * @param n The number of expected jobs to be dequeued.
+ * @param flags Bitmask of RTE_CRYPTO_HW_DEQ_* flags
+ * @param n_fail The buffer for driver to write the number of
+ * failed jobs.
+ * @return
+ * - Return the number of dequeued jobs.
+ **/
+typedef uint32_t (*rte_crypto_hw_deq_many_cb_fn)(void *qp, uint64_t *drv_data,
+ void *user_data, rte_crpyto_hw_user_post_deq_cb_fn cb,
+ uint32_t n, uint64_t flags, uint32_t *n_fail);
+/**
+ * Typedef for querying HW the number of processed jobs.
+ *
+ * @param qp Queue pair data.
+ * @param nb_jobs The expected processed job number.
+ * @return
+ * - If the nb_jobs ready, return 1.
+ * - Otherwise return 0.
+ **/
+typedef int (*rte_crypto_hw_query_processed)(void *qp, uint32_t nb_jobs);
+
+/* Struct for user to perform HW specific enqueue/dequeue function calls */
+struct rte_crypto_hw_ops {
+ /* Driver written queue pair data pointer, should NOT be altered by
+ * the user.
+ */
+ void *qp;
+ /* Function handler to enqueue AEAD job */
+ rte_crypto_hw_enq_cb_fn enqueue_aead;
+ /* Function handler to enqueue cipher only job */
+ rte_crypto_hw_enq_cb_fn enqueue_cipher;
+ /* Function handler to enqueue auth only job */
+ rte_crypto_hw_enq_cb_fn enqueue_auth;
+ /* Function handler to enqueue cipher + hash chaining job */
+ rte_crypto_hw_enq_cb_fn enqueue_chain;
+ /* Function handler to query processed jobs */
+ rte_crypto_hw_query_processed query_processed;
+ /* Function handler to dequeue one job and return opaque data stored */
+ rte_crypto_hw_deq_one_cb_fn dequeue_one;
+ /* Function handler to dequeue many jobs */
+ rte_crypto_hw_deq_many_cb_fn dequeue_many;
+ /* Reserved */
+ void *reserved[8];
+};
+
+/**
+ * Get the symmetric crypto hardware ops function pointers and queue pair data.
+ *
+ * @param dev_id The device identifier.
+ * @param qp_id The index of the queue pair from which to retrieve
+ * processed packets. The value must be in the range
+ * [0, nb_queue_pair - 1] previously supplied to
+ * rte_cryptodev_configure().
+ * @param hw_ops User provided rte_crypto_hw_ops buffer.
+ *
+ * @return
+ * - On success hw_ops will be written the HW crypto device's queue pair data
+ * and function pointers for data enqueue/dequeue.
+ * - On fail hw_ops is cleared and negative integer is returned.
+ */
+__rte_experimental
+int
+rte_cryptodev_sym_get_hw_ops(
+ uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_hw_ops *hw_ops);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 81975d72b..28f75d1da 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -316,6 +316,10 @@ typedef uint32_t (*cryptodev_sym_cpu_crypto_process_t)
(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess,
union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec);
+struct rte_crypto_hw_ops;
+
+typedef int (*cryptodev_sym_hw_get_ops_t)(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops);
/** Crypto device operations function pointer table */
struct rte_cryptodev_ops {
@@ -348,8 +352,12 @@ struct rte_cryptodev_ops {
/**< Clear a Crypto sessions private data. */
cryptodev_asym_free_session_t asym_session_clear;
/**< Clear a Crypto sessions private data. */
- cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
- /**< process input data synchronously (cpu-crypto). */
+ union {
+ cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
+ /**< process input data synchronously (cpu-crypto). */
+ cryptodev_sym_hw_get_ops_t sym_get_hw_ops;
+ /**< Get HW crypto data-path call back functions and data */
+ };
};
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index 07a2d2f02..56f5684c8 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -85,6 +85,7 @@ EXPERIMENTAL {
rte_cryptodev_sym_session_set_user_data;
rte_crypto_asym_op_strings;
rte_crypto_asym_xform_strings;
+ rte_cryptodev_get_qp_status;
# added in 20.05
__rte_cryptodev_trace_configure;
@@ -103,4 +104,7 @@ EXPERIMENTAL {
__rte_cryptodev_trace_asym_session_clear;
__rte_cryptodev_trace_dequeue_burst;
__rte_cryptodev_trace_enqueue_burst;
+
+ # added in 20.08
+ rte_cryptodev_sym_get_hw_ops;
};
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct data-path APIs
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 1/4] " Fan Zhang
@ 2020-07-03 11:09 ` Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 2/4] " Fan Zhang
` (5 subsequent siblings)
7 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 11:09 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch adds symmetric crypto data-path API support to the QAT-SYM PMD.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
drivers/common/qat/Makefile | 2 +
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/crypto/qat/meson.build | 1 +
drivers/crypto/qat/qat_sym.c | 1 -
drivers/crypto/qat/qat_sym_job.c | 661 +++++++++++++++++++++++++++++++
drivers/crypto/qat/qat_sym_job.h | 12 +
drivers/crypto/qat/qat_sym_pmd.c | 7 +-
8 files changed, 686 insertions(+), 5 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_job.c
create mode 100644 drivers/crypto/qat/qat_sym_job.h
diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
index 28bd5668f..6655fd2bc 100644
--- a/drivers/common/qat/Makefile
+++ b/drivers/common/qat/Makefile
@@ -39,6 +39,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
SRCS-y += qat_sym.c
SRCS-y += qat_sym_session.c
SRCS-y += qat_sym_pmd.c
+ SRCS-y += qat_sym_job.c
+
build_qat = yes
endif
endif
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 8e6dd04eb..06e2d8c8a 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,8 @@ txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
q->csr_tail = q->tail;
}
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
uint32_t old_head, new_head;
uint32_t max_head;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 575d69059..8add1b049 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -116,4 +116,7 @@ qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
void *op_cookie __rte_unused,
uint64_t *dequeue_err_count __rte_unused);
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q);
+
#endif /* _QAT_QP_H_ */
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index fc65923a7..8a3921293 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -13,6 +13,7 @@ if dep.found()
qat_sources += files('qat_sym_pmd.c',
'qat_sym.c',
'qat_sym_session.c',
+ 'qat_sym_job.c',
'qat_asym_pmd.c',
'qat_asym.c')
qat_ext_deps += dep
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 25b6dd5f4..609180d3f 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -336,7 +336,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
set_cipher_iv(ctx->cipher_iv.length,
ctx->cipher_iv.offset,
cipher_param, op, qat_req);
-
} else if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
diff --git a/drivers/crypto/qat/qat_sym_job.c b/drivers/crypto/qat/qat_sym_job.c
new file mode 100644
index 000000000..7c0913459
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.c
@@ -0,0 +1,661 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym.h"
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+#include "qat_qp.h"
+#include "qat_sym_job.h"
+
+static __rte_always_inline int
+qat_sym_frame_fill_sgl(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_sgl *sgl, uint32_t max_len)
+{
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sgl *list;
+ int64_t len = max_len;
+ uint32_t i;
+
+ if (!sgl)
+ return -EINVAL;
+ if (sgl->num < 2 || sgl->num > QAT_SYM_SGL_MAX_NUMBER || !sgl->vec)
+ return -EINVAL;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
+ list = (struct qat_sgl *)&cookie->qat_sgl_src;
+
+ for (i = 0; i < sgl->num && len > 0; i++) {
+ list->buffers[i].len = RTE_MIN(sgl->vec[i].len, len);
+ list->buffers[i].resrvd = 0;
+ list->buffers[i].addr = sgl->vec[i].iova;
+ len -= list->buffers[i].len;
+ }
+
+ if (unlikely(len > 0))
+ return -1;
+
+ list->num_bufs = i;
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ req->comn_mid.src_length = req->comn_mid.dst_length = 0;
+ return 0;
+}
+
+static __rte_always_inline void
+qat_sym_set_cipher_param(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ uint32_t cipher_ofs, uint32_t cipher_len)
+{
+ cipher_param->cipher_offset = cipher_ofs;
+ cipher_param->cipher_length = cipher_len;
+}
+
+static __rte_always_inline void
+qat_sym_set_auth_param(struct icp_qat_fw_la_auth_req_params *auth_param,
+ uint32_t auth_ofs, uint32_t auth_len,
+ rte_iova_t digest_iova, rte_iova_t aad_iova)
+{
+ auth_param->auth_off = auth_ofs;
+ auth_param->auth_len = auth_len;
+ auth_param->auth_res_addr = digest_iova;
+ auth_param->u1.aad_adr = aad_iova;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_aead(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+ /* In case of AES-CCM this may point to user selected
+ * memory or iv offset in crypto_op
+ */
+ uint8_t *aad_data;
+ /* This is the true AAD length; it does not include the 18 bytes of
+ * preceding data
+ */
+ uint8_t aad_ccm_real_len;
+ uint8_t aad_len_field_sz;
+ uint32_t msg_len_be;
+ rte_iova_t aad_iova;
+ uint8_t q;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->aead.aead_ofs + job->aead.aead_len;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ aad_iova = job->aead.aad_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ aad_data = job->aead.aad;
+ aad_iova = job->aead.aad_iova;
+ aad_ccm_real_len = 0;
+ aad_len_field_sz = 0;
+ msg_len_be = rte_bswap32(job->aead.aead_len);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ aad_data = job->iv;
+ aad_iova = job->iv_iova;
+ }
+
+ q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+ ctx->digest_length, q);
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET +
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+ rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx], 0, pad_len);
+ }
+
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+ + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(job->aead.aad +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ qat_sym_set_cipher_param(cipher_param, job->aead.aead_ofs,
+ job->aead.aead_len);
+ qat_sym_set_auth_param(auth_param, job->aead.aead_ofs,
+ job->aead.aead_len, job->aead.tag_iova, aad_iova);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->aead.aead_ofs + job->aead.aead_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ if (ctx->is_single_pass) {
+ cipher_param->spc_aad_addr = aad_iova;
+ cipher_param->spc_auth_res_addr = job->aead.tag_iova;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+
+static __rte_always_inline int
+qat_sym_job_enqueue_cipher(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->cipher_only.cipher_ofs,
+ job->cipher_only.cipher_len);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_auth(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ auth_param = (void *)((uint8_t *)&req->serv_specif_rqpars +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->auth_only.auth_ofs + job->auth_only.auth_len;
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->auth_only.auth_ofs,
+ job->auth_only.auth_len, job->auth_only.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->auth_only.auth_ofs +
+ job->auth_only.auth_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_chain(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t min_ofs = RTE_MIN(job->chain.cipher_ofs, job->chain.auth_ofs);
+ uint32_t max_len = RTE_MAX(job->chain.cipher_len, job->chain.auth_len);
+ rte_iova_t auth_iova_end;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr =
+ req->comn_mid.dest_data_addr = job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length = min_ofs + max_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->chain.cipher_ofs,
+ job->chain.cipher_len);
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->chain.auth_ofs,
+ job->chain.auth_len, job->chain.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ uint32_t len = job->chain.auth_ofs +
+ job->chain.auth_len;
+ struct rte_crypto_vec *vec = job->sgl->vec;
+ int auth_end_get = 0;
+ while (len) {
+ if (len <= vec->len) {
+ auth_iova_end = vec->iova + len;
+ auth_end_get = 1;
+ break;
+ }
+ len -= vec->len;
+ vec++;
+ }
+ if (!auth_end_get) {
+ QAT_DP_LOG(ERR, "Failed to get auth end");
+ return -1;
+ }
+ } else
+ auth_iova_end = job->data_iova + job->chain.auth_ofs +
+ job->chain.auth_len;
+
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_param->auth_off + auth_param->auth_len <
+ cipher_param->cipher_offset +
+ cipher_param->cipher_length) &&
+ (job->chain.digest_iova == auth_iova_end)) {
+ /* Handle partial digest encryption */
+ if (cipher_param->cipher_offset +
+ cipher_param->cipher_length <
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length)
+ req->comn_mid.dst_length =
+ req->comn_mid.src_length =
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length;
+ struct icp_qat_fw_comn_req_hdr *header =
+ &req->comn_hdr;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ min_ofs + max_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+#define get_rx_queue_message_at_index(q, h, i) \
+ (void *)((uint8_t *)q->base_addr + ((h + q->msg_size * (i)) & \
+ q->modulo_mask))
+
+static __rte_always_inline int
+qat_is_rx_msg_ok(struct icp_qat_fw_comn_resp *resp_msg)
+{
+ return ICP_QAT_FW_COMN_STATUS_FLAG_OK ==
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status);
+}
+
+static __rte_always_inline int
+qat_sym_query_processed_jobs(void *qat_sym_qp, uint32_t nb_jobs)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head = (rx_queue->head + (nb_jobs - 1) * rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ if (*(uint32_t *)resp == ADF_RING_EMPTY_SIG)
+ return 0;
+
+ return 1;
+}
+
+static __rte_always_inline void *
+qat_sym_job_dequeue_one(void *qat_sym_qp, uint64_t *drv_data, uint64_t flags,
+ int *is_op_success)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head;
+ void *opaque;
+
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+ head = rx_queue->head;
+ else
+ head = (uint32_t)*drv_data;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+ *is_op_success = 0;
+ return NULL;
+ }
+
+ if (unlikely(qat_is_rx_msg_ok(resp) == 0))
+ *is_op_success = -1;
+ else
+ *is_op_success = 1;
+
+ opaque = (void *)(uintptr_t)resp->opaque_data;
+
+ rx_queue->head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ rx_queue->nb_processed_responses++;
+ qp->dequeued++;
+ qp->stats.dequeued_count++;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+
+ return opaque;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_job_dequeue_n(void *qat_sym_qp, uint64_t *drv_data,
+ void *user_data, rte_crpyto_hw_user_post_deq_cb_fn cb,
+ uint32_t n, uint64_t flags, uint32_t *n_failed_jobs)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head, i;
+ uint32_t status, total_fail = 0;
+
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+ head = rx_queue->head;
+ else
+ head = (uint32_t)*drv_data;
+
+ for (i = 0; i < n; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_EXHAUST)
+ break;
+ return -i;
+ }
+
+ status = qat_is_rx_msg_ok(resp);
+ total_fail += !status;
+ cb(user_data, i, status);
+
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ }
+
+ rx_queue->head = head;
+ rx_queue->nb_processed_responses += i;
+ qp->dequeued += i;
+ qp->stats.dequeued_count += i;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+ *n_failed_jobs = total_fail;
+
+ return i;
+}
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops)
+{
+ struct qat_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp->service_type != QAT_SERVICE_SYMMETRIC)
+ return -EINVAL;
+
+ hw_ops->qp = (void *)qp;
+ hw_ops->enqueue_aead = qat_sym_job_enqueue_aead;
+ hw_ops->enqueue_cipher = qat_sym_job_enqueue_cipher;
+ hw_ops->enqueue_auth = qat_sym_job_enqueue_auth;
+ hw_ops->enqueue_chain = qat_sym_job_enqueue_chain;
+ hw_ops->dequeue_one = qat_sym_job_dequeue_one;
+ hw_ops->dequeue_many = qat_sym_job_dequeue_n;
+ hw_ops->query_processed = qat_sym_query_processed_jobs;
+
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_sym_job.h b/drivers/crypto/qat/qat_sym_job.h
new file mode 100644
index 000000000..b11aeb841
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_FRAME_H_
+#define _QAT_SYM_FRAME_H_
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops);
+
+#endif /* _QAT_SYM_FRAME_H_ */
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index e887c880f..be9d73c0a 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -13,6 +13,7 @@
#include "qat_sym.h"
#include "qat_sym_session.h"
#include "qat_sym_pmd.h"
+#include "qat_sym_job.h"
#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
@@ -234,7 +235,8 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
/* Crypto related operations */
.sym_session_get_size = qat_sym_session_get_private_size,
.sym_session_configure = qat_sym_session_configure,
- .sym_session_clear = qat_sym_session_clear
+ .sym_session_clear = qat_sym_session_clear,
+ .sym_get_hw_ops = qat_sym_get_ops,
};
static uint16_t
@@ -308,7 +310,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
- RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
+ RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
+ RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API;
internals = cryptodev->data->dev_private;
internals->qat_dev = qat_pci_dev;
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v3 2/4] crypto/qat: add support to direct data-path APIs
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 1/4] " Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
@ 2020-07-03 11:09 ` Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
` (4 subsequent siblings)
7 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 11:09 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch adds symmetric crypto data-path API support to the QAT-SYM PMD.
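For illustration, below is a minimal usage sketch of the functions wired up
by this patch. It assumes a device, queue pair and symmetric session already
configured through the existing cryptodev control-path APIs; dev_id, qp_id,
session, data_iova, data_len, iv and opaque are placeholders and error
handling is trimmed, so treat it as a sketch rather than reference code:

	struct rte_crypto_hw_ops hw_ops;
	struct rte_crypto_sym_job job = { 0 };
	uint64_t drv_data;
	uint64_t flags = RTE_CRYPTO_HW_ENQ_FLAG_START |
			RTE_CRYPTO_HW_ENQ_FLAG_END |
			RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE;
	int status;

	/* Retrieve the QAT queue pair data and function pointers once. */
	if (rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops) != 0)
		return -1;

	/* Describe one contiguous, in-place cipher-only job. */
	job.data_iova = data_iova;
	job.iv = iv;
	job.cipher_only.cipher_ofs = 0;
	job.cipher_only.cipher_len = data_len;

	/* START + END on a single job also writes the ring tail CSR. */
	if (hw_ops.enqueue_cipher(hw_ops.qp, session, &job, opaque,
			&drv_data, flags) != 0)
		return -1;

	/* Poll until the response is ready, then dequeue it. */
	while (hw_ops.query_processed(hw_ops.qp, 1) == 0)
		rte_pause();
	opaque = hw_ops.dequeue_one(hw_ops.qp, &drv_data,
			RTE_CRYPTO_HW_DEQ_FLAG_START, &status);
	/* status: 1 = op OK, -1 = op failed, 0 = nothing dequeued. */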
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
drivers/common/qat/Makefile | 2 +
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/crypto/qat/meson.build | 1 +
drivers/crypto/qat/qat_sym.c | 1 -
drivers/crypto/qat/qat_sym_job.c | 661 +++++++++++++++++++++++++++++++
drivers/crypto/qat/qat_sym_job.h | 12 +
drivers/crypto/qat/qat_sym_pmd.c | 7 +-
8 files changed, 686 insertions(+), 5 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_job.c
create mode 100644 drivers/crypto/qat/qat_sym_job.h
diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
index 28bd5668f..6655fd2bc 100644
--- a/drivers/common/qat/Makefile
+++ b/drivers/common/qat/Makefile
@@ -39,6 +39,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
SRCS-y += qat_sym.c
SRCS-y += qat_sym_session.c
SRCS-y += qat_sym_pmd.c
+ SRCS-y += qat_sym_job.c
+
build_qat = yes
endif
endif
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 8e6dd04eb..06e2d8c8a 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,8 @@ txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
q->csr_tail = q->tail;
}
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
uint32_t old_head, new_head;
uint32_t max_head;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 575d69059..8add1b049 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -116,4 +116,7 @@ qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
void *op_cookie __rte_unused,
uint64_t *dequeue_err_count __rte_unused);
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q);
+
#endif /* _QAT_QP_H_ */
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index fc65923a7..8a3921293 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -13,6 +13,7 @@ if dep.found()
qat_sources += files('qat_sym_pmd.c',
'qat_sym.c',
'qat_sym_session.c',
+ 'qat_sym_job.c',
'qat_asym_pmd.c',
'qat_asym.c')
qat_ext_deps += dep
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 25b6dd5f4..609180d3f 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -336,7 +336,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
set_cipher_iv(ctx->cipher_iv.length,
ctx->cipher_iv.offset,
cipher_param, op, qat_req);
-
} else if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
diff --git a/drivers/crypto/qat/qat_sym_job.c b/drivers/crypto/qat/qat_sym_job.c
new file mode 100644
index 000000000..7c0913459
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.c
@@ -0,0 +1,661 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym.h"
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+#include "qat_qp.h"
+#include "qat_sym_job.h"
+
+static __rte_always_inline int
+qat_sym_frame_fill_sgl(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_sgl *sgl, uint32_t max_len)
+{
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sgl *list;
+ int64_t len = max_len;
+ uint32_t i;
+
+ if (!sgl)
+ return -EINVAL;
+ if (sgl->num < 2 || sgl->num > QAT_SYM_SGL_MAX_NUMBER || !sgl->vec)
+ return -EINVAL;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
+ list = (struct qat_sgl *)&cookie->qat_sgl_src;
+
+ for (i = 0; i < sgl->num && len > 0; i++) {
+ list->buffers[i].len = RTE_MIN(sgl->vec[i].len, len);
+ list->buffers[i].resrvd = 0;
+ list->buffers[i].addr = sgl->vec[i].iova;
+ len -= list->buffers[i].len;
+ }
+
+ if (unlikely(len > 0))
+ return -1;
+
+ list->num_bufs = i;
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ req->comn_mid.src_length = req->comn_mid.dst_length = 0;
+ return 0;
+}
+
+static __rte_always_inline void
+qat_sym_set_cipher_param(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ uint32_t cipher_ofs, uint32_t cipher_len)
+{
+ cipher_param->cipher_offset = cipher_ofs;
+ cipher_param->cipher_length = cipher_len;
+}
+
+static __rte_always_inline void
+qat_sym_set_auth_param(struct icp_qat_fw_la_auth_req_params *auth_param,
+ uint32_t auth_ofs, uint32_t auth_len,
+ rte_iova_t digest_iova, rte_iova_t aad_iova)
+{
+ auth_param->auth_off = auth_ofs;
+ auth_param->auth_len = auth_len;
+ auth_param->auth_res_addr = digest_iova;
+ auth_param->u1.aad_adr = aad_iova;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_aead(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+ /* In case of AES-CCM this may point to user selected
+ * memory or the IV offset in the crypto_op
+ */
+ uint8_t *aad_data;
+ /* This is the true AAD length; it does not include the
+ * 18 bytes of preceding data
+ */
+ uint8_t aad_ccm_real_len;
+ uint8_t aad_len_field_sz;
+ uint32_t msg_len_be;
+ rte_iova_t aad_iova;
+ uint8_t q;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->aead.aead_ofs + job->aead.aead_len;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ aad_iova = job->aead.aad_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ aad_data = job->aead.aad;
+ aad_iova = job->aead.aad_iova;
+ aad_ccm_real_len = 0;
+ aad_len_field_sz = 0;
+ msg_len_be = rte_bswap32(job->aead.aead_len);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ aad_data = job->iv;
+ aad_iova = job->iv_iova;
+ }
+
+ q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+ ctx->digest_length, q);
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET +
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+ rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx], 0, pad_len);
+ }
+
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+ + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(job->aead.aad +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ qat_sym_set_cipher_param(cipher_param, job->aead.aead_ofs,
+ job->aead.aead_len);
+ qat_sym_set_auth_param(auth_param, job->aead.aead_ofs,
+ job->aead.aead_len, job->aead.tag_iova, aad_iova);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->aead.aead_ofs + job->aead.aead_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ if (ctx->is_single_pass) {
+ cipher_param->spc_aad_addr = aad_iova;
+ cipher_param->spc_auth_res_addr = job->aead.tag_iova;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+
+static __rte_always_inline int
+qat_sym_job_enqueue_cipher(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->cipher_only.cipher_ofs,
+ job->cipher_only.cipher_len);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_auth(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ auth_param = (void *)((uint8_t *)&req->serv_specif_rqpars +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->auth_only.auth_ofs + job->auth_only.auth_len;
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->auth_only.auth_ofs,
+ job->auth_only.auth_len, job->auth_only.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->auth_only.auth_ofs +
+ job->auth_only.auth_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_chain(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t min_ofs = RTE_MIN(job->chain.cipher_ofs, job->chain.auth_ofs);
+ uint32_t max_len = RTE_MAX(job->chain.cipher_len, job->chain.auth_len);
+ rte_iova_t auth_iova_end;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr =
+ req->comn_mid.dest_data_addr = job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length = min_ofs + max_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->chain.cipher_ofs,
+ job->chain.cipher_len);
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->chain.auth_ofs,
+ job->chain.auth_len, job->chain.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ uint32_t len = job->chain.auth_ofs +
+ job->chain.auth_len;
+ struct rte_crypto_vec *vec = job->sgl->vec;
+ int auth_end_get = 0;
+ while (len) {
+ if (len <= vec->len) {
+ auth_iova_end = vec->iova + len;
+ auth_end_get = 1;
+ break;
+ }
+ len -= vec->len;
+ vec++;
+ }
+ if (!auth_end_get) {
+ QAT_DP_LOG(ERR, "Failed to get auth end");
+ return -1;
+ }
+ } else
+ auth_iova_end = job->data_iova + job->chain.auth_ofs +
+ job->chain.auth_len;
+
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_param->auth_off + auth_param->auth_len <
+ cipher_param->cipher_offset +
+ cipher_param->cipher_length) &&
+ (job->chain.digest_iova == auth_iova_end)) {
+ /* Handle partial digest encryption */
+ if (cipher_param->cipher_offset +
+ cipher_param->cipher_length <
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length)
+ req->comn_mid.dst_length =
+ req->comn_mid.src_length =
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length;
+ struct icp_qat_fw_comn_req_hdr *header =
+ &req->comn_hdr;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ min_ofs + max_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+#define get_rx_queue_message_at_index(q, h, i) \
+ (void *)((uint8_t *)q->base_addr + ((h + q->msg_size * (i)) & \
+ q->modulo_mask))
+
+static __rte_always_inline int
+qat_is_rx_msg_ok(struct icp_qat_fw_comn_resp *resp_msg)
+{
+ return ICP_QAT_FW_COMN_STATUS_FLAG_OK ==
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status);
+}
+
+static __rte_always_inline int
+qat_sym_query_processed_jobs(void *qat_sym_qp, uint32_t nb_jobs)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head = (rx_queue->head + (nb_jobs - 1) * rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ if (*(uint32_t *)resp == ADF_RING_EMPTY_SIG)
+ return 0;
+
+ return 1;
+}
+
+static __rte_always_inline void *
+qat_sym_job_dequeue_one(void *qat_sym_qp, uint64_t *drv_data, uint64_t flags,
+ int *is_op_success)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head;
+ void *opaque;
+
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+ head = rx_queue->head;
+ else
+ head = (uint32_t)*drv_data;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+ *is_op_success = 0;
+ return NULL;
+ }
+
+ if (unlikely(qat_is_rx_msg_ok(resp) == 0))
+ *is_op_success = -1;
+ else
+ *is_op_success = 1;
+
+ opaque = (void *)(uintptr_t)resp->opaque_data;
+
+ rx_queue->head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ rx_queue->nb_processed_responses++;
+ qp->dequeued++;
+ qp->stats.dequeued_count++;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+
+ return opaque;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_job_dequeue_n(void *qat_sym_qp, uint64_t *drv_data,
+ void *user_data, rte_crpyto_hw_user_post_deq_cb_fn cb,
+ uint32_t n, uint64_t flags, uint32_t *n_failed_jobs)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head, i;
+ uint32_t status, total_fail = 0;
+
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+ head = rx_queue->head;
+ else
+ head = (uint32_t)*drv_data;
+
+ for (i = 0; i < n; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_EXHAUST)
+ break;
+ return -i;
+ }
+
+ status = qat_is_rx_msg_ok(resp);
+ total_fail += status;
+ cb(user_data, i, status);
+
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ }
+
+ rx_queue->head = head;
+ rx_queue->nb_processed_responses += i;
+ qp->dequeued += i;
+ qp->stats.dequeued_count += i;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+ *n_failed_jobs = total_fail;
+
+ return i;
+}
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops)
+{
+ struct qat_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp->service_type != QAT_SERVICE_SYMMETRIC)
+ return -EINVAL;
+
+ hw_ops->qp = (void *)qp;
+ hw_ops->enqueue_aead = qat_sym_job_enqueue_aead;
+ hw_ops->enqueue_cipher = qat_sym_job_enqueue_cipher;
+ hw_ops->enqueue_auth = qat_sym_job_enqueue_auth;
+ hw_ops->enqueue_chain = qat_sym_job_enqueue_chain;
+ hw_ops->dequeue_one = qat_sym_job_dequeue_one;
+ hw_ops->dequeue_many = qat_sym_job_dequeue_n;
+ hw_ops->query_processed = qat_sym_query_processed_jobs;
+
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_sym_job.h b/drivers/crypto/qat/qat_sym_job.h
new file mode 100644
index 000000000..b11aeb841
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_FRAME_H_
+#define _QAT_SYM_FRAME_H_
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops);
+
+#endif /* _QAT_SYM_FRAME_H_ */
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index e887c880f..be9d73c0a 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -13,6 +13,7 @@
#include "qat_sym.h"
#include "qat_sym_session.h"
#include "qat_sym_pmd.h"
+#include "qat_sym_job.h"
#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
@@ -234,7 +235,8 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
/* Crypto related operations */
.sym_session_get_size = qat_sym_session_get_private_size,
.sym_session_configure = qat_sym_session_configure,
- .sym_session_clear = qat_sym_session_clear
+ .sym_session_clear = qat_sym_session_clear,
+ .sym_get_hw_ops = qat_sym_get_ops,
};
static uint16_t
@@ -308,7 +310,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
- RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
+ RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
+ RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API;
internals = cryptodev->data->dev_private;
internals->qat_dev = qat_pci_dev;
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
` (2 preceding siblings ...)
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 2/4] " Fan Zhang
@ 2020-07-03 11:09 ` Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide Fan Zhang
` (3 subsequent siblings)
7 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 11:09 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch adds the QAT test to use cryptodev symmetric crypto
direct APIs.
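At its core the helper added below (process_sym_hw_api_op) drives each
operation synchronously through the direct APIs. Condensed, with the variable
declarations taken from that helper and error handling trimmed, the flow is
roughly:

	/* Enqueue one job; START + END kick the queue immediately and
	 * SET_OPAQUE stores the rte_crypto_op pointer for dequeue. */
	ret = hw_ops.enqueue_cipher(hw_ops.qp, sop->session, &job,
			(void *)op, &drv_data, flags);

	/* Poll up to ~1s for the hardware to finish the job. */
	while (hw_ops.query_processed(hw_ops.qp, 1) == 0 && count++ < 1024)
		rte_delay_ms(1);

	/* Dequeue and check the opaque pointer round-tripped. */
	op_ret = hw_ops.dequeue_one(hw_ops.qp, &drv_data,
			RTE_CRYPTO_HW_DEQ_FLAG_START, &ret);
	op->status = (op_ret == op && ret == 1) ?
			RTE_CRYPTO_OP_STATUS_SUCCESS :
			RTE_CRYPTO_OP_STATUS_ERROR;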
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
app/test/test_cryptodev.c | 353 ++++++++++++++++++++++++--
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 ++--
3 files changed, 372 insertions(+), 37 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 8f631468b..9fbbe1d6c 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -55,6 +55,8 @@ static int gbl_driver_id;
static enum rte_security_session_action_type gbl_action_type =
RTE_SECURITY_ACTION_TYPE_NONE;
+int qat_api_test;
+
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
struct rte_mempool *large_mbuf_pool;
@@ -142,6 +144,154 @@ ceil_byte_length(uint32_t num_bits)
return (num_bits >> 3);
}
+void
+process_sym_hw_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits)
+{
+ int32_t n;
+ struct rte_crypto_hw_ops hw_ops;
+ struct rte_crypto_op *op_ret;
+ struct rte_crypto_sym_op *sop;
+ struct rte_crypto_sym_job job;
+ struct rte_crypto_sgl sgl;
+ struct rte_crypto_vec vec[UINT8_MAX] = { {0} };
+ int ret;
+ uint32_t min_ofs = 0, max_len = 0;
+ uint64_t drv_data;
+ uint64_t flags = RTE_CRYPTO_HW_ENQ_FLAG_START |
+ RTE_CRYPTO_HW_ENQ_FLAG_END |
+ RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE;
+ enum {
+ cipher = 0,
+ auth,
+ chain,
+ aead
+ } qat_api_test_type;
+ uint32_t count = 0;
+
+ memset(&job, 0, sizeof(job));
+
+ ret = rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops);
+ if (ret) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ return;
+ }
+
+ sop = op->sym;
+
+ if (is_cipher && is_auth) {
+ qat_api_test_type = chain;
+ min_ofs = RTE_MIN(sop->cipher.data.offset,
+ sop->auth.data.offset);
+ max_len = RTE_MAX(sop->cipher.data.length,
+ sop->auth.data.length);
+ } else if (is_cipher) {
+ qat_api_test_type = cipher;
+ min_ofs = sop->cipher.data.offset;
+ max_len = sop->cipher.data.length;
+ } else if (is_auth) {
+ qat_api_test_type = auth;
+ min_ofs = sop->auth.data.offset;
+ max_len = sop->auth.data.length;
+ } else { /* aead */
+ qat_api_test_type = aead;
+ min_ofs = sop->aead.data.offset;
+ max_len = sop->aead.data.length;
+ }
+
+ if (len_in_bits) {
+ max_len = max_len >> 3;
+ min_ofs = min_ofs >> 3;
+ }
+
+ n = rte_crypto_mbuf_to_vec(sop->m_src, min_ofs, max_len,
+ vec, RTE_DIM(vec));
+ if (n < 0 || n != sop->m_src->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ if (n > 1) {
+ sgl.vec = vec;
+ sgl.num = n;
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ job.sgl = &sgl;
+ } else
+ job.data_iova = rte_pktmbuf_iova(sop->m_src);
+
+
+ switch (qat_api_test_type) {
+ case aead:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+ job.aead.aead_ofs = min_ofs;
+ job.aead.aead_len = max_len;
+ job.aead.aad = sop->aead.aad.data;
+ job.aead.aad_iova = sop->aead.aad.phys_addr;
+ job.aead.tag_iova = sop->aead.digest.phys_addr;
+ ret = hw_ops.enqueue_aead(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case cipher:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.cipher_only.cipher_ofs = min_ofs;
+ job.cipher_only.cipher_len = max_len;
+ ret = hw_ops.enqueue_cipher(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case auth:
+ job.auth_only.auth_ofs = min_ofs;
+ job.auth_only.auth_len = max_len;
+ job.auth_only.digest_iova = sop->auth.digest.phys_addr;
+ ret = hw_ops.enqueue_auth(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case chain:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+ job.chain.cipher_ofs = sop->cipher.data.offset;
+ job.chain.cipher_len = sop->cipher.data.length;
+ if (len_in_bits) {
+ job.chain.cipher_len = job.chain.cipher_len >> 3;
+ job.chain.cipher_ofs = job.chain.cipher_ofs >> 3;
+ }
+ job.chain.auth_ofs = sop->auth.data.offset;
+ job.chain.auth_len = sop->auth.data.length;
+ if (len_in_bits) {
+ job.chain.auth_len = job.chain.auth_len >> 3;
+ job.chain.auth_ofs = job.chain.auth_ofs >> 3;
+ }
+ job.chain.digest_iova = sop->auth.digest.phys_addr;
+ ret = hw_ops.enqueue_chain(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ }
+
+ if (ret < 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ ret = 0;
+
+ while (ret == 0 && count++ < 1024) {
+ ret = hw_ops.query_processed(hw_ops.qp, 1);
+ if (!ret)
+ rte_delay_ms(1);
+ }
+ if (ret < 0 || count >= 1024) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ flags = RTE_CRYPTO_HW_DEQ_FLAG_START;
+ op_ret = hw_ops.dequeue_one(hw_ops.qp, &drv_data, flags, &ret);
+ if (op_ret != op || ret != 1)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+}
+
static void
process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
{
@@ -2451,7 +2601,11 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -2530,7 +2684,11 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2600,6 +2758,9 @@ test_kasumi_authentication(const struct kasumi_hash_test_data *tdata)
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_crypt_auth_op(ts_params->valid_devs[0],
ut_params->op);
+ else if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
else
ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
@@ -2671,7 +2832,11 @@ test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2878,8 +3043,12 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
- ut_params->op);
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -2964,7 +3133,11 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3287,7 +3460,11 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3362,7 +3539,11 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3737,7 +3918,11 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -3905,7 +4090,11 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4000,7 +4189,11 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4136,7 +4329,11 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4325,7 +4522,11 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4507,7 +4708,11 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4697,7 +4902,11 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4838,7 +5047,11 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4925,7 +5138,11 @@ test_zuc_encryption(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5012,7 +5229,11 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5100,7 +5321,11 @@ test_zuc_authentication(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5232,7 +5457,11 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5418,7 +5647,11 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -7024,6 +7257,9 @@ test_authenticated_encryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -7993,6 +8229,9 @@ test_authenticated_decryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (qat_api_test == 1)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -11284,6 +11523,9 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
if (oop == IN_PLACE &&
gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (oop == IN_PLACE && qat_api_test == 1)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -13241,6 +13483,75 @@ test_cryptodev_nitrox(void)
return unit_test_suite_runner(&cryptodev_nitrox_testsuite);
}
+static struct unit_test_suite cryptodev_sym_direct_api_testsuite = {
+ .suite_name = "Crypto Sym direct API Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_cipheronly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_chain_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_qat_sym_direct_api(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ int ret;
+
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check that both "
+ "CONFIG_RTE_LIBRTE_PMD_QAT and CONFIG_RTE_LIBRTE_PMD_QAT_SYM "
+ "are enabled in config file to run this testsuite.\n");
+ return TEST_SKIPPED;
+ }
+
+ qat_api_test = 1;
+ ret = unit_test_suite_runner(&cryptodev_sym_direct_api_testsuite);
+ qat_api_test = 0;
+
+ return ret;
+}
+
+REGISTER_TEST_COMMAND(cryptodev_qat_sym_api_autotest, test_qat_sym_direct_api);
REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_mb_autotest,
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 41542e055..2854115aa 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -71,6 +71,8 @@
#define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
+extern int qat_api_test;
+
/**
* Write (spread) data from buffer to mbuf data
*
@@ -209,4 +211,8 @@ create_segmented_mbuf(struct rte_mempool *mbuf_pool, int pkt_len,
return NULL;
}
+void
+process_sym_hw_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits);
+
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_blockcipher.c b/app/test/test_cryptodev_blockcipher.c
index 642b54971..dfa74a449 100644
--- a/app/test/test_cryptodev_blockcipher.c
+++ b/app/test/test_cryptodev_blockcipher.c
@@ -461,25 +461,43 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
/* Process crypto operation */
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Error sending packet for encryption");
- status = TEST_FAILED;
- goto error_exit;
- }
+ if (qat_api_test) {
+ uint8_t is_cipher = 0, is_auth = 0;
+
+ if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
+ RTE_LOG(DEBUG, USER1,
+ "QAT direct API does not support OOP, Test Skipped.\n");
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "SKIPPED");
+ status = TEST_SUCCESS;
+ goto error_exit;
+ }
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
+ is_cipher = 1;
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
+ is_auth = 1;
+
+ process_sym_hw_api_op(dev_id, 0, op, is_cipher, is_auth, 0);
+ } else {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for encryption");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
- op = NULL;
+ op = NULL;
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
- rte_pause();
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
+ rte_pause();
- if (!op) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Failed to process sym crypto op");
- status = TEST_FAILED;
- goto error_exit;
+ if (!op) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process sym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
}
debug_hexdump(stdout, "m_src(after):",
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
` (3 preceding siblings ...)
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
@ 2020-07-03 11:09 ` Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 3/4] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
` (2 subsequent siblings)
7 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 11:09 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch updates the programmer's guide to demonstrate the usage
and limitations of the cryptodev symmetric crypto data-path APIs.
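One thing the guide text mentions but the sample code does not show is
checking the feature flag before using the APIs. A possible runtime check
(sketch only, not part of this patch; direct_api_supported is a hypothetical
helper name) could look like:

	#include <rte_cryptodev.h>

	/* Return non-zero if dev_id advertises the direct data-path API. */
	static int
	direct_api_supported(uint8_t dev_id)
	{
		struct rte_cryptodev_info info;

		rte_cryptodev_info_get(dev_id, &info);
		return (info.feature_flags &
				RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API) != 0;
	}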
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
doc/guides/prog_guide/cryptodev_lib.rst | 266 ++++++++++++++++++++++++
doc/guides/rel_notes/release_20_08.rst | 8 +
2 files changed, 274 insertions(+)
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index c14f750fa..9900a593a 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -861,6 +861,272 @@ using one of the crypto PMDs available in DPDK.
num_dequeued_ops);
} while (total_num_dequeued_ops < num_enqueued_ops);
+Cryptodev Direct Symmetric Crypto Data-path APIs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The direct symmetric crypto data-path APIs are a set of APIs provided
+especially for symmetric hardware crypto PMDs that offer fast data-path
+enqueue/dequeue operations. The direct data-path APIs take advantage of the
+existing Cryptodev APIs for device, queue pair, and session management. In
+addition, the user is required to retrieve the queue pair pointer data and
+the function pointers. The APIs are provided as an advanced feature, as an
+alternative to ``rte_cryptodev_enqueue_burst`` and
+``rte_cryptodev_dequeue_burst``. The APIs are designed to let the user build
+a close-to-native performance symmetric crypto data-path implementation for
+applications that do not necessarily depend on cryptodev operations,
+cryptodev operation mempools, or mbufs.
+
+Cryptodev PMDs that support this feature advertise the
+``RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API`` feature flag. The user calls the
+``rte_cryptodev_sym_get_hw_ops`` function to get all the function pointers
+for the different enqueue and dequeue operations, plus the device-specific
+queue pair data. After the ``rte_crypto_hw_ops`` structure is properly set by
+the driver, the user can use the function pointers and the queue data pointer
+in the structure to enqueue and dequeue crypto jobs.
+
+To simplify the enqueue APIs, a symmetric job structure is defined:
+
+.. code-block:: c
+
+ /**
+ * Asynchronous operation job descriptor.
+ * Used by HW crypto devices direct API call that supports such activity
+ **/
+ struct rte_crypto_sym_job {
+ union {
+ /**
+ * When RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL bit is set in flags, sgl
+ * field is used as input data. Otherwise data_iova is
+ * used.
+ **/
+ rte_iova_t data_iova;
+ struct rte_crypto_sgl *sgl;
+ };
+ union {
+ /**
+ * Different from cryptodev ops, all ofs and len fields are
+ * in bytes (including Snow3G/Kasumi/Zuc).
+ **/
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ } cipher_only;
+ struct {
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } auth_only;
+ struct {
+ uint32_t aead_ofs;
+ uint32_t aead_len;
+ rte_iova_t tag_iova;
+ uint8_t *aad;
+ rte_iova_t aad_iova;
+ } aead;
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } chain;
+ };
+ uint8_t *iv;
+ rte_iova_t iv_iova;
+ };
+
+Different from a Cryptodev operation, the ``rte_crypto_sym_job`` structure
+focuses only on the data fields required for the crypto PMD to execute a
+single job, and it is not supposed to be stored as opaque data. The user can
+freely allocate the structure buffer on the stack and reuse it to fill all
+jobs.
+
+To use the direct symmetric crypto APIs safely, the user has to carefully
+set the correct fields in the ``rte_crypto_sym_job`` structure, otherwise the
+application or the system may crash. Also, there are a few limitations to the
+direct symmetric crypto APIs:
+
+* Only in-place operations are supported.
+* The APIs are NOT thread-safe.
+* The direct API's enqueue CANNOT be mixed with rte_cryptodev_enqueue_burst,
+ or vice versa.
+
+The following sample code shows how to use the Cryptodev direct APIs to
+process a user-defined frame of up to 32 buffers with a chained AES-CBC and
+HMAC-SHA algorithm.
+
+See *DPDK API Reference* for details on each API definitions.
+
+.. code-block:: c
+
+ #include <rte_cryptodev.h>
+
+ #define FRAME_ELT_OK 0
+ #define FRAME_ELT_FAIL 1
+ #define FRAME_OK 0
+ #define FRAME_SOME_ELT_ERROR 1
+ #define FRAME_SIZE 32
+
+ /* Sample frame element struct */
+ struct sample_frame_elt {
+ /* The status field of frame element */
+ uint8_t status;
+ /* Pre-created and initialized cryptodev session */
+ struct rte_cryptodev_sym_session *session;
+ union {
+ rte_iova_t data;
+ struct rte_crypto_sgl sgl;
+ };
+ uint32_t data_len;
+ rte_iova_t digest;
+ uint8_t *iv;
+ uint8_t is_sgl;
+ };
+
+ /* Sample frame struct to describe up to 32 crypto jobs */
+ struct sample_frame {
+ struct sample_frame_elt elts[FRAME_SIZE]; /**< All frame elements */
+ uint32_t n_elts; /**< Number of elements */
+ uint8_t status; /**< FRAME_OK or FRAME_SOME_ELT_ERROR */
+ };
+
+ /* Global Cryptodev Direct API structure */
+ static struct rte_crypto_hw_ops hw_ops;
+
+ /* Initialization */
+ static int
+ frame_operation_init(
+ uint8_t cryptodev_id, /**< Initialized cryptodev ID */
+ uint16_t qp_id /**< Initialized queue pair ID */)
+ {
+ int ret;
+
+ /* Get APIs */
+ ret = rte_cryptodev_sym_get_hw_ops(cryptodev_id, qp_id, &hw_ops);
+ /* If the device does not support this feature or the queue pair is
+ not initialized, return -1 */
+ if (ret < 0)
+ return -1;
+ return 0;
+ }
+
+ /* Frame enqueue function use direct AES-CBC-* + HMAC-SHA* API */
+ static int
+ enqueue_frame_to_direct_api(
+ struct sample_frame *frame /**< Initialized user frame struct */)
+ {
+ struct rte_crypto_sym_job job;
+ uint64_t drv_data, flags = 0;
+ uint32_t i;
+
+ /* Fill all sample frame element data into HW queue pair */
+ for (i = 0; i < frame->n_elts; i++) {
+ struct sample_frame_elt *fe = &frame->elts[i];
+ int ret;
+
+ /* if it is the first element in the frame, set the START flag to
+ let the driver know it is the first element and fill drv_data. */
+ if (i == 0)
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_START;
+ else
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_START;
+
+ /* if it is the last element in the frame, set the END flag to
+ kick the HW queue */
+ if (i == frame->n_elts - 1)
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_END;
+ else
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_END;
+
+ /* Fill the job data with frame element data */
+ if (fe->is_sgl != 0) {
+ /* The buffer is a SGL buffer */
+ job.sgl = &fe->sgl;
+ /* Set SGL flag */
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ } else {
+ job.data_iova = fe->data;
+ /* Unset SGL flag in the job */
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ }
+
+ job.chain.cipher_ofs = job.chain.auth_ofs = 0;
+ job.chain.cipher_len = job.chain.auth_len = fe->data_len;
+ job.chain.digest_iova = fe->digest;
+
+ job.iv = fe->iv;
+
+ /* Call direct data-path enqueue chaining op API */
+ ret = hw_ops.enqueue_chain(hw_ops.qp, fe->session, &job,
+ (void *)frame, &drv_data, flags);
+ /**
+ * In case one element fails to be enqueued, simply abandon
+ * enqueuing the whole frame.
+ **/
+ if (ret < 0)
+ return -1;
+
+ /**
+ * At this point the element is enqueued. The job buffer can be
+ * safely reused for enqueuing the next frame element.
+ **/
+ }
+
+ return 0;
+ }
+
+ /**
+ * Sample function to write frame element status field based on
+ * driver returned operation result. The function return and parameter
+ * should follow the prototype rte_crpyto_hw_user_post_deq_cb_fn() in
+ * rte_cryptodev.h
+ **/
+ static __rte_always_inline void
+ write_frame_elt_status(void *data, uint32_t index, uint8_t is_op_success)
+ {
+ struct sample_frame *frame = data;
+ frame->elts[index + 1].status = is_op_success ? FRAME_ELT_OK :
+ FRAME_ELT_FAIL;
+ }
+
+ /* Frame dequeue function use direct dequeue API */
+ static struct sample_frame *
+ dequeue_frame_with_direct_api(void)
+ {
+ struct sample_frame *ret_frame;
+ uint64_t flags, drv_data;
+ uint32_t n_fail, n_fail_first = 0;
+ int ret;
+
+ /* Dequeue first job, which should have frame data stored in opaque */
+ flags = RTE_CRYPTO_HW_DEQ_FLAG_START;
+ ret_frame = hw_ops.dequeue_one(hw_ops.qp, &drv_data, flags, &ret);
+ if (ret == 0) {
+ /* ret == 0, means it is still under processing */
+ return NULL;
+ } else if (ret == 1) {
+ /* ret_frame is successfully retrieved, the ret stores the
+ operation result */
+ ret_frame->elts[0].status = FRAME_ELT_OK;
+ } else {
+ ret_frame->elts[0].status = FRAME_ELT_FAIL;
+ n_fail_first = 1;
+ }
+
+ /* Query if all n_elts have been processed; if not, return NULL */
+ if (!hw_ops.query_processed(hw_ops.qp, ret_frame->n_elts))
+ return NULL;
+
+ /* We are sure all elements have been processed, dequeue them all */
+ flags = 0;
+ ret = hw_ops.dequeue_many(hw_ops.qp, &drv_data, (void *)ret_frame,
+ write_frame_elt_status, ret_frame->n_elts - 1, flags, &n_fail);
+
+ if (n_fail + n_fail_first > 0)
+ ret_frame->status = FRAME_SOME_ELT_ERROR;
+ else
+ ret_frame->status = FRAME_OK;
+
+ return ret_frame;
+ }
+
Asymmetric Cryptography
-----------------------
diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
index 39064afbe..eb973693d 100644
--- a/doc/guides/rel_notes/release_20_08.rst
+++ b/doc/guides/rel_notes/release_20_08.rst
@@ -56,6 +56,14 @@ New Features
Also, make sure to start the actual text at the margin.
=========================================================
+ * **Added Cryptodev data-path APIs for a non-mbuf-centric data-path.**
+
+ Cryptodev has gained a set of data-path APIs that are not based on
+ cryptodev operations. The APIs are designed for external applications
+ or libraries that want to use cryptodev but whose data-path
+ implementations are not mbuf-centric. The QAT symmetric PMD is also
+ updated to add support for these APIs.
+
Removed Items
-------------
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v3 3/4] test/crypto: add unit-test for cryptodev direct APIs
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
` (4 preceding siblings ...)
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide Fan Zhang
@ 2020-07-03 11:09 ` Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 4/4] doc: add cryptodev direct APIs guide Fan Zhang
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
7 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 11:09 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch adds a QAT test that uses the cryptodev symmetric crypto
direct APIs.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
app/test/test_cryptodev.c | 353 ++++++++++++++++++++++++--
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 ++--
3 files changed, 372 insertions(+), 37 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 8f631468b..9fbbe1d6c 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -55,6 +55,8 @@ static int gbl_driver_id;
static enum rte_security_session_action_type gbl_action_type =
RTE_SECURITY_ACTION_TYPE_NONE;
+int qat_api_test;
+
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
struct rte_mempool *large_mbuf_pool;
@@ -142,6 +144,154 @@ ceil_byte_length(uint32_t num_bits)
return (num_bits >> 3);
}
+void
+process_sym_hw_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits)
+{
+ int32_t n;
+ struct rte_crypto_hw_ops hw_ops;
+ struct rte_crypto_op *op_ret;
+ struct rte_crypto_sym_op *sop;
+ struct rte_crypto_sym_job job;
+ struct rte_crypto_sgl sgl;
+ struct rte_crypto_vec vec[UINT8_MAX] = { {0} };
+ int ret;
+ uint32_t min_ofs = 0, max_len = 0;
+ uint64_t drv_data;
+ uint64_t flags = RTE_CRYPTO_HW_ENQ_FLAG_START |
+ RTE_CRYPTO_HW_ENQ_FLAG_END |
+ RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE;
+ enum {
+ cipher = 0,
+ auth,
+ chain,
+ aead
+ } qat_api_test_type;
+ uint32_t count = 0;
+
+ memset(&job, 0, sizeof(job));
+
+ ret = rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops);
+ if (ret) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ return;
+ }
+
+ sop = op->sym;
+
+ if (is_cipher && is_auth) {
+ qat_api_test_type = chain;
+ min_ofs = RTE_MIN(sop->cipher.data.offset,
+ sop->auth.data.offset);
+ max_len = RTE_MAX(sop->cipher.data.length,
+ sop->auth.data.length);
+ } else if (is_cipher) {
+ qat_api_test_type = cipher;
+ min_ofs = sop->cipher.data.offset;
+ max_len = sop->cipher.data.length;
+ } else if (is_auth) {
+ qat_api_test_type = auth;
+ min_ofs = sop->auth.data.offset;
+ max_len = sop->auth.data.length;
+ } else { /* aead */
+ qat_api_test_type = aead;
+ min_ofs = sop->aead.data.offset;
+ max_len = sop->aead.data.length;
+ }
+
+ if (len_in_bits) {
+ max_len = max_len >> 3;
+ min_ofs = min_ofs >> 3;
+ }
+
+ n = rte_crypto_mbuf_to_vec(sop->m_src, min_ofs, max_len,
+ vec, RTE_DIM(vec));
+ if (n < 0 || n != sop->m_src->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ if (n > 1) {
+ sgl.vec = vec;
+ sgl.num = n;
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ job.sgl = &sgl;
+ } else
+ job.data_iova = rte_pktmbuf_iova(sop->m_src);
+
+
+ switch (qat_api_test_type) {
+ case aead:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+ job.aead.aead_ofs = min_ofs;
+ job.aead.aead_len = max_len;
+ job.aead.aad = sop->aead.aad.data;
+ job.aead.aad_iova = sop->aead.aad.phys_addr;
+ job.aead.tag_iova = sop->aead.digest.phys_addr;
+ ret = hw_ops.enqueue_aead(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case cipher:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.cipher_only.cipher_ofs = min_ofs;
+ job.cipher_only.cipher_len = max_len;
+ ret = hw_ops.enqueue_cipher(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case auth:
+ job.auth_only.auth_ofs = min_ofs;
+ job.auth_only.auth_len = max_len;
+ job.auth_only.digest_iova = sop->auth.digest.phys_addr;
+ ret = hw_ops.enqueue_auth(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case chain:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+ job.chain.cipher_ofs = sop->cipher.data.offset;
+ job.chain.cipher_len = sop->cipher.data.length;
+ if (len_in_bits) {
+ job.chain.cipher_len = job.chain.cipher_len >> 3;
+ job.chain.cipher_ofs = job.chain.cipher_ofs >> 3;
+ }
+ job.chain.auth_ofs = sop->auth.data.offset;
+ job.chain.auth_len = sop->auth.data.length;
+ if (len_in_bits) {
+ job.chain.auth_len = job.chain.auth_len >> 3;
+ job.chain.auth_ofs = job.chain.auth_ofs >> 3;
+ }
+ job.chain.digest_iova = sop->auth.digest.phys_addr;
+ ret = hw_ops.enqueue_chain(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ }
+
+ if (ret < 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ ret = 0;
+
+ while (ret == 0 && count++ < 1024) {
+ ret = hw_ops.query_processed(hw_ops.qp, 1);
+ if (!ret)
+ rte_delay_ms(1);
+ }
+ if (ret < 0 || count >= 1024) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ flags = RTE_CRYPTO_HW_DEQ_FLAG_START;
+ op_ret = hw_ops.dequeue_one(hw_ops.qp, &drv_data, flags, &ret);
+ if (op_ret != op || ret != 1)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+}
+
static void
process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
{
@@ -2451,7 +2601,11 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -2530,7 +2684,11 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2600,6 +2758,9 @@ test_kasumi_authentication(const struct kasumi_hash_test_data *tdata)
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_crypt_auth_op(ts_params->valid_devs[0],
ut_params->op);
+ else if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
else
ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
@@ -2671,7 +2832,11 @@ test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2878,8 +3043,12 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
- ut_params->op);
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -2964,7 +3133,11 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3287,7 +3460,11 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3362,7 +3539,11 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3737,7 +3918,11 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -3905,7 +4090,11 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4000,7 +4189,11 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4136,7 +4329,11 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4325,7 +4522,11 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4507,7 +4708,11 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4697,7 +4902,11 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4838,7 +5047,11 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4925,7 +5138,11 @@ test_zuc_encryption(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5012,7 +5229,11 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5100,7 +5321,11 @@ test_zuc_authentication(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5232,7 +5457,11 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5418,7 +5647,11 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -7024,6 +7257,9 @@ test_authenticated_encryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -7993,6 +8229,9 @@ test_authenticated_decryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (qat_api_test == 1)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -11284,6 +11523,9 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
if (oop == IN_PLACE &&
gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (oop == IN_PLACE && qat_api_test == 1)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -13241,6 +13483,75 @@ test_cryptodev_nitrox(void)
return unit_test_suite_runner(&cryptodev_nitrox_testsuite);
}
+static struct unit_test_suite cryptodev_sym_direct_api_testsuite = {
+ .suite_name = "Crypto Sym direct API Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_cipheronly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_chain_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_qat_sym_direct_api(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ int ret;
+
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check that both "
+ "CONFIG_RTE_LIBRTE_PMD_QAT and CONFIG_RTE_LIBRTE_PMD_QAT_SYM "
+ "are enabled in config file to run this testsuite.\n");
+ return TEST_SKIPPED;
+ }
+
+ qat_api_test = 1;
+ ret = unit_test_suite_runner(&cryptodev_sym_direct_api_testsuite);
+ qat_api_test = 0;
+
+ return ret;
+}
+
+REGISTER_TEST_COMMAND(cryptodev_qat_sym_api_autotest, test_qat_sym_direct_api);
REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_mb_autotest,
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 41542e055..2854115aa 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -71,6 +71,8 @@
#define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
+extern int qat_api_test;
+
/**
* Write (spread) data from buffer to mbuf data
*
@@ -209,4 +211,8 @@ create_segmented_mbuf(struct rte_mempool *mbuf_pool, int pkt_len,
return NULL;
}
+void
+process_sym_hw_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits);
+
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_blockcipher.c b/app/test/test_cryptodev_blockcipher.c
index 642b54971..dfa74a449 100644
--- a/app/test/test_cryptodev_blockcipher.c
+++ b/app/test/test_cryptodev_blockcipher.c
@@ -461,25 +461,43 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
/* Process crypto operation */
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Error sending packet for encryption");
- status = TEST_FAILED;
- goto error_exit;
- }
+ if (qat_api_test) {
+ uint8_t is_cipher = 0, is_auth = 0;
+
+ if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
+ RTE_LOG(DEBUG, USER1,
+ "QAT direct API does not support OOP, Test Skipped.\n");
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "SKIPPED");
+ status = TEST_SUCCESS;
+ goto error_exit;
+ }
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
+ is_cipher = 1;
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
+ is_auth = 1;
+
+ process_sym_hw_api_op(dev_id, 0, op, is_cipher, is_auth, 0);
+ } else {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for encryption");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
- op = NULL;
+ op = NULL;
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
- rte_pause();
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
+ rte_pause();
- if (!op) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Failed to process sym crypto op");
- status = TEST_FAILED;
- goto error_exit;
+ if (!op) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process sym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
}
debug_hexdump(stdout, "m_src(after):",
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v3 4/4] doc: add cryptodev direct APIs guide
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
` (5 preceding siblings ...)
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 3/4] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
@ 2020-07-03 11:09 ` Fan Zhang
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
7 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 11:09 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch updates the programmer's guide to demonstrate the usage
and limitations of the cryptodev symmetric crypto data-path APIs.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
doc/guides/prog_guide/cryptodev_lib.rst | 266 ++++++++++++++++++++++++
doc/guides/rel_notes/release_20_08.rst | 8 +
2 files changed, 274 insertions(+)
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index c14f750fa..9900a593a 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -861,6 +861,272 @@ using one of the crypto PMDs available in DPDK.
num_dequeued_ops);
} while (total_num_dequeued_ops < num_enqueued_ops);
+Cryptodev Direct Symmetric Crypto Data-path APIs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Direct symmetric crypto data-path APIs are a set of APIs provided especially
+for symmetric hardware crypto PMDs that offer fast data-path
+enqueue/dequeue operations. The direct data-path APIs take advantage of
+existing Cryptodev APIs for device, queue pair, and session management. In
+addition the user is required to obtain the queue pair data pointer and the
+function pointers. The APIs are provided as an advanced feature and as an
+alternative to ``rte_cryptodev_enqueue_burst`` and
+``rte_cryptodev_dequeue_burst``. The APIs are designed for users who want to
+develop close-to-native performance symmetric crypto data-path
+implementations for applications that do not necessarily depend on cryptodev
+operations, cryptodev operation mempools, or mbufs.
+
+Cryptodev PMDs that support this feature advertise the
+``RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API`` feature flag. The user calls the
+``rte_cryptodev_sym_get_hw_ops`` function to get all the function pointers
+for the different enqueue and dequeue operations, plus the device-specific
+queue pair data. After the ``rte_crypto_hw_ops`` structure is properly set by
+the driver, the user can use the function pointers and the queue data pointer
+in the structure to enqueue and dequeue crypto jobs.
+
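+The following minimal sketch, assuming ``dev_id`` and ``qp_id`` identify an
+already configured device and queue pair, shows how an application might
+check the feature flag before obtaining the function pointers:
+
+.. code-block:: c
+
+ struct rte_crypto_hw_ops hw_ops;
+ struct rte_cryptodev_info info;
+
+ rte_cryptodev_info_get(dev_id, &info);
+ if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API))
+ return -1; /* direct data-path APIs not supported */
+
+ /* qp_id must refer to a queue pair already set up on dev_id */
+ if (rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops) < 0)
+ return -1;
+
+ /* hw_ops.qp and the enqueue/dequeue function pointers are now valid */
+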
+To simplify the enqueue APIs a symmetric job structure is defined:
+
+.. code-block:: c
+
+ /**
+ * Asynchronous operation job descriptor.
+ * Used by HW crypto devices direct API call that supports such activity
+ **/
+ struct rte_crypto_sym_job {
+ union {
+ /**
+ * When RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL bit is set in flags, sgl
+ * field is used as input data. Otherwise data_iova is
+ * used.
+ **/
+ rte_iova_t data_iova;
+ struct rte_crypto_sgl *sgl;
+ };
+ union {
+ /**
+ * Different than cryptodev ops, all ofs and len fields have
+ * the unit of bytes (including Snow3G/Kasumi/Zuc).
+ **/
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ } cipher_only;
+ struct {
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } auth_only;
+ struct {
+ uint32_t aead_ofs;
+ uint32_t aead_len;
+ rte_iova_t tag_iova;
+ uint8_t *aad;
+ rte_iova_t aad_iova;
+ } aead;
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } chain;
+ };
+ uint8_t *iv;
+ rte_iova_t iv_iova;
+ };
+
+Unlike the Cryptodev operation, the ``rte_crypto_sym_job`` structure
+focuses only on the data fields required for the crypto PMD to execute a
+single job, and it is not supposed to be stored as opaque data. The user can
+freely allocate the structure on the stack and reuse it to fill all jobs.
+
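+As an illustration only, a single cipher-only job on a contiguous buffer
+could be filled and enqueued as sketched below; ``session``, ``buf_iova``,
+``buf_len`` and ``iv`` are placeholders assumed to be prepared by the
+application:
+
+.. code-block:: c
+
+ struct rte_crypto_sym_job job = {0};
+ uint64_t drv_data;
+ uint64_t flags = RTE_CRYPTO_HW_ENQ_FLAG_START |
+ RTE_CRYPTO_HW_ENQ_FLAG_END;
+
+ /* Contiguous (non-SGL) input, ciphered in place from offset 0 */
+ job.data_iova = buf_iova;
+ job.cipher_only.cipher_ofs = 0;
+ job.cipher_only.cipher_len = buf_len;
+ job.iv = iv;
+
+ if (hw_ops.enqueue_cipher(hw_ops.qp, session, &job, NULL,
+ &drv_data, flags) < 0)
+ return -1;
+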
+To use the direct symmetric crypto APIs safely, the user has to carefully
+set the correct fields in the ``rte_crypto_sym_job`` structure, otherwise the
+application or the system may crash. Also, there are a few limitations to the
+direct symmetric crypto APIs:
+
+* Only in-place operations are supported.
+* The APIs are NOT thread-safe.
+* The direct API's enqueue CANNOT be mixed with rte_cryptodev_enqueue_burst,
+ or vice versa.
+
+The following sample code shows how to use the Cryptodev direct APIs to
+process a user-defined frame of up to 32 buffers with an AES-CBC and
+HMAC-SHA chained algorithm.
+
+See *DPDK API Reference* for details on each API definition.
+
+.. code-block:: c
+
+ #include <rte_cryptodev.h>
+
+ #define FRAME_ELT_OK 0
+ #define FRAME_ELT_FAIL 1
+ #define FRAME_OK 0
+ #define FRAME_SOME_ELT_ERROR 1
+ #define FRAME_SIZE 32
+
+ /* Sample frame element struct */
+ struct sample_frame_elt {
+ /* The status field of frame element */
+ uint8_t status;
+ /* Pre-created and initialized cryptodev session */
+ struct rte_cryptodev_sym_session *session;
+ union {
+ rte_iova_t data;
+ struct rte_crypto_sgl sgl;
+ };
+ uint32_t data_len;
+ rte_iova_t digest;
+ uint8_t *iv;
+ uint8_t is_sgl;
+ };
+
+ /* Sample frame struct to describe up to 32 crypto jobs */
+ struct sample_frame {
+ struct sample_frame_elt elts[FRAME_SIZE]; /**< All frame elements */
+ uint32_t n_elts; /**< Number of elements */
+ uint8_t status; /**< Frame status: FRAME_OK or FRAME_SOME_ELT_ERROR */
+ };
+
+ /* Global Cryptodev Direct API structure */
+ static struct rte_crypto_hw_ops hw_ops;
+
+ /* Initialization */
+ static int
+ frame_operation_init(
+ uint8_t cryptodev_id, /**< Initialized cryptodev ID */
+ uint16_t qp_id /**< Initialized queue pair ID */)
+ {
+ int ret;
+
+ /* Get APIs */
+ ret = rte_cryptodev_sym_get_hw_ops(cryptodev_id, qp_id, &hw_ops);
+ /* If the device does not support this feature or the queue pair is
+ not initialized, return -1 */
+ if (ret < 0)
+ return -1;
+ return 0;
+ }
+
+ /* Frame enqueue function use direct AES-CBC-* + HMAC-SHA* API */
+ static int
+ enqueue_frame_to_direct_api(
+ struct sample_frame *frame /**< Initialized user frame struct */)
+ {
+ struct rte_crypto_sym_job job;
+ uint64_t drv_data, flags = 0;
+ uint32_t i;
+ int ret;
+
+ /* Fill all sample frame element data into HW queue pair */
+ for (i = 0; i < frame->n_elts; i++) {
+ struct sample_frame_elt *fe = &frame->elts[i];
+
+ /* if it is the first element in the frame, set the START and
+ SET_OPAQUE flags to let the driver know it is the first
+ element, fill drv_data, and store the frame pointer as the
+ opaque data. */
+ if (i == 0)
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_START |
+ RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE;
+ else
+ flags &= ~(RTE_CRYPTO_HW_ENQ_FLAG_START |
+ RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE);
+
+ /* if it is the last element in the frame, set the END flag to
+ kick the HW queue */
+ if (i == frame->n_elts - 1)
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_END;
+ else
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_END;
+
+ /* Fill the job data with frame element data */
+ if (fe->is_sgl != 0) {
+ /* The buffer is a SGL buffer */
+ job.sgl = &fe->sgl;
+ /* Set SGL flag */
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ } else {
+ job.data_iova = fe->data;
+ /* Unset SGL flag in the job */
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ }
+
+ job.chain.cipher_ofs = job.chain.auth_ofs = 0;
+ job.chain.cipher_len = job.chain.auth_len = fe->data_len;
+ job.chain.digest_iova = fe->digest;
+
+ job.iv = fe->iv;
+
+ /* Call direct data-path enqueue chaining op API */
+ ret = hw_ops.enqueue_chain(hw_ops.qp, fe->session, &job,
+ (void *)frame, &drv_data, flags);
+ /**
+ * In case one element fails to be enqueued, simply abandon
+ * enqueuing the whole frame.
+ **/
+ if (ret < 0)
+ return -1;
+
+ /**
+ * At this point the frame element is enqueued. The job buffer
+ * can be safely reused for enqueuing the next frame element.
+ **/
+ }
+
+ return 0;
+ }
+
+ /**
+ * Sample function to write the frame element status field based on the
+ * driver returned operation result. The function's return type and
+ * parameters should follow the prototype rte_crpyto_hw_user_post_deq_cb_fn()
+ * in rte_cryptodev.h
+ **/
+ static __rte_always_inline void
+ write_frame_elt_status(void *data, uint32_t index, uint8_t is_op_success)
+ {
+ struct sample_frame *frame = data;
+ frame->elts[index + 1].status = is_op_success ? FRAME_ELT_OK :
+ FRAME_ELT_FAIL;
+ }
+
+ /* Frame dequeue function use direct dequeue API */
+ static struct sample_frame *
+ dequeue_frame_with_direct_api(void)
+ {
+ struct sample_frame *ret_frame;
+ uint64_t flags, drv_data;
+ uint32_t n_fail, n_fail_first = 0;
+ int ret;
+
+ /* Dequeue first job, which should have frame data stored in opaque */
+ flags = RTE_CRYPTO_HW_DEQ_FLAG_START;
+ ret_frame = hw_ops.dequeue_one(hw_ops.qp, &drv_data, flags, &ret);
+ if (ret == 0) {
+ /* ret == 0, means it is still under processing */
+ return NULL;
+ } else if (ret == 1) {
+ /* ret_frame is successfully retrieved, the ret stores the
+ operation result */
+ ret_frame->elts[0].status = FRAME_ELT_OK;
+ } else {
+ ret_frame->elts[0].status = FRAME_ELT_FAIL;
+ n_fail_first = 1;
+ }
+
+ /* Query if n_elts has been processed, if not return NULL */
+ if (!hw_ops.query_processed(hw_ops.qp, ret_frame->n_elts))
+ return NULL;
+
+ /* We are sure all elements have been processed, dequeue them all */
+ flags = 0;
+ ret = hw_ops.dequeue_many(hw_ops.qp, &drv_data, (void *)ret_frame,
+ write_frame_elt_status, ret_frame->n_elts - 1, flags, &n_fail);
+
+ if (n_fail + n_fail_first > 0)
+ ret_frame->status = FRAME_SOME_ELT_ERROR;
+ else
+ ret_frame->status = FRAME_OK;
+
+ return ret_frame;
+ }
+
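+Putting the two helpers above together, a caller could, for example, enqueue
+a frame and then poll for its completion. In the sketch below ``frame`` is an
+initialized ``struct sample_frame`` and ``handle_frame`` is a hypothetical
+application callback:
+
+.. code-block:: c
+
+ struct sample_frame *done = NULL;
+
+ if (enqueue_frame_to_direct_api(frame) < 0)
+ return -1;
+
+ /* Poll until the whole frame has been processed by the device */
+ while (done == NULL)
+ done = dequeue_frame_with_direct_api();
+
+ handle_frame(done);
+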
Asymmetric Cryptography
-----------------------
diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
index 39064afbe..eb973693d 100644
--- a/doc/guides/rel_notes/release_20_08.rst
+++ b/doc/guides/rel_notes/release_20_08.rst
@@ -56,6 +56,14 @@ New Features
Also, make sure to start the actual text at the margin.
=========================================================
+ * **Added Cryptodev data-path APIs for non-mbuf-centric data-paths.**
+
+ A set of data-path APIs that are not based on cryptodev operations has
+ been added to cryptodev. The APIs are designed for external applications
+ or libraries that want to use cryptodev but whose data-path
+ implementations are not mbuf-centric. The QAT symmetric PMD is also
+ updated to support these APIs.
+
Removed Items
-------------
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v4 0/4] cryptodev: add symmetric crypto data-path APIs
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
` (6 preceding siblings ...)
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 4/4] doc: add cryptodev direct APIs guide Fan Zhang
@ 2020-07-03 12:49 ` Fan Zhang
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 1/4] " Fan Zhang
` (4 more replies)
7 siblings, 5 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 12:49 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang, Piotr Bronowski
This patch adds symmetric crypto data-path APIs for Cryptodev. Direct
symmetric crypto data-path APIs are a set of APIs that provide
more HW friendly enqueue/dequeue data-path functions as an alternative
approach to ``rte_cryptodev_enqueue_burst`` and
``rte_cryptodev_dequeue_burst``. The APIs are designed for external
libraries/applications that want to use Cryptodev as a symmetric crypto
data-path accelerator but are not necessarily mbuf-centric in their data
path. With the APIs the cycle cost spent on converting their data structures
to DPDK cryptodev operations/mbufs can be reduced, and the dependency on the
DPDK crypto operation mempool can be relieved.
It is expected that the user can develop close-to-native performance
symmetric crypto data-path implementations with the functions provided
in this patchset.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
v4:
- Added missed patch.
v3:
- Instead of QAT only API, moved the API to cryptodev.
- Added cryptodev feature flags.
v2:
- Used a structure to simplify parameters.
- Added unit tests.
- Added documentation.
Fan Zhang (4):
cryptodev: add symmetric crypto data-path APIs
crypto/qat: add support to direct data-path APIs
test/crypto: add unit-test for cryptodev direct APIs
doc: add cryptodev direct APIs guide
app/test/test_cryptodev.c | 353 +++++++++-
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 +-
doc/guides/prog_guide/cryptodev_lib.rst | 266 +++++++
doc/guides/rel_notes/release_20_08.rst | 8 +
drivers/common/qat/Makefile | 2 +
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/crypto/qat/meson.build | 1 +
drivers/crypto/qat/qat_sym.c | 1 -
drivers/crypto/qat/qat_sym_job.c | 661 ++++++++++++++++++
drivers/crypto/qat/qat_sym_job.h | 12 +
drivers/crypto/qat/qat_sym_pmd.c | 7 +-
lib/librte_cryptodev/rte_crypto_sym.h | 48 ++
lib/librte_cryptodev/rte_cryptodev.c | 22 +
lib/librte_cryptodev/rte_cryptodev.h | 173 ++++-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 12 +-
.../rte_cryptodev_version.map | 4 +
18 files changed, 1587 insertions(+), 46 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_job.c
create mode 100644 drivers/crypto/qat/qat_sym_job.h
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v4 1/4] cryptodev: add symmetric crypto data-path APIs
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
@ 2020-07-03 12:49 ` Fan Zhang
2020-07-04 18:16 ` Akhil Goyal
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 2/4] crypto/qat: add support to direct " Fan Zhang
` (3 subsequent siblings)
4 siblings, 1 reply; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 12:49 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang, Piotr Bronowski
This patch adds data-path APIs to cryptodev. The APIs are organized as
a data structure containing function pointers for different enqueue and
dequeue operations. A public API is added to obtain the function
pointers and the necessary queue pair data from the device queue pair.
This patch depends on patch-72157 ("cryptodev: add function to check
if qp was setup")
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
---
lib/librte_cryptodev/rte_crypto_sym.h | 48 +++++
lib/librte_cryptodev/rte_cryptodev.c | 22 +++
lib/librte_cryptodev/rte_cryptodev.h | 173 +++++++++++++++++-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 12 +-
.../rte_cryptodev_version.map | 4 +
5 files changed, 255 insertions(+), 4 deletions(-)
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index da961a19d..e237e3cfa 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -87,6 +87,54 @@ union rte_crypto_sym_ofs {
} ofs;
};
+
+/**
+ * Asynchronous operation job descriptor.
+ * Used by HW crypto devices direct API call that supports such activity
+ **/
+struct rte_crypto_sym_job {
+ union {
+ /**
+ * When RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL bit is set in flags, sgl
+ * field is used as input data. Otherwise data_iova is
+ * used.
+ **/
+ rte_iova_t data_iova;
+ struct rte_crypto_sgl *sgl;
+ };
+ union {
+ /**
+ * Different than cryptodev ops, all ofs and len fields have
+ * the unit of bytes (including Snow3G/Kasumi/Zuc).
+ **/
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ } cipher_only;
+ struct {
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } auth_only;
+ struct {
+ uint32_t aead_ofs;
+ uint32_t aead_len;
+ rte_iova_t tag_iova;
+ uint8_t *aad;
+ rte_iova_t aad_iova;
+ } aead;
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } chain;
+ };
+ uint8_t *iv;
+ rte_iova_t iv_iova;
+};
+
/** Symmetric Cipher Algorithms */
enum rte_crypto_cipher_algorithm {
RTE_CRYPTO_CIPHER_NULL = 1,
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 705387b8b..5d5f84e27 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1866,6 +1866,28 @@ rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}
+int
+rte_cryptodev_sym_get_hw_ops(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_hw_ops *hw_ops)
+{
+ struct rte_cryptodev *dev;
+
+ if (!hw_ops)
+ return -EINVAL;
+
+ memset(hw_ops, 0, sizeof(*hw_ops));
+
+ if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
+ return -EINVAL;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API) ||
+ *dev->dev_ops->sym_get_hw_ops == NULL)
+ return -ENOTSUP;
+
+ return dev->dev_ops->sym_get_hw_ops(dev, qp_id, hw_ops);
+}
+
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index d01a65825..7cd2095d7 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -466,7 +466,8 @@ rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
/**< Support symmetric session-less operations */
#define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23)
/**< Support operations on data which is not byte aligned */
-
+#define RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API (1ULL << 24)
+/**< Support hardware accelerator specific raw data as input */
/**
* Get the name of a crypto device feature flag
@@ -737,7 +738,7 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
* - 1: qp was configured
* - -ENODEV: device was not configured
*/
-int
+__rte_experimental int
rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
/**
@@ -1348,6 +1349,174 @@ rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
struct rte_crypto_sym_vec *vec);
+
+/* HW direct symmetric crypto data-path APIs */
+
+/* Bit-masks used for enqueuing job */
+#define RTE_CRYPTO_HW_ENQ_FLAG_START (1ULL << 0)
+/**< Bit-mask to indicate the first job in a burst. With this bit set the
+ * driver may write but not read the drv_data buffer, otherwise the driver
+ * shall read and update the drv_data.
+ */
+#define RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE (1ULL << 1)
+/**< Bit-mask to indicate write opaque pointer into HW crypto descriptor. */
+#define RTE_CRYPTO_HW_ENQ_FLAG_END (1ULL << 2)
+/**< Bit-mask to indicate the last job in a burst. With this bit set the
+ * driver may read but not write the drv_data buffer, and kick the HW to
+ * start processing all jobs written.
+ */
+#define RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL (1ULL << 3)
+/**< Bit-mask to indicate the input job is an SGL buffer */
+
+/* Bit-masks used for dequeuing job */
+#define RTE_CRYPTO_HW_DEQ_FLAG_START (1ULL << 0)
+/**< Bit-mask to indicate the first job to be dequeued. With this bit set the
+ * driver may write but not read the drv_data buffer, otherwise the driver
+ * shall read and update the drv_data.
+ */
+#define RTE_CRYPTO_HW_DEQ_FLAG_EXHAUST (1ULL << 1)
+/**< Bit-mask to indicate dequeuing as many as n jobs in dequeue-many function.
+ * Without this bit, once the driver finds that fewer than n jobs are ready
+ * to dequeue, it shall stop immediately, leave all processed jobs in the
+ * queue, and return the number of ready jobs as a negative value. With this
+ * bit set the function shall continue to dequeue all completed jobs and
+ * return the dequeued job count as a positive value.
+ */
+
+/**
+ * Typedef for HW direct data-path enqueue callback function.
+ *
+ * @param qp Queue pair data.
+ * @param sess Cryptodev session.
+ * @param job Job data.
+ * @param opaque Opaque data to be written to queue descriptor
+ * when RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE is
+ * set.
+ * @param drv_data User created temporary driver data for the
+ * driver to store and update data used between
+ * adjacent enqueue operations.
+ * @param flags Bitmask of RTE_CRYPTO_HW_ENQ_* flags
+ * @return
+ * - On success return 0
+ * - On fail return -1
+ **/
+typedef int (*rte_crypto_hw_enq_cb_fn)(void *qp,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags);
+
+/**
+ * Typedef for HW direct data-path dequeue one job callback function.
+ *
+ * @param qp Queue pair data.
+ * @param drv_data User created temporary driver data for the
+ * driver to store and update data used between
+ * adjacent enqueue operations.
+ * @param flags Bitmask of RTE_CRYPTO_HW_DEQ_* flags
+ * @param status The buffer for the driver to write operation
+ * status.
+ * @return
+ * - On success return the opaque data the user wrote in enqueue (if any),
+ * with status written as 1 when the operation is successful, or
+ * status written as -1 when the operation failed (e.g. bad MAC).
+ * - On fail return NULL with status written as 0 when the operation is
+ * still under processing.
+ **/
+typedef void * (*rte_crypto_hw_deq_one_cb_fn)(void *qp, uint64_t *drv_data,
+ uint64_t flags, int *status);
+
+/**
+ * Typedef that the user provided to deal with jobs' status when
+ * dequeue in a bulk.
+ *
+ * @param data User provided data.
+ * @param index Index number of the processed job.
+ * @param is_op_success Driver filled operation status.
+ **/
+typedef void (*rte_crpyto_hw_user_post_deq_cb_fn)(void *data, uint32_t index,
+ uint8_t is_op_success);
+
+/**
+ * Typedef for HW direct data-path dequeue bulk jobs callback function.
+ *
+ * @param qp Queue pair data.
+ * @param drv_data User created temporary driver data for the
+ * driver to store and update data used between
+ * adjacent enqueue operations.
+ * @param user_data User provided data to be passed into cb
+ * function.
+ * @param cb User provided callback functions to deal with
+ * driver returned job status.
+ * @param n The number of expected jobs to be dequeued.
+ * @param flags Bitmask of RTE_CRYPTO_HW_DEQ_* flags
+ * @param n_fail The buffer for driver to write the number of
+ * failed jobs.
+ * @return
+ * - Return the number of dequeued jobs.
+ **/
+typedef uint32_t (*rte_crypto_hw_deq_many_cb_fn)(void *qp, uint64_t *drv_data,
+ void *user_data, rte_crpyto_hw_user_post_deq_cb_fn cb,
+ uint32_t n, uint64_t flags, uint32_t *n_fail);
+/**
+ * Typedef for querying HW the number of processed jobs.
+ *
+ * @param qp Queue pair data.
+ * @param nb_jobs The expected processed job number.
+ * @return
+ * - If nb_jobs jobs are ready, return 1.
+ * - Otherwise return 0.
+ **/
+typedef int (*rte_crypto_hw_query_processed)(void *qp, uint32_t nb_jobs);
+
+/* Struct for user to perform HW specific enqueue/dequeue function calls */
+struct rte_crypto_hw_ops {
+ /* Driver written queue pair data pointer, should NOT be altered by
+ * the user.
+ */
+ void *qp;
+ /* Function handler to enqueue AEAD job */
+ rte_crypto_hw_enq_cb_fn enqueue_aead;
+ /* Function handler to enqueue cipher only job */
+ rte_crypto_hw_enq_cb_fn enqueue_cipher;
+ /* Function handler to enqueue auth only job */
+ rte_crypto_hw_enq_cb_fn enqueue_auth;
+ /* Function handler to enqueue cipher + hash chaining job */
+ rte_crypto_hw_enq_cb_fn enqueue_chain;
+ /* Function handler to query processed jobs */
+ rte_crypto_hw_query_processed query_processed;
+ /* Function handler to dequeue one job and return opaque data stored */
+ rte_crypto_hw_deq_one_cb_fn dequeue_one;
+ /* Function handler to dequeue many jobs */
+ rte_crypto_hw_deq_many_cb_fn dequeue_many;
+ /* Reserved */
+ void *reserved[8];
+};
+
+/**
+ * Get the symmetric crypto hardware ops function pointers and queue pair data.
+ *
+ * @param dev_id The device identifier.
+ * @param qp_id The index of the queue pair from which to retrieve
+ * processed packets. The value must be in the range
+ * [0, nb_queue_pair - 1] previously supplied to
+ * rte_cryptodev_configure().
+ * @param hw_ops User provided rte_crypto_hw_ops buffer.
+ *
+ * @return
+ * - On success hw_ops will be written the HW crypto device's queue pair data
+ * and function pointers for data enqueue/dequeue.
+ * - On fail hw_ops is cleared and negative integer is returned.
+ */
+__rte_experimental
+int
+rte_cryptodev_sym_get_hw_ops(
+ uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_hw_ops *hw_ops);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 81975d72b..28f75d1da 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -316,6 +316,10 @@ typedef uint32_t (*cryptodev_sym_cpu_crypto_process_t)
(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess,
union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec);
+struct rte_crypto_hw_ops;
+
+typedef int (*cryptodev_sym_hw_get_ops_t)(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops);
/** Crypto device operations function pointer table */
struct rte_cryptodev_ops {
@@ -348,8 +352,12 @@ struct rte_cryptodev_ops {
/**< Clear a Crypto sessions private data. */
cryptodev_asym_free_session_t asym_session_clear;
/**< Clear a Crypto sessions private data. */
- cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
- /**< process input data synchronously (cpu-crypto). */
+ union {
+ cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
+ /**< process input data synchronously (cpu-crypto). */
+ cryptodev_sym_hw_get_ops_t sym_get_hw_ops;
+ /**< Get HW crypto data-path call back functions and data */
+ };
};
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index 07a2d2f02..56f5684c8 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -85,6 +85,7 @@ EXPERIMENTAL {
rte_cryptodev_sym_session_set_user_data;
rte_crypto_asym_op_strings;
rte_crypto_asym_xform_strings;
+ rte_cryptodev_get_qp_status;
# added in 20.05
__rte_cryptodev_trace_configure;
@@ -103,4 +104,7 @@ EXPERIMENTAL {
__rte_cryptodev_trace_asym_session_clear;
__rte_cryptodev_trace_dequeue_burst;
__rte_cryptodev_trace_enqueue_burst;
+
+ # added in 20.08
+ rte_cryptodev_sym_get_hw_ops;
};
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v4 2/4] crypto/qat: add support to direct data-path APIs
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 1/4] " Fan Zhang
@ 2020-07-03 12:49 ` Fan Zhang
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 3/4] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
` (2 subsequent siblings)
4 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 12:49 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang
This patch adds symmetric crypto data-path API support to the QAT-SYM PMD.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
drivers/common/qat/Makefile | 2 +
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/crypto/qat/meson.build | 1 +
drivers/crypto/qat/qat_sym.c | 1 -
drivers/crypto/qat/qat_sym_job.c | 661 +++++++++++++++++++++++++++++++
drivers/crypto/qat/qat_sym_job.h | 12 +
drivers/crypto/qat/qat_sym_pmd.c | 7 +-
8 files changed, 686 insertions(+), 5 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_job.c
create mode 100644 drivers/crypto/qat/qat_sym_job.h
diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
index 28bd5668f..6655fd2bc 100644
--- a/drivers/common/qat/Makefile
+++ b/drivers/common/qat/Makefile
@@ -39,6 +39,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
SRCS-y += qat_sym.c
SRCS-y += qat_sym_session.c
SRCS-y += qat_sym_pmd.c
+ SRCS-y += qat_sym_job.c
+
build_qat = yes
endif
endif
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 8e6dd04eb..06e2d8c8a 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,8 @@ txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
q->csr_tail = q->tail;
}
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
uint32_t old_head, new_head;
uint32_t max_head;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 575d69059..8add1b049 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -116,4 +116,7 @@ qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
void *op_cookie __rte_unused,
uint64_t *dequeue_err_count __rte_unused);
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q);
+
#endif /* _QAT_QP_H_ */
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index fc65923a7..8a3921293 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -13,6 +13,7 @@ if dep.found()
qat_sources += files('qat_sym_pmd.c',
'qat_sym.c',
'qat_sym_session.c',
+ 'qat_sym_job.c',
'qat_asym_pmd.c',
'qat_asym.c')
qat_ext_deps += dep
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 25b6dd5f4..609180d3f 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -336,7 +336,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
set_cipher_iv(ctx->cipher_iv.length,
ctx->cipher_iv.offset,
cipher_param, op, qat_req);
-
} else if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
diff --git a/drivers/crypto/qat/qat_sym_job.c b/drivers/crypto/qat/qat_sym_job.c
new file mode 100644
index 000000000..7c0913459
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.c
@@ -0,0 +1,661 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym.h"
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+#include "qat_qp.h"
+#include "qat_sym_job.h"
+
+static __rte_always_inline int
+qat_sym_frame_fill_sgl(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_sgl *sgl, uint32_t max_len)
+{
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sgl *list;
+ int64_t len = max_len;
+ uint32_t i;
+
+ if (!sgl)
+ return -EINVAL;
+ if (sgl->num < 2 || sgl->num > QAT_SYM_SGL_MAX_NUMBER || !sgl->vec)
+ return -EINVAL;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
+ list = (struct qat_sgl *)&cookie->qat_sgl_src;
+
+ for (i = 0; i < sgl->num && len > 0; i++) {
+ list->buffers[i].len = RTE_MIN(sgl->vec[i].len, len);
+ list->buffers[i].resrvd = 0;
+ list->buffers[i].addr = sgl->vec[i].iova;
+ len -= list->buffers[i].len;
+ }
+
+ if (unlikely(len > 0))
+ return -1;
+
+ list->num_bufs = i;
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ req->comn_mid.src_length = req->comn_mid.dst_length = 0;
+ return 0;
+}
+
+static __rte_always_inline void
+qat_sym_set_cipher_param(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ uint32_t cipher_ofs, uint32_t cipher_len)
+{
+ cipher_param->cipher_offset = cipher_ofs;
+ cipher_param->cipher_length = cipher_len;
+}
+
+static __rte_always_inline void
+qat_sym_set_auth_param(struct icp_qat_fw_la_auth_req_params *auth_param,
+ uint32_t auth_ofs, uint32_t auth_len,
+ rte_iova_t digest_iova, rte_iova_t aad_iova)
+{
+ auth_param->auth_off = auth_ofs;
+ auth_param->auth_len = auth_len;
+ auth_param->auth_res_addr = digest_iova;
+ auth_param->u1.aad_adr = aad_iova;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_aead(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+ /* In case of AES-CCM this may point to user selected
+ * memory or the IV offset in the crypto_op
+ */
+ uint8_t *aad_data;
+ /* This is the true AAD length; it does not include the 18 bytes of
+ * preceding data
+ */
+ uint8_t aad_ccm_real_len;
+ uint8_t aad_len_field_sz;
+ uint32_t msg_len_be;
+ rte_iova_t aad_iova;
+ uint8_t q;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->aead.aead_ofs + job->aead.aead_len;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ aad_iova = job->aead.aad_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ aad_data = job->aead.aad;
+ aad_iova = job->aead.aad_iova;
+ aad_ccm_real_len = 0;
+ aad_len_field_sz = 0;
+ msg_len_be = rte_bswap32(job->aead.aead_len);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ aad_data = job->iv;
+ aad_iova = job->iv_iova;
+ }
+
+ q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+ ctx->digest_length, q);
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET +
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+ rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx], 0, pad_len);
+ }
+
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+ + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(job->aead.aad +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ qat_sym_set_cipher_param(cipher_param, job->aead.aead_ofs,
+ job->aead.aead_len);
+ qat_sym_set_auth_param(auth_param, job->aead.aead_ofs,
+ job->aead.aead_len, job->aead.tag_iova, aad_iova);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->aead.aead_ofs + job->aead.aead_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ if (ctx->is_single_pass) {
+ cipher_param->spc_aad_addr = aad_iova;
+ cipher_param->spc_auth_res_addr = job->aead.tag_iova;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_cipher(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->cipher_only.cipher_ofs,
+ job->cipher_only.cipher_len);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_auth(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ auth_param = (void *)((uint8_t *)&req->serv_specif_rqpars +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->auth_only.auth_ofs + job->auth_only.auth_len;
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->auth_only.auth_ofs,
+ job->auth_only.auth_len, job->auth_only.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->auth_only.auth_ofs +
+ job->auth_only.auth_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_chain(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t min_ofs = RTE_MIN(job->chain.cipher_ofs, job->chain.auth_ofs);
+ uint32_t max_len = RTE_MAX(job->chain.cipher_len, job->chain.auth_len);
+ rte_iova_t auth_iova_end;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr =
+ req->comn_mid.dest_data_addr = job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length = min_ofs + max_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->chain.cipher_ofs,
+ job->chain.cipher_len);
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->chain.auth_ofs,
+ job->chain.auth_len, job->chain.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ uint32_t len = job->chain.auth_ofs +
+ job->chain.auth_len;
+ struct rte_crypto_vec *vec = job->sgl->vec;
+ int auth_end_get = 0;
+ while (len) {
+ if (len <= vec->len) {
+ auth_iova_end = vec->iova + len;
+ auth_end_get = 1;
+ break;
+ }
+ len -= vec->len;
+ vec++;
+ }
+ if (!auth_end_get) {
+ QAT_DP_LOG(ERR, "Failed to get auth end");
+ return -1;
+ }
+ } else
+ auth_iova_end = job->data_iova + job->chain.auth_ofs +
+ job->chain.auth_len;
+
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_param->auth_off + auth_param->auth_len <
+ cipher_param->cipher_offset +
+ cipher_param->cipher_length) &&
+ (job->chain.digest_iova == auth_iova_end)) {
+ /* Handle partial digest encryption */
+ if (cipher_param->cipher_offset +
+ cipher_param->cipher_length <
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length)
+ req->comn_mid.dst_length =
+ req->comn_mid.src_length =
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length;
+ struct icp_qat_fw_comn_req_hdr *header =
+ &req->comn_hdr;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ min_ofs + max_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+#define get_rx_queue_message_at_index(q, h, i) \
+ (void *)((uint8_t *)q->base_addr + ((h + q->msg_size * (i)) & \
+ q->modulo_mask))
+
+static __rte_always_inline int
+qat_is_rx_msg_ok(struct icp_qat_fw_comn_resp *resp_msg)
+{
+ return ICP_QAT_FW_COMN_STATUS_FLAG_OK ==
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status);
+}
+
+static __rte_always_inline int
+qat_sym_query_processed_jobs(void *qat_sym_qp, uint32_t nb_jobs)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head = (rx_queue->head + (nb_jobs - 1) * rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ if (*(uint32_t *)resp == ADF_RING_EMPTY_SIG)
+ return 0;
+
+ return 1;
+}
+
+static __rte_always_inline void *
+qat_sym_job_dequeue_one(void *qat_sym_qp, uint64_t *drv_data, uint64_t flags,
+ int *is_op_success)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head;
+ void *opaque;
+
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+ head = rx_queue->head;
+ else
+ head = (uint32_t)*drv_data;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+ *is_op_success = 0;
+ return NULL;
+ }
+
+ if (unlikely(qat_is_rx_msg_ok(resp) == 0))
+ *is_op_success = -1;
+ else
+ *is_op_success = 1;
+
+ opaque = (void *)(uintptr_t)resp->opaque_data;
+
+ rx_queue->head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ rx_queue->nb_processed_responses++;
+ qp->dequeued++;
+ qp->stats.dequeued_count++;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+
+ return opaque;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_job_dequeue_n(void *qat_sym_qp, uint64_t *drv_data,
+ void *user_data, rte_crpyto_hw_user_post_deq_cb_fn cb,
+ uint32_t n, uint64_t flags, uint32_t *n_failed_jobs)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head, i;
+ uint32_t status, total_fail = 0;
+
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+ head = rx_queue->head;
+ else
+ head = (uint32_t)*drv_data;
+
+ for (i = 0; i < n; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_EXHAUST)
+ break;
+ return -i;
+ }
+
+ status = qat_is_rx_msg_ok(resp);
+ /* qat_is_rx_msg_ok() returns 1 on success, so count the failures */
+ total_fail += !status;
+ cb(user_data, i, status);
+
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ }
+
+ rx_queue->head = head;
+ rx_queue->nb_processed_responses += i;
+ qp->dequeued += i;
+ qp->stats.dequeued_count += i;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+ *n_failed_jobs = total_fail;
+
+ return i;
+}
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops)
+{
+ struct qat_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp->service_type != QAT_SERVICE_SYMMETRIC)
+ return -EINVAL;
+
+ hw_ops->qp = (void *)qp;
+ hw_ops->enqueue_aead = qat_sym_job_enqueue_aead;
+ hw_ops->enqueue_cipher = qat_sym_job_enqueue_cipher;
+ hw_ops->enqueue_auth = qat_sym_job_enqueue_auth;
+ hw_ops->enqueue_chain = qat_sym_job_enqueue_chain;
+ hw_ops->dequeue_one = qat_sym_job_dequeue_one;
+ hw_ops->dequeue_many = qat_sym_job_dequeue_n;
+ hw_ops->query_processed = qat_sym_query_processed_jobs;
+
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_sym_job.h b/drivers/crypto/qat/qat_sym_job.h
new file mode 100644
index 000000000..b11aeb841
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_JOB_H_
+#define _QAT_SYM_JOB_H_
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops);
+
+#endif /* _QAT_SYM_JOB_H_ */
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index e887c880f..be9d73c0a 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -13,6 +13,7 @@
#include "qat_sym.h"
#include "qat_sym_session.h"
#include "qat_sym_pmd.h"
+#include "qat_sym_job.h"
#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
@@ -234,7 +235,8 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
/* Crypto related operations */
.sym_session_get_size = qat_sym_session_get_private_size,
.sym_session_configure = qat_sym_session_configure,
- .sym_session_clear = qat_sym_session_clear
+ .sym_session_clear = qat_sym_session_clear,
+ .sym_get_hw_ops = qat_sym_get_ops,
};
static uint16_t
@@ -308,7 +310,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
- RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
+ RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
+ RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API;
internals = cryptodev->data->dev_private;
internals->qat_dev = qat_pci_dev;
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v4 3/4] test/crypto: add unit-test for cryptodev direct APIs
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 1/4] " Fan Zhang
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 2/4] crypto/qat: add support to direct " Fan Zhang
@ 2020-07-03 12:49 ` Fan Zhang
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 4/4] doc: add cryptodev direct APIs guide Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
4 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 12:49 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang
This patch adds QAT tests that use the cryptodev symmetric crypto
direct APIs.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
app/test/test_cryptodev.c | 353 ++++++++++++++++++++++++--
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 ++--
3 files changed, 372 insertions(+), 37 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 8f631468b..9fbbe1d6c 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -55,6 +55,8 @@ static int gbl_driver_id;
static enum rte_security_session_action_type gbl_action_type =
RTE_SECURITY_ACTION_TYPE_NONE;
+int qat_api_test;
+
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
struct rte_mempool *large_mbuf_pool;
@@ -142,6 +144,154 @@ ceil_byte_length(uint32_t num_bits)
return (num_bits >> 3);
}
+void
+process_sym_hw_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits)
+{
+ int32_t n;
+ struct rte_crypto_hw_ops hw_ops;
+ struct rte_crypto_op *op_ret;
+ struct rte_crypto_sym_op *sop;
+ struct rte_crypto_sym_job job;
+ struct rte_crypto_sgl sgl;
+ struct rte_crypto_vec vec[UINT8_MAX] = { {0} };
+ int ret;
+ uint32_t min_ofs = 0, max_len = 0;
+ uint64_t drv_data;
+ uint64_t flags = RTE_CRYPTO_HW_ENQ_FLAG_START |
+ RTE_CRYPTO_HW_ENQ_FLAG_END |
+ RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE;
+ enum {
+ cipher = 0,
+ auth,
+ chain,
+ aead
+ } qat_api_test_type;
+ uint32_t count = 0;
+
+ memset(&job, 0, sizeof(job));
+
+ ret = rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops);
+ if (ret) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ return;
+ }
+
+ sop = op->sym;
+
+ if (is_cipher && is_auth) {
+ qat_api_test_type = chain;
+ min_ofs = RTE_MIN(sop->cipher.data.offset,
+ sop->auth.data.offset);
+ max_len = RTE_MAX(sop->cipher.data.length,
+ sop->auth.data.length);
+ } else if (is_cipher) {
+ qat_api_test_type = cipher;
+ min_ofs = sop->cipher.data.offset;
+ max_len = sop->cipher.data.length;
+ } else if (is_auth) {
+ qat_api_test_type = auth;
+ min_ofs = sop->auth.data.offset;
+ max_len = sop->auth.data.length;
+ } else { /* aead */
+ qat_api_test_type = aead;
+ min_ofs = sop->aead.data.offset;
+ max_len = sop->aead.data.length;
+ }
+
+ if (len_in_bits) {
+ max_len = max_len >> 3;
+ min_ofs = min_ofs >> 3;
+ }
+
+ n = rte_crypto_mbuf_to_vec(sop->m_src, min_ofs, max_len,
+ vec, RTE_DIM(vec));
+ if (n < 0 || n != sop->m_src->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ if (n > 1) {
+ sgl.vec = vec;
+ sgl.num = n;
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ job.sgl = &sgl;
+ } else
+ job.data_iova = rte_pktmbuf_iova(sop->m_src);
+
+ switch (qat_api_test_type) {
+ case aead:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+ job.aead.aead_ofs = min_ofs;
+ job.aead.aead_len = max_len;
+ job.aead.aad = sop->aead.aad.data;
+ job.aead.aad_iova = sop->aead.aad.phys_addr;
+ job.aead.tag_iova = sop->aead.digest.phys_addr;
+ ret = hw_ops.enqueue_aead(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case cipher:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.cipher_only.cipher_ofs = min_ofs;
+ job.cipher_only.cipher_len = max_len;
+ ret = hw_ops.enqueue_cipher(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case auth:
+ job.auth_only.auth_ofs = min_ofs;
+ job.auth_only.auth_len = max_len;
+ job.auth_only.digest_iova = sop->auth.digest.phys_addr;
+ ret = hw_ops.enqueue_auth(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case chain:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+ job.chain.cipher_ofs = sop->cipher.data.offset;
+ job.chain.cipher_len = sop->cipher.data.length;
+ if (len_in_bits) {
+ job.chain.cipher_len = job.chain.cipher_len >> 3;
+ job.chain.cipher_ofs = job.chain.cipher_ofs >> 3;
+ }
+ job.chain.auth_ofs = sop->auth.data.offset;
+ job.chain.auth_len = sop->auth.data.length;
+ if (len_in_bits) {
+ job.chain.auth_len = job.chain.auth_len >> 3;
+ job.chain.auth_ofs = job.chain.auth_ofs >> 3;
+ }
+ job.chain.digest_iova = sop->auth.digest.phys_addr;
+ ret = hw_ops.enqueue_chain(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ }
+
+ if (ret < 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ ret = 0;
+
+ while (ret == 0 && count++ < 1024) {
+ ret = hw_ops.query_processed(hw_ops.qp, 1);
+ if (!ret)
+ rte_delay_ms(1);
+ }
+ if (ret < 0 || count >= 1024) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ flags = RTE_CRYPTO_HW_DEQ_FLAG_START;
+ op_ret = hw_ops.dequeue_one(hw_ops.qp, &drv_data, flags, &ret);
+ if (op_ret != op || ret != 1)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+}
+
static void
process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
{
@@ -2451,7 +2601,11 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -2530,7 +2684,11 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2600,6 +2758,9 @@ test_kasumi_authentication(const struct kasumi_hash_test_data *tdata)
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_crypt_auth_op(ts_params->valid_devs[0],
ut_params->op);
+ else if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
else
ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
@@ -2671,7 +2832,11 @@ test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2878,8 +3043,12 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
- ut_params->op);
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -2964,7 +3133,11 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3287,7 +3460,11 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3362,7 +3539,11 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3737,7 +3918,11 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -3905,7 +4090,11 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4000,7 +4189,11 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4136,7 +4329,11 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4325,7 +4522,11 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4507,7 +4708,11 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4697,7 +4902,11 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4838,7 +5047,11 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4925,7 +5138,11 @@ test_zuc_encryption(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5012,7 +5229,11 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5100,7 +5321,11 @@ test_zuc_authentication(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5232,7 +5457,11 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5418,7 +5647,11 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -7024,6 +7257,9 @@ test_authenticated_encryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -7993,6 +8229,9 @@ test_authenticated_decryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (qat_api_test == 1)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -11284,6 +11523,9 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
if (oop == IN_PLACE &&
gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (oop == IN_PLACE && qat_api_test == 1)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -13241,6 +13483,75 @@ test_cryptodev_nitrox(void)
return unit_test_suite_runner(&cryptodev_nitrox_testsuite);
}
+static struct unit_test_suite cryptodev_sym_direct_api_testsuite = {
+ .suite_name = "Crypto Sym direct API Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_cipheronly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_chain_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_qat_sym_direct_api(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ int ret;
+
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check that both "
+ "CONFIG_RTE_LIBRTE_PMD_QAT and CONFIG_RTE_LIBRTE_PMD_QAT_SYM "
+ "are enabled in config file to run this testsuite.\n");
+ return TEST_SKIPPED;
+ }
+
+ qat_api_test = 1;
+ ret = unit_test_suite_runner(&cryptodev_sym_direct_api_testsuite);
+ qat_api_test = 0;
+
+ return ret;
+}
+
+REGISTER_TEST_COMMAND(cryptodev_qat_sym_api_autotest, test_qat_sym_direct_api);
REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_mb_autotest,
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 41542e055..2854115aa 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -71,6 +71,8 @@
#define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
+extern int qat_api_test;
+
/**
* Write (spread) data from buffer to mbuf data
*
@@ -209,4 +211,8 @@ create_segmented_mbuf(struct rte_mempool *mbuf_pool, int pkt_len,
return NULL;
}
+void
+process_sym_hw_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits);
+
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_blockcipher.c b/app/test/test_cryptodev_blockcipher.c
index 642b54971..dfa74a449 100644
--- a/app/test/test_cryptodev_blockcipher.c
+++ b/app/test/test_cryptodev_blockcipher.c
@@ -461,25 +461,43 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
/* Process crypto operation */
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Error sending packet for encryption");
- status = TEST_FAILED;
- goto error_exit;
- }
+ if (qat_api_test) {
+ uint8_t is_cipher = 0, is_auth = 0;
+
+ if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
+ RTE_LOG(DEBUG, USER1,
+ "QAT direct API does not support OOP, Test Skipped.\n");
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "SKIPPED");
+ status = TEST_SUCCESS;
+ goto error_exit;
+ }
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
+ is_cipher = 1;
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
+ is_auth = 1;
+
+ process_sym_hw_api_op(dev_id, 0, op, is_cipher, is_auth, 0);
+ } else {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for encryption");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
- op = NULL;
+ op = NULL;
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
- rte_pause();
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
+ rte_pause();
- if (!op) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Failed to process sym crypto op");
- status = TEST_FAILED;
- goto error_exit;
+ if (!op) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process sym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
}
debug_hexdump(stdout, "m_src(after):",
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v4 4/4] doc: add cryptodev direct APIs guide
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
` (2 preceding siblings ...)
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 3/4] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
@ 2020-07-03 12:49 ` Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
4 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-03 12:49 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang
This patch updates the programmer's guide to demonstrate the usage
and limitations of the cryptodev symmetric crypto data-path APIs.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
doc/guides/prog_guide/cryptodev_lib.rst | 266 ++++++++++++++++++++++++
doc/guides/rel_notes/release_20_08.rst | 8 +
2 files changed, 274 insertions(+)
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index c14f750fa..9900a593a 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -861,6 +861,272 @@ using one of the crypto PMDs available in DPDK.
num_dequeued_ops);
} while (total_num_dequeued_ops < num_enqueued_ops);
+Cryptodev Direct Symmetric Crypto Data-path APIs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Direct symmetric crypto data-path APIs are a set of APIs provided specifically
+for symmetric HW crypto PMDs that offer fast data-path enqueue/dequeue
+operations. The direct data-path APIs take advantage of the existing Cryptodev
+APIs for device, queue pair, and session management. In addition, the user is
+required to obtain the queue pair pointer data and the function pointers. The
+APIs are provided as an advanced feature, as an alternative to
+``rte_cryptodev_enqueue_burst`` and ``rte_cryptodev_dequeue_burst``. They are
+designed to let the user develop close-to-native performance symmetric crypto
+data-path implementations for applications that do not necessarily depend on
+cryptodev operations, cryptodev operation mempools, or mbufs.
+
+Cryptodev PMDs that support this feature advertise the
+``RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API`` feature flag. The user calls
+``rte_cryptodev_sym_get_hw_ops`` to obtain all the function pointers for the
+different enqueue and dequeue operations, plus the device specific queue pair
+data. After the ``rte_crypto_hw_ops`` structure is properly set by the driver,
+the user can use the function pointers and the queue data pointers in the
+structure to enqueue and dequeue crypto jobs.
+
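+For example, an application would typically check the feature flag before
+requesting the ops structure. A minimal sketch (``dev_id`` and ``qp_id`` are
+assumed to be an already configured device and queue pair):
+
+.. code-block:: c
+
+    struct rte_cryptodev_info info;
+    struct rte_crypto_hw_ops hw_ops;
+
+    rte_cryptodev_info_get(dev_id, &info);
+    if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API))
+        return -ENOTSUP; /* no direct data-path support on this device */
+
+    if (rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops) != 0)
+        return -EINVAL; /* queue pair data could not be retrieved */
+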
+To simplify the enqueue APIs, a symmetric job structure is defined:
+
+.. code-block:: c
+
+ /**
+ * Asynchronous operation job descriptor.
+ * Used by HW crypto devices direct API call that supports such activity
+ **/
+ struct rte_crypto_sym_job {
+ union {
+ /**
+ * When RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL bit is set in flags, sgl
+ * field is used as input data. Otherwise data_iova is
+ * used.
+ **/
+ rte_iova_t data_iova;
+ struct rte_crypto_sgl *sgl;
+ };
+ union {
+ /**
+ * Unlike cryptodev ops, all ofs and len fields have
+ * the unit of bytes (including Snow3G/Kasumi/Zuc).
+ **/
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ } cipher_only;
+ struct {
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } auth_only;
+ struct {
+ uint32_t aead_ofs;
+ uint32_t aead_len;
+ rte_iova_t tag_iova;
+ uint8_t *aad;
+ rte_iova_t aad_iova;
+ } aead;
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } chain;
+ };
+ uint8_t *iv;
+ rte_iova_t iv_iova;
+ };
+
+Unlike the Cryptodev operation, the ``rte_crypto_sym_job`` structure focuses
+only on the data fields required for the crypto PMD to execute a single job,
+and it is not meant to be stored as opaque data. The user can freely allocate
+the structure on the stack and reuse it to fill all jobs.
+
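+For example, a minimal sketch of that stack-allocation pattern (``hw_ops``,
+``session``, ``opaque``, ``buf_iova``, ``buf_len``, ``digest_iova`` and ``iv``
+are assumed to have been prepared beforehand):
+
+.. code-block:: c
+
+    struct rte_crypto_sym_job job; /* lives on the stack, reused per job */
+    uint64_t drv_data;
+    uint64_t flags = RTE_CRYPTO_HW_ENQ_FLAG_START |
+            RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE |
+            RTE_CRYPTO_HW_ENQ_FLAG_END;
+
+    /* single linear buffer, cipher + auth chained over the whole buffer */
+    job.data_iova = buf_iova;
+    job.chain.cipher_ofs = job.chain.auth_ofs = 0;
+    job.chain.cipher_len = job.chain.auth_len = buf_len;
+    job.chain.digest_iova = digest_iova;
+    job.iv = iv;
+
+    if (hw_ops.enqueue_chain(hw_ops.qp, session, &job, opaque,
+            &drv_data, flags) < 0)
+        return -1;
+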
+To use the direct symmetric crypto APIs safely, the user has to carefully set
+the correct fields in the ``rte_crypto_sym_job`` structure, otherwise the
+application or the system may crash. There are also a few limitations to the
+direct symmetric crypto APIs:
+
+* Only in-place operations are supported.
+* The APIs are NOT thread-safe.
+* The direct API enqueue CANNOT be mixed with ``rte_cryptodev_enqueue_burst``,
+  or vice versa.
+
+The following sample code shows how to use the Cryptodev direct API to process
+a user-defined frame of up to 32 buffers with a chained AES-CBC and HMAC-SHA
+algorithm.
+
+See the *DPDK API Reference* for details on each API definition.
+
+.. code-block:: c
+
+ #include <rte_cryptodev.h>
+
+ #define FRAME_ELT_OK 0
+ #define FRAME_ELT_FAIL 1
+ #define FRAME_OK 0
+ #define FRAME_SOME_ELT_ERROR 1
+ #define FRAME_SIZE 32
+
+ /* Sample frame element struct */
+ struct sample_frame_elt {
+ /* The status field of frame element */
+ uint8_t status;
+ /* Pre-created and initialized cryptodev session */
+ struct rte_cryptodev_sym_session *session;
+ union {
+ rte_iova_t data;
+ struct rte_crypto_sgl sgl;
+ };
+ uint32_t data_len;
+ rte_iova_t digest;
+ uint8_t *iv;
+ uint8_t is_sgl;
+ };
+
+ /* Sample frame struct to describe up to 32 crypto jobs */
+ struct sample_frame {
+ struct sample_frame_elt elts[FRAME_SIZE]; /**< All frame elements */
+ uint32_t n_elts; /**< Number of elements */
+ };
+
+ /* Global Cryptodev Direct API structure */
+ static struct rte_crypto_hw_ops hw_ops;
+
+ /* Initialization */
+ static int
+ frame_operation_init(
+ uint8_t cryptodev_id, /**< Initialized cryptodev ID */
+ uint16_t qp_id /**< Initialized queue pair ID */)
+ {
+ int ret;
+
+ /* Get APIs */
+ ret = rte_cryptodev_sym_get_hw_ops(cryptodev_id, qp_id, &hw_ops);
+ /* If the device does not support this feature or the queue pair is
+ not initialized, return -1 */
+ if (ret < 0)
+ return -1;
+ return 0;
+ }
+
+ /* Frame enqueue function using the direct AES-CBC-* + HMAC-SHA* API */
+ static int
+ enqueue_frame_to_direct_api(
+ struct sample_frame *frame /**< Initialized user frame struct */)
+ {
+ struct rte_crypto_sym_job job;
+ uint64_t drv_data, flags = 0;
+ uint32_t i;
+ int ret;
+
+ /* Fill all sample frame element data into HW queue pair */
+ for (i = 0; i < frame->n_elts; i++) {
+ struct sample_frame_elt *fe = &frame->elts[i];
+ int ret;
+
+ /* if it is the first element in the frame, set the START flag to
+ let the driver know it is the first element and fill drv_data. */
+ if (i == 0)
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_START;
+ else
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_START;
+
+ /* if it is the last element in the frame, set the END flag to
+ kick the HW queue */
+ if (i == frame->n_elts - 1)
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_END;
+ else
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_END;
+
+ /* Fill the job data with frame element data */
+ if (fe->is_sgl != 0) {
+ /* The buffer is a SGL buffer */
+ job.sgl = &fe->sgl;
+ /* Set SGL flag */
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ } else {
+ job.data_iova = fe->data;
+ /* Unset SGL flag in the job */
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ }
+
+ job.chain.cipher_ofs = job.chain.auth_ofs = 0;
+ job.chain.cipher_len = job.chain.auth_len = fe->data_len;
+ job.chain.digest_iova = fe->digest;
+
+ job.iv = fe->iv;
+
+ /* Call direct data-path enqueue chaining op API */
+ ret = hw_ops.enqueue_chain(hw_ops.qp, fe->session, &job,
+ (void *)frame, &drv_data, flags);
+ /**
+ * In case one element fails to be enqueued, simply abandon
+ * enqueuing the whole frame.
+ **/
+ if (ret < 0)
+ return -1;
+
+ /**
+ * At this point the element is enqueued. The job buffer can be
+ * safely reused for enqueuing the next frame element.
+ **/
+ }
+
+ return 0;
+ }
+
+ /**
+ * Sample function to write frame element status field based on
+ * driver returned operation result. The function return and parameter
+ * should follow the prototype rte_crpyto_hw_user_post_deq_cb_fn() in
+ * rte_cryptodev.h
+ **/
+ static __rte_always_inline void
+ write_frame_elt_status(void *data, uint32_t index, uint8_t is_op_success)
+ {
+ struct sample_frame *frame = data;
+ frame->elts[index + 1].status = is_op_success ? FRAME_ELT_OK :
+ FRAME_ELT_FAIL;
+ }
+
+ /* Frame dequeue function using the direct dequeue API */
+ static struct sample_frame *
+ dequeue_frame_with_direct_api(void)
+ {
+ struct sample_frame *ret_frame;
+ uint64_t flags, drv_data;
+ uint32_t n_fail, n_fail_first = 0;
+ int ret;
+
+ /* Dequeue first job, which should have frame data stored in opaque */
+ flags = RTE_CRYPTO_HW_DEQ_FLAG_START;
+ ret_frame = hw_ops.dequeue_one(hw_ops.qp, &drv_data, flags, &ret);
+ if (ret == 0) {
+ /* ret == 0, means it is still under processing */
+ return NULL;
+ } else if (ret == 1) {
+ /* ret_frame is successfully retrieved, the ret stores the
+ operation result */
+ ret_frame->elts[0].status = FRAME_ELT_OK;
+ } else {
+ ret_frame->elts[0].status = FRAME_ELT_FAIL;
+ n_fail_first = 1;
+ }
+
+ /* Query whether the remaining elements have been processed; if not,
+ return NULL */
+ if (!hw_ops.query_processed(hw_ops.qp, ret_frame->n_elts - 1))
+ return NULL;
+
+ /* We are sure all elements have been processed, dequeue them all */
+ flags = 0;
+ ret = hw_ops.dequeue_many(hw_ops.qp, &drv_data, (void *)ret_frame,
+ write_frame_elt_status, ret_frame->n_elts - 1, flags, &n_fail);
+
+ if (n_fail + n_fail_first > 0)
+ ret_frame->status = FRAME_SOME_ELT_ERROR;
+ else
+ ret_frame->status = FRAME_OK;
+
+ return ret_frame;
+ }
+
Asymmetric Cryptography
-----------------------
diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
index 39064afbe..eb973693d 100644
--- a/doc/guides/rel_notes/release_20_08.rst
+++ b/doc/guides/rel_notes/release_20_08.rst
@@ -56,6 +56,14 @@ New Features
Also, make sure to start the actual text at the margin.
=========================================================
+ * **Added Cryptodev data-path APIs for non mbuf-centric data-paths.**
+
+ A set of data-path APIs that are not based on cryptodev operations has
+ been added to cryptodev. The APIs are designed for external applications
+ or libraries that want to use cryptodev but whose data-path
+ implementations are not mbuf-centric. The QAT symmetric PMD is also
+ updated to add support for these APIs.
+
Removed Items
-------------
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [dpdk-dev] [dpdk-dev v4 1/4] cryptodev: add symmetric crypto data-path APIs
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 1/4] " Fan Zhang
@ 2020-07-04 18:16 ` Akhil Goyal
2020-07-06 10:02 ` Zhang, Roy Fan
0 siblings, 1 reply; 39+ messages in thread
From: Akhil Goyal @ 2020-07-04 18:16 UTC (permalink / raw)
To: Fan Zhang, dev, anoobj, asomalap, ruifeng.wang,
Nagadheeraj Rottela, Michael Shamis, Ankur Dwivedi, Jay Zhou,
Pablo de Lara
Cc: fiona.trahe, Piotr Bronowski, konstantin.ananyev, Thomas Monjalon
Hi Fan,
> +
> +/**
> + * Asynchronous operation job descriptor.
> + * Used by HW crypto devices direct API call that supports such activity
> + **/
> +struct rte_crypto_sym_job {
> + union {
> + /**
> + * When RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL bit is set in flags,
> sgl
> + * field is used as input data. Otherwise data_iova is
> + * used.
> + **/
> + rte_iova_t data_iova;
> + struct rte_crypto_sgl *sgl;
> + };
> + union {
> + /**
> + * Different than cryptodev ops, all ofs and len fields have
> + * the unit of bytes (including Snow3G/Kasumi/Zuc.
> + **/
> + struct {
> + uint32_t cipher_ofs;
> + uint32_t cipher_len;
> + } cipher_only;
> + struct {
> + uint32_t auth_ofs;
> + uint32_t auth_len;
> + rte_iova_t digest_iova;
> + } auth_only;
> + struct {
> + uint32_t aead_ofs;
> + uint32_t aead_len;
> + rte_iova_t tag_iova;
> + uint8_t *aad;
> + rte_iova_t aad_iova;
> + } aead;
> + struct {
> + uint32_t cipher_ofs;
> + uint32_t cipher_len;
> + uint32_t auth_ofs;
> + uint32_t auth_len;
> + rte_iova_t digest_iova;
> + } chain;
> + };
> + uint8_t *iv;
> + rte_iova_t iv_iova;
> +};
NACK,
Why do you need this structure definition again when you have similar ones
(the ones used in CPU crypto) available for the same purpose?
In case of CPU crypto, there were 2 main requirements
- synchronous API instead of enq + deq
- raw buffers.
Now for this patchset, the requirements are
- raw buffers
- asynchronous APIs
The data structures for raw buffers and crypto related offsets are already
defined, so they should be reused.
And I believe that with some changes in rte_crypto_op and rte_crypto_sym_op,
we can support raw buffers with the same APIs.
Instead of m_src and m_dst, raw buffer data structures can be combined in a
union, and some of the fields in rte_crypto_op can be left NULL in case of raw buffers.
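Something along these lines would be enough (purely illustrative, the names
below are hypothetical and not an actual DPDK definition):

	/* Hypothetical sketch: let the existing sym op carry either an mbuf
	 * or a raw-buffer descriptor, instead of adding a new job struct. */
	#include <rte_mbuf.h>
	#include <rte_crypto_sym.h>

	struct sym_op_data {
		union {
			struct rte_mbuf *m_src;         /* mbuf-based input */
			struct rte_crypto_sgl *raw_src; /* raw-buffer input */
		};
		union {
			struct rte_mbuf *m_dst;
			struct rte_crypto_sgl *raw_dst;
		};
	};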
> +/* Struct for user to perform HW specific enqueue/dequeue function calls */
> +struct rte_crypto_hw_ops {
> + /* Driver written queue pair data pointer, should NOT be alterred by
> + * the user.
> + */
> + void *qp;
> + /* Function handler to enqueue AEAD job */
> + rte_crypto_hw_enq_cb_fn enqueue_aead;
> + /* Function handler to enqueue cipher only job */
> + rte_crypto_hw_enq_cb_fn enqueue_cipher;
> + /* Function handler to enqueue auth only job */
> + rte_crypto_hw_enq_cb_fn enqueue_auth;
> + /* Function handler to enqueue cipher + hash chaining job */
> + rte_crypto_hw_enq_cb_fn enqueue_chain;
> + /* Function handler to query processed jobs */
> + rte_crypto_hw_query_processed query_processed;
> + /* Function handler to dequeue one job and return opaque data stored
> */
> + rte_crypto_hw_deq_one_cb_fn dequeue_one;
> + /* Function handler to dequeue many jobs */
> + rte_crypto_hw_deq_many_cb_fn dequeue_many;
> + /* Reserved */
> + void *reserved[8];
> +};
Why do we need such callbacks in the library?
These should be inside the drivers, or else we do the same for the
legacy case as well. The pain of finding the correct enq function for the
appropriate crypto operation is already handled by all the drivers,
and we can reuse that, or else we modify it there as well.
We should not add a lot of data paths for the user, otherwise the
APIs will become centric to a particular vendor and it will be very difficult
for the user to migrate from one vendor to another, which would defeat the
purpose of DPDK, which provides a uniform abstraction layer for all the
hardware vendors.
Adding other vendors to comment.
Regards,
Akhil
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [dpdk-dev] [dpdk-dev v4 1/4] cryptodev: add symmetric crypto data-path APIs
2020-07-04 18:16 ` Akhil Goyal
@ 2020-07-06 10:02 ` Zhang, Roy Fan
2020-07-06 12:13 ` Akhil Goyal
0 siblings, 1 reply; 39+ messages in thread
From: Zhang, Roy Fan @ 2020-07-06 10:02 UTC (permalink / raw)
To: Akhil Goyal, dev, anoobj, asomalap, ruifeng.wang,
Nagadheeraj Rottela, Michael Shamis, Ankur Dwivedi, Jay Zhou,
De Lara Guarch, Pablo
Cc: Trahe, Fiona, Bronowski, PiotrX, Ananyev, Konstantin, Thomas Monjalon
Hi Akhil,
> -----Original Message-----
> From: Akhil Goyal <akhil.goyal@nxp.com>
> Sent: Saturday, July 4, 2020 7:16 PM
> To: Zhang, Roy Fan <roy.fan.zhang@intel.com>; dev@dpdk.org;
> anoobj@marvell.com; asomalap@amd.com; ruifeng.wang@arm.com;
> Nagadheeraj Rottela <rnagadheeraj@marvell.com>; Michael Shamis
> <michaelsh@marvell.com>; Ankur Dwivedi <adwivedi@marvell.com>; Jay
> Zhou <jianjay.zhou@huawei.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>
> Cc: Trahe, Fiona <fiona.trahe@intel.com>; Bronowski, PiotrX
> <piotrx.bronowski@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Thomas Monjalon
> <thomas@monjalon.net>
> Subject: RE: [dpdk-dev v4 1/4] cryptodev: add symmetric crypto data-path
> APIs
>
> Hi Fan,
>
> > +
> > +/**
> > + * Asynchronous operation job descriptor.
> > + * Used by HW crypto devices direct API call that supports such activity
> > + **/
> > +struct rte_crypto_sym_job {
> > + union {
> > + /**
> > + * When RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL bit is set in
> flags,
> > sgl
> > + * field is used as input data. Otherwise data_iova is
> > + * used.
> > + **/
> > + rte_iova_t data_iova;
> > + struct rte_crypto_sgl *sgl;
> > + };
> > + union {
> > + /**
> > + * Different than cryptodev ops, all ofs and len fields have
> > + * the unit of bytes (including Snow3G/Kasumi/Zuc.
> > + **/
> > + struct {
> > + uint32_t cipher_ofs;
> > + uint32_t cipher_len;
> > + } cipher_only;
> > + struct {
> > + uint32_t auth_ofs;
> > + uint32_t auth_len;
> > + rte_iova_t digest_iova;
> > + } auth_only;
> > + struct {
> > + uint32_t aead_ofs;
> > + uint32_t aead_len;
> > + rte_iova_t tag_iova;
> > + uint8_t *aad;
> > + rte_iova_t aad_iova;
> > + } aead;
> > + struct {
> > + uint32_t cipher_ofs;
> > + uint32_t cipher_len;
> > + uint32_t auth_ofs;
> > + uint32_t auth_len;
> > + rte_iova_t digest_iova;
> > + } chain;
> > + };
> > + uint8_t *iv;
> > + rte_iova_t iv_iova;
> > +};
>
> NACK,
> Why do you need this structure definitions again when you have similar ones
> (the ones used in CPU crypto) available for the same purpose.
> In case of CPU crypto, there were 2 main requirements
> - synchronous API instead of enq +deq
> - raw buffers.
>
As you may have seen, the structure definition is HW centric, with
IOVA addresses throughout. Also, as you will see from the patch series, jobs
are submitted on a per-operation basis instead of in a burst, so the external
application can know sooner when a specific enqueue has failed.
> Now for this patchset, the requirement is
> - raw buffers
> - asynchronous APIs
>
> The data structure for raw buffers and crypto related offsets are already
> defined
> So they should be reused.
> And I believe with some changes in rte_crypto_op and rte_crypto_sym_op,
> We can support raw buffers with the same APIs.
> Instead of m_src and m_dst, raw buffer data structures can be combined in a
> Union and some of the fields in the rte_crypto_op can be left NULL in case of
> raw buffers.
>
This is a good point, but we would still face too many fields that have to be
left NULL, such as the digest pointers. I have given a lot of thought to this
structure. Hopefully it covers all vendors' HW symmetric crypto needs and at
the same time squeezes the required HW addresses into 1 cacheline, instead of
the 3 cacheline footprint of rte_crypto_op + rte_crypto_sym_op. Another purpose
of the structure design is that the structure buffer can be taken from the
stack and reused to fill all jobs to the PMD HW.
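(For reference, a quick compile-time check along the lines below - purely
illustrative, and assuming the job struct is exposed via rte_crypto_sym.h as
in this series - backs up the one-cacheline claim: 8 bytes for the data union,
32 bytes for the largest per-op union (aead), plus 8 + 8 bytes for iv/iv_iova,
i.e. 56 bytes in total.)

	#include <rte_common.h>
	#include <rte_crypto_sym.h>

	/* fails to compile if the job descriptor ever outgrows a cache line */
	_Static_assert(sizeof(struct rte_crypto_sym_job) <= RTE_CACHE_LINE_SIZE,
		"rte_crypto_sym_job expected to fit in one cache line");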
>
> > +/* Struct for user to perform HW specific enqueue/dequeue function calls
> */
> > +struct rte_crypto_hw_ops {
> > + /* Driver written queue pair data pointer, should NOT be alterred by
> > + * the user.
> > + */
> > + void *qp;
> > + /* Function handler to enqueue AEAD job */
> > + rte_crypto_hw_enq_cb_fn enqueue_aead;
> > + /* Function handler to enqueue cipher only job */
> > + rte_crypto_hw_enq_cb_fn enqueue_cipher;
> > + /* Function handler to enqueue auth only job */
> > + rte_crypto_hw_enq_cb_fn enqueue_auth;
> > + /* Function handler to enqueue cipher + hash chaining job */
> > + rte_crypto_hw_enq_cb_fn enqueue_chain;
> > + /* Function handler to query processed jobs */
> > + rte_crypto_hw_query_processed query_processed;
> > + /* Function handler to dequeue one job and return opaque data
> stored
> > */
> > + rte_crypto_hw_deq_one_cb_fn dequeue_one;
> > + /* Function handler to dequeue many jobs */
> > + rte_crypto_hw_deq_many_cb_fn dequeue_many;
> > + /* Reserved */
> > + void *reserved[8];
> > +};
>
> Why do we need such callbacks in the library?
> These should be inside the drivers, or else we do the same for
> Legacy case as well. The pain of finding the correct enq function for
> Appropriate crypto operation is already handled by all the drivers
> And we can reuse that or else we modify it there as well.
>
Providing a dedicated enqueue function per operation type saves the driver a
lot of branching. As mentioned, this data-path API is intended as an advanced
feature that provides close-to-native performance to external
libraries/applications that are not mbuf-centric. And I don't agree that
choosing one enqueue function out of four candidates qualifies as "pain".
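For example, a caller could resolve the handler once at session setup, so the
hot path carries no per-operation branch (a sketch only; everything except the
rte_crypto_hw_ops members quoted above is hypothetical):

	rte_crypto_hw_enq_cb_fn enqueue;

	switch (session_op_type) {      /* known when the session is created */
	case OP_AEAD:
		enqueue = hw_ops->enqueue_aead;
		break;
	case OP_CIPHER_ONLY:
		enqueue = hw_ops->enqueue_cipher;
		break;
	case OP_AUTH_ONLY:
		enqueue = hw_ops->enqueue_auth;
		break;
	default:
		enqueue = hw_ops->enqueue_chain;
		break;
	}
	/* the data path then calls enqueue() directly, with no further branching */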
> We should not add a lot of data paths for the user, otherwise the
> APIs will become centric to a particular vendor and it will be very difficult
> For the user to migrate from one vendor to another and would defeat the
> Purpose of DPDK which provide uniform abstraction layer for all the
> hardware
> Vendors.
>
The purpose of adding this data-path is performance for applications/libraries
whose data-path is not mbuf-centric, while not creating confusion. In this
version we aim to provide a friendlier data-path for them, one that is useful
to all vendors' PMDs. If there is any place in the API that blocks a PMD,
please let me know.
> Adding other vendors to comment.
>
> Regards,
> Akhil
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [dpdk-dev] [dpdk-dev v4 1/4] cryptodev: add symmetric crypto data-path APIs
2020-07-06 10:02 ` Zhang, Roy Fan
@ 2020-07-06 12:13 ` Akhil Goyal
2020-07-07 12:37 ` Zhang, Roy Fan
0 siblings, 1 reply; 39+ messages in thread
From: Akhil Goyal @ 2020-07-06 12:13 UTC (permalink / raw)
To: Zhang, Roy Fan, dev, anoobj, asomalap, ruifeng.wang,
Nagadheeraj Rottela, Michael Shamis, Ankur Dwivedi, Jay Zhou,
De Lara Guarch, Pablo
Cc: Trahe, Fiona, Bronowski, PiotrX, Ananyev, Konstantin, Thomas Monjalon
Hi Fan,
> Hi Akhil,
>
> > > +
> > > +/**
> > > + * Asynchronous operation job descriptor.
> > > + * Used by HW crypto devices direct API call that supports such activity
> > > + **/
> > > +struct rte_crypto_sym_job {
> > > + union {
> > > + /**
> > > + * When RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL bit is set in
> > flags,
> > > sgl
> > > + * field is used as input data. Otherwise data_iova is
> > > + * used.
> > > + **/
> > > + rte_iova_t data_iova;
> > > + struct rte_crypto_sgl *sgl;
> > > + };
> > > + union {
> > > + /**
> > > + * Different than cryptodev ops, all ofs and len fields have
> > > + * the unit of bytes (including Snow3G/Kasumi/Zuc.
> > > + **/
> > > + struct {
> > > + uint32_t cipher_ofs;
> > > + uint32_t cipher_len;
> > > + } cipher_only;
> > > + struct {
> > > + uint32_t auth_ofs;
> > > + uint32_t auth_len;
> > > + rte_iova_t digest_iova;
> > > + } auth_only;
> > > + struct {
> > > + uint32_t aead_ofs;
> > > + uint32_t aead_len;
> > > + rte_iova_t tag_iova;
> > > + uint8_t *aad;
> > > + rte_iova_t aad_iova;
> > > + } aead;
> > > + struct {
> > > + uint32_t cipher_ofs;
> > > + uint32_t cipher_len;
> > > + uint32_t auth_ofs;
> > > + uint32_t auth_len;
> > > + rte_iova_t digest_iova;
> > > + } chain;
> > > + };
> > > + uint8_t *iv;
> > > + rte_iova_t iv_iova;
> > > +};
> >
> > NACK,
> > Why do you need this structure definitions again when you have similar ones
> > (the ones used in CPU crypto) available for the same purpose.
> > In case of CPU crypto, there were 2 main requirements
> > - synchronous API instead of enq +deq
> > - raw buffers.
> >
>
> As you may have seen the structure definition is hW centric with the
> IOVA addresses all over. Also as you will from the patch series the operation is
> Per operation basis instead of operating in a burst. The external application
> may sooner know when a specific enqueue is failed.
You may also need to save a virtual address as well, as some hardware is able to
convert virtual to physical addresses on its own, giving a performance improvement.
I do not see an issue in using enqueue burst with burst size = 1, but since you are
doing optimizations, no hardware performs well with burst = 1; I think it is always
greater than 1.
>
> > Now for this patchset, the requirement is
> > - raw buffers
> > - asynchronous APIs
> >
> > The data structure for raw buffers and crypto related offsets are already
> > defined
> > So they should be reused.
> > And I believe with some changes in rte_crypto_op and rte_crypto_sym_op,
> > We can support raw buffers with the same APIs.
> > Instead of m_src and m_dst, raw buffer data structures can be combined in a
> > Union and some of the fields in the rte_crypto_op can be left NULL in case of
> > raw buffers.
> >
>
> This is a good point but we still face too many unnecessary fields to be NULL,
> such as
> digest pointers, I have given a lot thought to this structure. Hopefully it covers
> all vendor's HW symmetric crypto needs and in the same time it well squeeze
> the required HW addresses into 1 cacheline, instead of rte_crypto_op +
> rte_crypto_sym_op 3 cacheline footprint. Another purpose of the structure
> design
> is the structure buffer can be taken from stack and can be used to fill all
> jobs to the PMD HW.
Which fields do you think are not useful and should be set to NULL?
The digest pointers you are setting in the new structure anyway.
Your new struct supports neither sessionless operation nor security sessions,
and it does not take care of asymmetric crypto.
So whenever a vendor needs to support all of these, we would end up back at
the rte_crypto_op structure.
IMO, you only need to make m_src and m_dst a union with raw input/output
buffers. Everything else remains relevant.
Have you done any profiling using rte_crypto_op instead of this new struct?
>
> >
> > > +/* Struct for user to perform HW specific enqueue/dequeue function calls
> > */
> > > +struct rte_crypto_hw_ops {
> > > + /* Driver written queue pair data pointer, should NOT be alterred by
> > > + * the user.
> > > + */
> > > + void *qp;
> > > + /* Function handler to enqueue AEAD job */
> > > + rte_crypto_hw_enq_cb_fn enqueue_aead;
> > > + /* Function handler to enqueue cipher only job */
> > > + rte_crypto_hw_enq_cb_fn enqueue_cipher;
> > > + /* Function handler to enqueue auth only job */
> > > + rte_crypto_hw_enq_cb_fn enqueue_auth;
> > > + /* Function handler to enqueue cipher + hash chaining job */
> > > + rte_crypto_hw_enq_cb_fn enqueue_chain;
> > > + /* Function handler to query processed jobs */
> > > + rte_crypto_hw_query_processed query_processed;
> > > + /* Function handler to dequeue one job and return opaque data
> > stored
> > > */
> > > + rte_crypto_hw_deq_one_cb_fn dequeue_one;
> > > + /* Function handler to dequeue many jobs */
> > > + rte_crypto_hw_deq_many_cb_fn dequeue_many;
> > > + /* Reserved */
> > > + void *reserved[8];
> > > +};
> >
> > Why do we need such callbacks in the library?
> > These should be inside the drivers, or else we do the same for
> > Legacy case as well. The pain of finding the correct enq function for
> > Appropriate crypto operation is already handled by all the drivers
> > And we can reuse that or else we modify it there as well.
> >
>
> Providing different types of enqueue functions for specific operation type
> could save a lot of branches for the driver to handle. As mentioned this
> data-path API is intended to be used as an advanced feature to provide
> close-to-native perf to external library/applications that are not mbuf
> centric. And I don't agree classifying choosing 1 enqueue function from
> 4 candidates as "pain".
My point is: why don't we have it in the legacy code path as well?
I think it is useful in both paths. Branching is a pain for the driver.
>
> > We should not add a lot of data paths for the user, otherwise the
> > APIs will become centric to a particular vendor and it will be very difficult
> > For the user to migrate from one vendor to another and would defeat the
> > Purpose of DPDK which provide uniform abstraction layer for all the
> > hardware
> > Vendors.
> >
>
> The purpose of adding data-path for the user is performance for non-mbuf
> data-path centric applications/libraries, in the meantime not creating
> confusion. In this version we aim to provide a more friendly data-path for
I do not see the new path as friendly.
Adding a parallel new datapath will create more confusion for the application
developer. It would be convenient if we could use the same path with minimal
changes so that people can migrate easily.
> them, and aims to be useful to all vendor's PMDs. If there is any place in
> the API that blocks a PMD please let me know.
As commented above, sessionless operation, rte_security sessions and asymmetric
crypto are not supported.
>
> > Adding other vendors to comment.
> >
> > Regards,
> > Akhil
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [dpdk-dev] [dpdk-dev v4 1/4] cryptodev: add symmetric crypto data-path APIs
2020-07-06 12:13 ` Akhil Goyal
@ 2020-07-07 12:37 ` Zhang, Roy Fan
2020-07-07 20:37 ` Akhil Goyal
0 siblings, 1 reply; 39+ messages in thread
From: Zhang, Roy Fan @ 2020-07-07 12:37 UTC (permalink / raw)
To: Akhil Goyal, dev, anoobj, asomalap, ruifeng.wang,
Nagadheeraj Rottela, Michael Shamis, Ankur Dwivedi, Jay Zhou,
De Lara Guarch, Pablo
Cc: Trahe, Fiona, Bronowski, PiotrX, Ananyev, Konstantin, Thomas Monjalon
Hi Akhil,
> -----Original Message-----
> From: Akhil Goyal <akhil.goyal@nxp.com>
> Sent: Monday, July 6, 2020 1:13 PM
> To: Zhang, Roy Fan <roy.fan.zhang@intel.com>; dev@dpdk.org;
> anoobj@marvell.com; asomalap@amd.com; ruifeng.wang@arm.com;
> Nagadheeraj Rottela <rnagadheeraj@marvell.com>; Michael Shamis
> <michaelsh@marvell.com>; Ankur Dwivedi <adwivedi@marvell.com>; Jay
> Zhou <jianjay.zhou@huawei.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>
> Cc: Trahe, Fiona <fiona.trahe@intel.com>; Bronowski, PiotrX
> <piotrx.bronowski@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Thomas Monjalon
> <thomas@monjalon.net>
> Subject: RE: [dpdk-dev v4 1/4] cryptodev: add symmetric crypto data-path
> APIs
>
...
> >
> > As you may have seen the structure definition is hW centric with the
> > IOVA addresses all over. Also as you will from the patch series the
> operation is
> > Per operation basis instead of operating in a burst. The external application
> > may sooner know when a specific enqueue is failed.
>
> You may also need to save a virtual address as well. As some hardware are
> able to
> Convert virtual to physical addresses on it's own giving a performance
> improvement.
>
> I do not see an issue in using enqueue burst with burst size=1 , but since you
> are doing
> Optimizations, none of the hardware can perform well with burst = 1, I think
> it is always
> Greater than 1.
Shall I update rte_crypto_sym_vec as follows, so that both problems can be
resolved?
struct rte_crypto_sym_vec {
	/** array of SGL vectors */
	struct rte_crypto_sgl *sgl;
	union {
		/* Supposed to be used with CPU crypto API call. */
		struct {
			/** array of pointers to IV */
			void **iv;
			/** array of pointers to AAD */
			void **aad;
			/** array of pointers to digest */
			void **digest;
			/**
			 * array of statuses for each operation:
			 * - 0 on success
			 * - errno on error
			 */
			int32_t *status;
		};
		/* Supposed to be used with HW crypto API call. */
		struct {
			/** array of pointers to IV */
			struct rte_crypto_vec *iv_hw;
			/** array of pointers to AAD */
			struct rte_crypto_vec *aad_hw;
			/** array of pointers to Digest */
			struct rte_crypto_vec *digest_hw;
		};
	};
	/** number of operations to perform */
	uint32_t num;
};
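For illustration, a caller could fill the HW-oriented variant like this (a
sketch only; it assumes rte_crypto_vec carries a virtual base address, an IOVA
and a length, and all other names are hypothetical):

	#define BURST_SIZE 32

	struct rte_crypto_sgl sgl[BURST_SIZE];
	struct rte_crypto_vec iv_hw[BURST_SIZE];
	struct rte_crypto_vec aad_hw[BURST_SIZE];
	struct rte_crypto_vec digest_hw[BURST_SIZE];
	/* the descriptor itself can live on the stack */
	struct rte_crypto_sym_vec vec = {
		.sgl = sgl,
		.iv_hw = iv_hw,
		.aad_hw = aad_hw,
		.digest_hw = digest_hw,
		.num = n,               /* n <= BURST_SIZE operations */
	};
	uint32_t i;

	for (i = 0; i < n; i++) {
		iv_hw[i].base = iv_va[i];       /* virtual address for SW use */
		iv_hw[i].iova = iv_pa[i];       /* IOVA for the HW descriptor */
		iv_hw[i].len = iv_len;
		/* aad_hw[i] and digest_hw[i] are filled the same way */
	}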
> >
> > > Now for this patchset, the requirement is
> > > - raw buffers
> > > - asynchronous APIs
> > >
> > > The data structure for raw buffers and crypto related offsets are already
> > > defined
> > > So they should be reused.
> > > And I believe with some changes in rte_crypto_op and
> rte_crypto_sym_op,
> > > We can support raw buffers with the same APIs.
> > > Instead of m_src and m_dst, raw buffer data structures can be combined
> in a
> > > Union and some of the fields in the rte_crypto_op can be left NULL in
> case of
> > > raw buffers.
> > >
> >
> > This is a good point but we still face too many unnecessary fields to be
> NULL,
> > such as
> > digest pointers, I have given a lot thought to this structure. Hopefully it
> covers
> > all vendor's HW symmetric crypto needs and in the same time it well
> squeeze
> > the required HW addresses into 1 cacheline, instead of rte_crypto_op +
> > rte_crypto_sym_op 3 cacheline footprint. Another purpose of the
> structure
> > design
> > is the structure buffer can be taken from stack and can be used to fill all
> > jobs to the PMD HW.
>
> Which fields you think are not useful and should be set as NULL?
> Digest pointers you are anyways setting in the new structure.
> Your new struct does not support session less as well as security sessions.
> It does not take care of asymmetric crypto.
> So whenever, a vendor need to support all these, we would end up getting
> the rte_crypto_op structure.
> IMO, you only need to make m_src and m_dst as union to a raw
> input/output
> buffers. Everything else will be relevant.
>
rte_crypto_op is designed to be allocated from a mempool with HW address
info included, so it is possible to deduce the IV and AAD physical addresses from
it. More importantly, rte_crypto_op is designed to be taken from the heap and
freed after dequeue, so it cannot be allocated on the stack. For this
reason I think rte_crypto_sym_vec is a better fit for the patch, do you agree?
(The proposed change is above.)
> Have you done some profiling with using rte_crypto_op instead of this new
> struct?
>
Yes, the code is actually upstreamed in VPP,
https://gerrit.fd.io/r/c/vpp/+/18036, please try it out. If you have a look at the
enqueue/dequeue functions you should see the struggle we had translating
ops, and the second software ring we had to create to make sure we only dequeue a
frame of data. Luckily VPP has space to store mbufs, otherwise the performance
would be even worse.
> >
> > >
> > > > +/* Struct for user to perform HW specific enqueue/dequeue function
> calls
> > > */
> > > > +struct rte_crypto_hw_ops {
> > > > + /* Driver written queue pair data pointer, should NOT be alterred by
> > > > + * the user.
> > > > + */
> > > > + void *qp;
> > > > + /* Function handler to enqueue AEAD job */
> > > > + rte_crypto_hw_enq_cb_fn enqueue_aead;
> > > > + /* Function handler to enqueue cipher only job */
> > > > + rte_crypto_hw_enq_cb_fn enqueue_cipher;
> > > > + /* Function handler to enqueue auth only job */
> > > > + rte_crypto_hw_enq_cb_fn enqueue_auth;
> > > > + /* Function handler to enqueue cipher + hash chaining job */
> > > > + rte_crypto_hw_enq_cb_fn enqueue_chain;
> > > > + /* Function handler to query processed jobs */
> > > > + rte_crypto_hw_query_processed query_processed;
> > > > + /* Function handler to dequeue one job and return opaque data
> > > stored
> > > > */
> > > > + rte_crypto_hw_deq_one_cb_fn dequeue_one;
> > > > + /* Function handler to dequeue many jobs */
> > > > + rte_crypto_hw_deq_many_cb_fn dequeue_many;
> > > > + /* Reserved */
> > > > + void *reserved[8];
> > > > +};
> > >
> > > Why do we need such callbacks in the library?
> > > These should be inside the drivers, or else we do the same for
> > > Legacy case as well. The pain of finding the correct enq function for
> > > Appropriate crypto operation is already handled by all the drivers
> > > And we can reuse that or else we modify it there as well.
> > >
> >
> > Providing different types of enqueue functions for specific operation type
> > could save a lot of branches for the driver to handle. As mentioned this
> > data-path API is intended to be used as an advanced feature to provide
> > close-to-native perf to external library/applications that are not mbuf
> > centric. And I don't agree classifying choosing 1 enqueue function from
> > 4 candidates as "pain".
>
> My point is why don't we have it in the Legacy code path as well?
> I think it is useful in both the paths. Branching is a pain for the driver.
>
That's a good point :-) we definitely can do something about it in future releases.
> >
> > > We should not add a lot of data paths for the user, otherwise the
> > > APIs will become centric to a particular vendor and it will be very difficult
> > > For the user to migrate from one vendor to another and would defeat
> the
> > > Purpose of DPDK which provide uniform abstraction layer for all the
> > > hardware
> > > Vendors.
> > >
> >
> > The purpose of adding data-path for the user is performance for non-mbuf
> > data-path centric applications/libraries, in the meantime not creating
> > confusion. In this version we aim to provide a more friendly data-path for
>
> I do not see the new path as friendly.
> Adding a parallel new datapath with create more confusion for the
> application
> developer. It would be convenient, if we can use the same path with minimal
> changes so that people can migrate easily.
>
We are working on the next version, which is based on rte_crypto_sym_vec and a single
enqueue-then-dequeue API. To be honest it won't be that friendly to application
developers whose applications are mbuf-based or already built on top of cryptodev;
however, for applications that are not mbuf-based and have their own data-path
structures it will surely be friendlier than the existing enqueue and dequeue APIs: no
dependency on mbufs, mbuf and crypto op pools, and no need to consider how to adapt
different working methods.
> > them, and aims to be useful to all vendor's PMDs. If there is any place in
> > the API that blocks a PMD please let me know.
>
> As commented above, sessionless, rte_security sessions, asymmetric crypto
> Not supported.
>
You are right -
Sessionless support aims at usability and is not intended for high-throughput
applications.
rte_security is built on top of mbuf and ethdev and is not intended for
"mbuf-independent" applications either.
>
> >
> > > Adding other vendors to comment.
> > >
> > > Regards,
> > > Akhil
Regards,
Fan
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [dpdk-dev] [dpdk-dev v4 1/4] cryptodev: add symmetric crypto data-path APIs
2020-07-07 12:37 ` Zhang, Roy Fan
@ 2020-07-07 20:37 ` Akhil Goyal
2020-07-08 15:09 ` Zhang, Roy Fan
0 siblings, 1 reply; 39+ messages in thread
From: Akhil Goyal @ 2020-07-07 20:37 UTC (permalink / raw)
To: Zhang, Roy Fan, dev, anoobj, asomalap, ruifeng.wang,
Nagadheeraj Rottela, Michael Shamis, Ankur Dwivedi, Jay Zhou,
De Lara Guarch, Pablo, Hemant Agrawal
Cc: Trahe, Fiona, Bronowski, PiotrX, Ananyev, Konstantin, Thomas Monjalon
Hi Fan,
>
> Hi Akhil,
>
> ...
> > >
> > > As you may have seen the structure definition is hW centric with the
> > > IOVA addresses all over. Also as you will from the patch series the
> > operation is
> > > Per operation basis instead of operating in a burst. The external application
> > > may sooner know when a specific enqueue is failed.
> >
> > You may also need to save a virtual address as well. As some hardware are
> > able to
> > Convert virtual to physical addresses on it's own giving a performance
> > improvement.
> >
> > I do not see an issue in using enqueue burst with burst size=1 , but since you
> > are doing
> > Optimizations, none of the hardware can perform well with burst = 1, I think
> > it is always
> > Greater than 1.
>
> Shall I update the rte_crypto_sym_vec as the following - so the 2 problems can
> be
> resolved?
>
> struct rte_crypto_sym_vec {
> /** array of SGL vectors */
> struct rte_crypto_sgl *sgl;
> union {
> /* Supposed to be used with CPU crypto API call. */
> struct {
> /** array of pointers to IV */
> void **iv;
> /** array of pointers to AAD */
> void **aad;
> /** array of pointers to digest */
> void **digest;
> /**
> * array of statuses for each operation:
> * - 0 on success
> * - errno on error
> */
> int32_t *status;
> };
>
> /* Supposed to be used with HW crypto API call. */
> struct {
> /** array of pointers to IV */
> struct rte_crypto_vec *iv_hw;
> /** array of pointers to AAD */
> struct rte_crypto_vec *aad_hw;
> /** array of pointers to Digest */
> struct rte_crypto_vec *digest_hw;
> };
>
> };
> /** number of operations to perform */
> uint32_t num;
> };
Yes, something of that sort can work.
This use case was also discussed in the CPU crypto mail chain,
regarding the need for a non-mbuf use case for async enq/deq APIs:
http://patches.dpdk.org/patch/58528/#101995
>
> > >
> > > > Now for this patchset, the requirement is
> > > > - raw buffers
> > > > - asynchronous APIs
> > > >
> > > > The data structure for raw buffers and crypto related offsets are already
> > > > defined
> > > > So they should be reused.
> > > > And I believe with some changes in rte_crypto_op and
> > rte_crypto_sym_op,
> > > > We can support raw buffers with the same APIs.
> > > > Instead of m_src and m_dst, raw buffer data structures can be combined
> > in a
> > > > Union and some of the fields in the rte_crypto_op can be left NULL in
> > case of
> > > > raw buffers.
> > > >
> > >
> > > This is a good point but we still face too many unnecessary fields to be
> > NULL,
> > > such as
> > > digest pointers, I have given a lot thought to this structure. Hopefully it
> > covers
> > > all vendor's HW symmetric crypto needs and in the same time it well
> > squeeze
> > > the required HW addresses into 1 cacheline, instead of rte_crypto_op +
> > > rte_crypto_sym_op 3 cacheline footprint. Another purpose of the
> > structure
> > > design
> > > is the structure buffer can be taken from stack and can be used to fill all
> > > jobs to the PMD HW.
> >
> > Which fields you think are not useful and should be set as NULL?
> > Digest pointers you are anyways setting in the new structure.
> > Your new struct does not support session less as well as security sessions.
> > It does not take care of asymmetric crypto.
> > So whenever, a vendor need to support all these, we would end up getting
> > the rte_crypto_op structure.
> > IMO, you only need to make m_src and m_dst as union to a raw
> > input/output
> > buffers. Everything else will be relevant.
> >
>
> Rte_crypto_op is designed to be allocated from mempool with HW address
> info contained so it is possible to deduct IV and AAD physical address from
> it. More importantly rte_crypto_op is designed to be taken from heap and
> being freed after dequeue. So they cannot be allocated from stack - for this
> reason I think rte_crypot_sym_vec is a better fit for the patch, do you agree?
> (the Proposed change is at above).
Agreed.
>
> > Have you done some profiling with using rte_crypto_op instead of this new
> > struct?
> >
> Yes, the code are actually upstreamed in VPP
> https://gerrit.fd.io/r/c/vpp/+/18036, please try out. If you
> have a look at the
> enqueue/dequeue functions you should see the struggle we had to translate
> ops, and creating a second software ring to make sure we only dequeue a
> frame of data. Lucky VPP has space to store mbufs otherwise the perf will
> be even worse.
What performance gap do you see after making m_src and m_dst raw buffers?
>
> > >
> > > >
> > > > > +/* Struct for user to perform HW specific enqueue/dequeue function
> > calls
> > > > */
> > > > > +struct rte_crypto_hw_ops {
> > > > > + /* Driver written queue pair data pointer, should NOT be
> alterred by
> > > > > + * the user.
> > > > > + */
> > > > > + void *qp;
> > > > > + /* Function handler to enqueue AEAD job */
> > > > > + rte_crypto_hw_enq_cb_fn enqueue_aead;
> > > > > + /* Function handler to enqueue cipher only job */
> > > > > + rte_crypto_hw_enq_cb_fn enqueue_cipher;
> > > > > + /* Function handler to enqueue auth only job */
> > > > > + rte_crypto_hw_enq_cb_fn enqueue_auth;
> > > > > + /* Function handler to enqueue cipher + hash chaining job */
> > > > > + rte_crypto_hw_enq_cb_fn enqueue_chain;
> > > > > + /* Function handler to query processed jobs */
> > > > > + rte_crypto_hw_query_processed query_processed;
> > > > > + /* Function handler to dequeue one job and return opaque data
> > > > stored
> > > > > */
> > > > > + rte_crypto_hw_deq_one_cb_fn dequeue_one;
> > > > > + /* Function handler to dequeue many jobs */
> > > > > + rte_crypto_hw_deq_many_cb_fn dequeue_many;
> > > > > + /* Reserved */
> > > > > + void *reserved[8];
> > > > > +};
> > > >
> > > > Why do we need such callbacks in the library?
> > > > These should be inside the drivers, or else we do the same for
> > > > Legacy case as well. The pain of finding the correct enq function for
> > > > Appropriate crypto operation is already handled by all the drivers
> > > > And we can reuse that or else we modify it there as well.
> > > >
> > >
> > > Providing different types of enqueue functions for specific operation type
> > > could save a lot of branches for the driver to handle. As mentioned this
> > > data-path API is intended to be used as an advanced feature to provide
> > > close-to-native perf to external library/applications that are not mbuf
> > > centric. And I don't agree classifying choosing 1 enqueue function from
> > > 4 candidates as "pain".
> >
> > My point is why don't we have it in the Legacy code path as well?
> > I think it is useful in both the paths. Branching is a pain for the driver.
> >
>
> That's a good point :-) we definitely can do something about it in future releases.
>
> > >
> > > > We should not add a lot of data paths for the user, otherwise the
> > > > APIs will become centric to a particular vendor and it will be very difficult
> > > > For the user to migrate from one vendor to another and would defeat
> > the
> > > > Purpose of DPDK which provide uniform abstraction layer for all the
> > > > hardware
> > > > Vendors.
> > > >
> > >
> > > The purpose of adding data-path for the user is performance for non-mbuf
> > > data-path centric applications/libraries, in the meantime not creating
> > > confusion. In this version we aim to provide a more friendly data-path for
> >
> > I do not see the new path as friendly.
> > Adding a parallel new datapath with create more confusion for the
> > application
> > developer. It would be convenient, if we can use the same path with minimal
> > changes so that people can migrate easily.
> >
>
> We are working on next version that is based on rte_crypto_sym_vec and single
> Enqueue-then-dequeue API. To be honest it won't be that of friendly to
> application
> developer that the applications are mbuf-based or already built on top of
> cryptodev,
> however for the applications that are not mbuf based and having their own
> data-path
> structures it will be surely friendlier than existing enqueue and dequeue APIs. No
> dependency to mbuf, mbuf and crypto op pool, and no need to consider how to
> adapt
> different working methods.
Agreed with your point. The intention is just not to create multiple copies of
the same information in multiple structures.
>
> > > them, and aims to be useful to all vendor's PMDs. If there is any place in
> > > the API that blocks a PMD please let me know.
> >
> > As commented above, sessionless, rte_security sessions, asymmetric crypto
> > Not supported.
> >
> You are right -
> Sessionless support aims the usability and is not intended to be used in high-
> throughput
> Application.
There may be some cases where a limited amount of control packets are sent
which may be sessionless. We cannot ask people to use a different data path
for such traffic, so we may need to support that too.
> Rte_security is built on top of mbuf and ethdev and is not intended to "mbuf-
> independent"
> applications either.
rte_security for lookaside protocol can be mbuf-independent, and NXP may
support it in future, especially in the case of PDCP.
Regards,
Akhil
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [dpdk-dev] [dpdk-dev v4 1/4] cryptodev: add symmetric crypto data-path APIs
2020-07-07 20:37 ` Akhil Goyal
@ 2020-07-08 15:09 ` Zhang, Roy Fan
0 siblings, 0 replies; 39+ messages in thread
From: Zhang, Roy Fan @ 2020-07-08 15:09 UTC (permalink / raw)
To: Akhil Goyal, dev, anoobj, asomalap, ruifeng.wang,
Nagadheeraj Rottela, Michael Shamis, Ankur Dwivedi, Jay Zhou,
De Lara Guarch, Pablo, Hemant Agrawal
Cc: Trahe, Fiona, Bronowski, PiotrX, Ananyev, Konstantin, Thomas Monjalon
Hi Akhil,
Thanks for the comments!
> -----Original Message-----
> From: Akhil Goyal <akhil.goyal@nxp.com>
> Sent: Tuesday, July 7, 2020 9:37 PM
> To: Zhang, Roy Fan <roy.fan.zhang@intel.com>; dev@dpdk.org;
> anoobj@marvell.com; asomalap@amd.com; ruifeng.wang@arm.com;
> Nagadheeraj Rottela <rnagadheeraj@marvell.com>; Michael Shamis
> <michaelsh@marvell.com>; Ankur Dwivedi <adwivedi@marvell.com>; Jay
> Zhou <jianjay.zhou@huawei.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>; Hemant Agrawal
> <hemant.agrawal@nxp.com>
> Cc: Trahe, Fiona <fiona.trahe@intel.com>; Bronowski, PiotrX
> <piotrx.bronowski@intel.com>; Ananyev, Konstantin
> <konstantin.ananyev@intel.com>; Thomas Monjalon
> <thomas@monjalon.net>
> Subject: RE: [dpdk-dev v4 1/4] cryptodev: add symmetric crypto data-path
> APIs
>
> Hi Fan,
> >
> > Hi Akhil,
> >
> > ...
> > > >
> > > > As you may have seen the structure definition is hW centric with the
> > > > IOVA addresses all over. Also as you will from the patch series the
> > > operation is
> > > > Per operation basis instead of operating in a burst. The external
> application
> > > > may sooner know when a specific enqueue is failed.
> > >
> > > You may also need to save a virtual address as well. As some hardware
> are
> > > able to
> > > Convert virtual to physical addresses on it's own giving a performance
> > > improvement.
> > >
> > > I do not see an issue in using enqueue burst with burst size=1 , but since
> you
> > > are doing
> > > Optimizations, none of the hardware can perform well with burst = 1, I
> think
> > > it is always
> > > Greater than 1.
> >
> > Shall I update the rte_crypto_sym_vec as the following - so the 2 problems
> can
> > be
> > resolved?
> >
> > struct rte_crypto_sym_vec {
> > /** array of SGL vectors */
> > struct rte_crypto_sgl *sgl;
> > union {
> > /* Supposed to be used with CPU crypto API call. */
> > struct {
> > /** array of pointers to IV */
> > void **iv;
> > /** array of pointers to AAD */
> > void **aad;
> > /** array of pointers to digest */
> > void **digest;
> > /**
> > * array of statuses for each operation:
> > * - 0 on success
> > * - errno on error
> > */
> > int32_t *status;
> > };
> >
> > /* Supposed to be used with HW crypto API call. */
> > struct {
> > /** array of pointers to IV */
> > struct rte_crypto_vec *iv_hw;
> > /** array of pointers to AAD */
> > struct rte_crypto_vec *aad_hw;
> > /** array of pointers to Digest */
> > struct rte_crypto_vec *digest_hw;
> > };
> >
> > };
> > /** number of operations to perform */
> > uint32_t num;
> > };
>
> Yes something of that sort can work.
>
Will change it in v5.
...
> >
> > > Have you done some profiling with using rte_crypto_op instead of this
> new
> > > struct?
> > >
> > Yes, the code are actually upstreamed in VPP
> > https://gerrit.fd.io/r/c/vpp/+/18036, please try out. If you
> > have a look at the
> > enqueue/dequeue functions you should see the struggle we had to
> translate
> > ops, and creating a second software ring to make sure we only dequeue a
> > frame of data. Lucky VPP has space to store mbufs otherwise the perf will
> > be even worse.
> What is the performance gap do you see after making m_src and m_dst as
> Raw buffers?
>
Converting other projects' data structures (such as the VPP crypto op) into DPDK
cryptodev operations introduces some performance degradation.
>
> There may be some cases where a limited amount of control pkts can be sent
> Which may be session less. We cannot ask people to use a different data
> path
> For such traffic. So we may need to support that too.
>
Here is our proposal for the enqueue-dequeue API:

typedef uint32_t (*cryptodev_sym_hw_dp_crypto_enqueue_dequeue_t)
	(struct rte_cryptodev *dev, uint16_t qp_id,
	struct rte_cryptodev_sym_session *sess,
	union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
	void **opaque, int *enqueued_num,
	rte_cryptodev_get_dequeue_count_t get_dequeue_count,
	rte_cryptodev_post_dequeue_t post_dequeue,
	uint32_t flags);
So the idea is a single API that does enqueue and/or dequeue, combined.
If the user wants to enqueue, she/he sets RTE_CRYPTO_HW_DP_FF_DO_ENQUEUE
in the flags, or RTE_CRYPTO_HW_DP_FF_DO_DEQUEUE if dequeue is expected to
be done. Opaque can be a single pointer or an array, also specified by the
flags, so if the user wants to behave the same as cryptodev_enqueue they
can store the crypto ops into opaque_in, and the dequeued opaque data will
be stored in opaque_out. There are 2 function pointers:
rte_cryptodev_get_dequeue_count_t: returns the number of jobs to dequeue,
which helps if the user wants to derive the dequeue count from the first
dequeued opaque data, or just return a fixed number, same as the cryptodev
enqueue/dequeue usage.
rte_cryptodev_post_dequeue_t: user-provided function run post dequeue,
useful for writing the status to a user-specified data structure (opaque).
To enable sessionless we may add a union to replace sess. The union is
either a session pointer or an xform pointer, which may be specified by
the flag too.
You may ask why use a single function pointer for both enqueue and
dequeue instead of 2 separate functions... I only intended to squeeze it into
rte_cryptodev_ops, combined with cryptodev_sym_cpu_crypto_process_t
as a union, without expanding the size of rte_cryptodev_ops.
struct rte_cryptodev_ops {
	...
	cryptodev_asym_free_session_t asym_session_clear;
	/**< Clear a Crypto sessions private data. */
	union {
		cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
		/**< process input data synchronously (cpu-crypto). */
		cryptodev_sym_hw_crypto_enqueue_dequeue_t sym_hw_enq_deq;
		/**< Get HW crypto data-path call back functions and data */
	};
};
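To make the intended flow concrete, here is a sketch of the two user callbacks
and one combined call. The callback typedefs, parameter order and the
DO_ENQUEUE/DO_DEQUEUE flag names follow this mail; the frame structure, the
variables and the way the dev_ops member is reached are illustrative only:

	#define MAX_OPS 32

	struct my_frame {               /* hypothetical application structure */
		uint32_t n_ops;
		int32_t status[MAX_OPS];
	};

	static uint32_t
	get_deq_count(void *opaque)
	{
		/* e.g. derive the count from the first dequeued opaque data */
		return ((struct my_frame *)opaque)->n_ops;
	}

	static void
	post_deq(void *opaque, uint32_t index, uint8_t is_op_success)
	{
		((struct my_frame *)opaque)->status[index] = is_op_success ? 0 : -1;
	}

	/* enqueue one vector and poll for completed jobs in the same call */
	ret = sym_hw_enq_deq(dev, qp_id, sess, ofs, &vec,
		(void **)&frame, &enqueued,
		get_deq_count, post_deq,
		RTE_CRYPTO_HW_DP_FF_DO_ENQUEUE | RTE_CRYPTO_HW_DP_FF_DO_DEQUEUE);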
> > Rte_security is built on top of mbuf and ethdev and is not intended to
> "mbuf-
> > independent"
> > applications either.
Again, we can replace sess with the union:
union rte_cryptodev_hw_dp_ctx {
	struct rte_cryptodev_sym_session *crypto_sess;
	struct rte_crypto_sym_xform *xform;
	struct rte_security_session *sec_sess;
};
>
> Rte_security for lookaside protocol can be mbuf independent and NXP may
> Support it in future especially in case of PDCP.
>
> Regards,
> Akhil
What do you think?
Regards,
Fan
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v5 0/4] cryptodev: add symmetric crypto data-path APIs
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
` (3 preceding siblings ...)
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 4/4] doc: add cryptodev direct APIs guide Fan Zhang
@ 2020-07-13 16:57 ` Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 1/4] cryptodev: add " Fan Zhang
` (3 more replies)
4 siblings, 4 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-13 16:57 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang, Piotr Bronowski
This patch adds symmetric crypto data-path APIs to Cryptodev. The direct
symmetric crypto data-path APIs are a set of APIs that provide
more HW-friendly enqueue/dequeue data-path functions as an alternative
to ``rte_cryptodev_enqueue_burst`` and
``rte_cryptodev_dequeue_burst``. The APIs are designed for external
libraries/applications that want to use Cryptodev as a symmetric crypto
data-path accelerator but are not necessarily mbuf-centric. With
these APIs the cycle cost of converting their data structures into
DPDK cryptodev operations/mbufs can be reduced, and the dependency on the
DPDK crypto operation mempool can be avoided.
It is expected that the user can develop close-to-native performance
symmetric crypto data-path implementations with the functions provided
in this patchset.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
v5:
- Changed to use rte_crypto_sym_vec as input.
- Changed to use public APIs instead of use function pointer.
v4:
- Added missed patch.
v3:
- Instead of QAT only API, moved the API to cryptodev.
- Added cryptodev feature flags.
v2:
- Used a structure to simplify parameters.
- Added unit tests.
- Added documentation.
Fan Zhang (4):
cryptodev: add data-path APIs
crypto/qat: add support to direct data-path APIs
test/crypto: add unit-test for cryptodev direct APIs
doc: add cryptodev direct APIs guide
app/test/test_cryptodev.c | 367 +++++++-
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 +-
doc/guides/prog_guide/cryptodev_lib.rst | 53 ++
doc/guides/rel_notes/release_20_08.rst | 8 +
drivers/common/qat/Makefile | 1 +
drivers/common/qat/qat_qp.h | 1 +
drivers/crypto/qat/meson.build | 1 +
drivers/crypto/qat/qat_sym.h | 3 +
drivers/crypto/qat/qat_sym_hw_dp.c | 850 ++++++++++++++++++
drivers/crypto/qat/qat_sym_pmd.c | 7 +-
lib/librte_cryptodev/rte_crypto_sym.h | 27 +-
lib/librte_cryptodev/rte_cryptodev.c | 118 +++
lib/librte_cryptodev/rte_cryptodev.h | 256 +++++-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 90 +-
.../rte_cryptodev_version.map | 5 +
16 files changed, 1795 insertions(+), 48 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
--
2.20.1
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [dpdk-dev v5 1/4] cryptodev: add data-path APIs
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
@ 2020-07-13 16:57 ` Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 2/4] crypto/qat: add support to direct " Fan Zhang
` (2 subsequent siblings)
3 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-13 16:57 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang, Piotr Bronowski
This patch adds data-path APIs for enqueue and dequeue operations to
cryptodev. The APIs support flexible user-defined enqueue and dequeue
behaviors and operation modes.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
---
lib/librte_cryptodev/rte_crypto_sym.h | 27 +-
lib/librte_cryptodev/rte_cryptodev.c | 118 ++++++++
lib/librte_cryptodev/rte_cryptodev.h | 256 +++++++++++++++++-
lib/librte_cryptodev/rte_cryptodev_pmd.h | 90 +++++-
.../rte_cryptodev_version.map | 5 +
5 files changed, 487 insertions(+), 9 deletions(-)
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index f29c98051..8f3a93a3d 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -57,12 +57,27 @@ struct rte_crypto_sgl {
struct rte_crypto_sym_vec {
/** array of SGL vectors */
struct rte_crypto_sgl *sgl;
- /** array of pointers to IV */
- void **iv;
- /** array of pointers to AAD */
- void **aad;
- /** array of pointers to digest */
- void **digest;
+ union {
+ /* Supposed to be used with CPU crypto API call. */
+ struct {
+ /** array of pointers to IV */
+ void **iv;
+ /** array of pointers to AAD */
+ void **aad;
+ /** array of pointers to digest */
+ void **digest;
+ };
+
+ /* Supposed to be used with HW crypto API call. */
+ struct {
+ /** array of vectors to IV */
+ struct rte_crypto_vec *iv_vec;
+ /** array of vectors to AAD */
+ struct rte_crypto_vec *aad_vec;
+ /** array of vectors to Digest */
+ struct rte_crypto_vec *digest_vec;
+ };
+ };
/**
* array of statuses for each operation:
* - 0 on success
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 1dd795bcb..1e93762a0 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1914,6 +1914,124 @@ rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
}
+uint32_t
+rte_cryptodev_sym_hw_crypto_enqueue_aead(uint8_t dev_id, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
+ return -EINVAL;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API) ||
+ dev->dev_ops->sym_hw_enq_deq == NULL ||
+ dev->dev_ops->sym_hw_enq_deq->enqueue_aead == NULL)
+ return -ENOTSUP;
+ if (vec == NULL || vec->num == 0 || session.crypto_sess == NULL)
+ return -EINVAL;
+
+ return dev->dev_ops->sym_hw_enq_deq->enqueue_aead(dev, qp_id, session,
+ ofs, vec, opaque, flags);
+}
+
+uint32_t
+rte_cryptodev_sym_hw_crypto_enqueue_cipher(uint8_t dev_id, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
+ return -EINVAL;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API) ||
+ dev->dev_ops->sym_hw_enq_deq == NULL ||
+ dev->dev_ops->sym_hw_enq_deq->enqueue_cipher == NULL)
+ return -ENOTSUP;
+ if (vec == NULL || vec->num == 0 || session.crypto_sess == NULL)
+ return -EINVAL;
+
+ return dev->dev_ops->sym_hw_enq_deq->enqueue_cipher(dev, qp_id, session,
+ ofs, vec, opaque, flags);
+}
+
+uint32_t
+rte_cryptodev_sym_hw_crypto_enqueue_auth(uint8_t dev_id, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
+ return -EINVAL;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API) ||
+ dev->dev_ops->sym_hw_enq_deq == NULL ||
+ dev->dev_ops->sym_hw_enq_deq->enqueue_auth == NULL)
+ return -ENOTSUP;
+ if (vec == NULL || vec->num == 0 || session.crypto_sess == NULL)
+ return -EINVAL;
+
+ return dev->dev_ops->sym_hw_enq_deq->enqueue_auth(dev, qp_id, session,
+ ofs, vec, opaque, flags);
+}
+
+uint32_t
+rte_cryptodev_sym_hw_crypto_enqueue_chain(uint8_t dev_id, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
+ return -EINVAL;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API) ||
+ dev->dev_ops->sym_hw_enq_deq == NULL ||
+ dev->dev_ops->sym_hw_enq_deq->enqueue_chain == NULL)
+ return -ENOTSUP;
+ if (vec == NULL || vec->num == 0 || session.crypto_sess == NULL)
+ return -EINVAL;
+
+ return dev->dev_ops->sym_hw_enq_deq->enqueue_chain(dev, qp_id, session,
+ ofs, vec, opaque, flags);
+}
+
+uint32_t
+rte_cryptodev_sym_hw_crypto_dequeue(uint8_t dev_id, uint16_t qp_id,
+ rte_cryptodev_get_dequeue_count_t get_dequeue_count,
+ rte_cryptodev_post_dequeue_t post_dequeue,
+ void **out_opaque,
+ uint32_t *n_success_jobs, uint32_t flags)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_get_qp_status(dev_id, qp_id))
+ return -EINVAL;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ if (!(dev->feature_flags & RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API) ||
+ dev->dev_ops->sym_hw_enq_deq == NULL ||
+ dev->dev_ops->sym_hw_enq_deq->dequeue == NULL)
+ return -ENOTSUP;
+
+ if (!get_dequeue_count || !post_dequeue || !n_success_jobs)
+ return -EINVAL;
+
+ return dev->dev_ops->sym_hw_enq_deq->dequeue(dev, qp_id,
+ get_dequeue_count, post_dequeue, out_opaque,
+ n_success_jobs, flags);
+}
+
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 7b3ebc20f..83c9f072c 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -466,7 +466,8 @@ rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
/**< Support symmetric session-less operations */
#define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23)
/**< Support operations on data which is not byte aligned */
-
+#define RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API (1ULL << 24)
+/**< Support hardware accelerator specific raw data as input */
/**
* Get the name of a crypto device feature flag
@@ -1351,6 +1352,259 @@ rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
struct rte_crypto_sym_vec *vec);
+/* HW direct symmetric crypto data-path APIs */
+#define RTE_CRYPTO_HW_DP_FF_ENQUEUE_EXHAUST (1ULL << 0)
+/**< Bit-mask to indicate the last job in a burst. With this bit set the
+ * driver may read but not write the drv_data buffer, and kick the HW to
+ * start processing all jobs written.
+ */
+#define RTE_CRYPTO_HW_DP_FF_CRYPTO_SESSION (1ULL << 1)
+/**< Bit-mask indicating sess is a cryptodev sym session */
+#define RTE_CRYPTO_HW_DP_FF_SESSIONLESS (1ULL << 2)
+/**< Bit-mask indicating sess is a cryptodev sym xform and session-less
+ * operation is in-place
+ **/
+#define RTE_CRYPTO_HW_DP_FF_SECURITY_SESSION (1ULL << 3)
+/**< Bit-mask indicating sess is a security session */
+#define RTE_CRYPTO_HW_DP_FF_SET_OPAQUE_ARRAY (1ULL << 4)
+/**< Bit-mask to indicate opaque is an array, all elements in it will be
+ * stored as opaque data.
+ */
+#define RTE_CRYPTO_HW_DP_FF_KICK_QUEUE (1ULL << 5)
+/**< Bit-mask to command the HW to start processing all stored ops in the
+ * queue immediately.
+ */
+
+/**< Bit-masks used for dequeuing job */
+#define RTE_CRYPTO_HW_DP_FF_GET_OPAQUE_ARRAY (1ULL << 0)
+/**< Bit-mask to indicate opaque is an array with enough room to fill all
+ * dequeued opaque data pointers.
+ */
+#define RTE_CRYPTO_HW_DP_FF_DEQUEUE_EXHAUST (1ULL << 1)
+/**< Bit-mask to indicate dequeuing as many as n jobs in dequeue-many function.
+ * Without this bit once the driver found out the ready-to-dequeue jobs are
+ * not as many as n, it shall stop immediate, leave all processed jobs in the
+ * queue, and return the ready jobs in negative. With this bit set the
+ * function shall continue dequeue all done jobs and return the dequeued
+ * job count in positive.
+ */
+
+/**
+ * Typedef that the user provided to get the dequeue count. User may use it to
+ * return a fixed number or the number parsed from the opaque data stored in
+ * the first processed job.
+ *
+ * @param opaque Dequeued opaque data.
+ **/
+typedef uint32_t (*rte_cryptodev_get_dequeue_count_t)
+ (void *opaque);
+
+/**
+ * Typedef that the user provided to deal with post dequeue operation, such
+ * as filling status.
+ *
+ * @param opaque Dequeued opaque data. In case
+ * RTE_CRYPTO_HW_DP_FF_GET_OPAQUE_ARRAY bit is
+ * set, this value will be the opaque data stored
+ * in the specific processed jobs referenced by
+ * index, otherwise it will be the opaque data
+ * stored in the first processed job in the burst.
+ * @param index Index number of the processed job.
+ * @param is_op_success Driver filled operation status.
+ **/
+typedef void (*rte_cryptodev_post_dequeue_t)(void *opaque, uint32_t index,
+ uint8_t is_op_success);
+
+/**
+ * Union
+ */
+union rte_cryptodev_hw_session_ctx {
+ struct rte_cryptodev_sym_session *crypto_sess;
+ struct rte_crypto_sym_xform *xform;
+ struct rte_security_session *sec_sess;
+};
+
+/**
+ * Enqueue actual AEAD symmetric crypto processing on user provided data.
+ *
+ * @param dev_id The device identifier.
+ * @param qp_id The index of the queue pair from which to
+ * retrieve processed packets. The value must be
+ * in the range [0, nb_queue_pair - 1] previously
+ * supplied to rte_cryptodev_configure().
+ * @param session Union of different session types, depends on
+ * RTE_CRYPTO_HW_DP_FF_* flag.
+ * @param ofs Start and stop offsets for auth and cipher
+ * operations.
+ * @param vec Vectorized operation descriptor.
+ * @param opaque Opaque data to be written to HW
+ * descriptor for enqueue. In case
+ * RTE_CRYPTO_HW_DP_FF_SET_OPAQUE_ARRAY flag is
+ * set this value should be an array of all
+ * 'vec->num' opaque data with the size stated in
+ * the vec. Otherwise only the first opaque
+ * data in the array will be stored in the first
+ * HW descriptor waiting for dequeue.
+ * @param flags Bit-mask of one or more RTE_CRYPTO_HW_DP_FF_*
+ * flags.
+ *
+ * @return
+ * - Returns number of successfully processed packets. In case the returned
+ * value is smaller than 'vec->num', the vec's status array will be written
+ * the error number accordingly.
+ */
+__rte_experimental
+uint32_t
+rte_cryptodev_sym_hw_crypto_enqueue_aead(uint8_t dev_id, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags);
+
+/**
+ * Enqueue actual cipher-only symmetric crypto processing on user provided data.
+ *
+ * @param dev_id The device identifier.
+ * @param qp_id The index of the queue pair from which to
+ * retrieve processed packets. The value must be
+ * in the range [0, nb_queue_pair - 1] previously
+ * supplied to rte_cryptodev_configure().
+ * @param session Union of different session types, depends on
+ * RTE_CRYPTO_HW_DP_FF_* flag.
+ * @param ofs Start and stop offsets for auth and cipher
+ * operations.
+ * @param vec Vectorized operation descriptor.
+ * @param opaque Opaque data to be written to HW
+ * descriptor for enqueue. In case
+ * RTE_CRYPTO_HW_DP_FF_SET_OPAQUE_ARRAY flag is
+ * set this value should be an array of all
+ * 'vec->num' opaque data with the size stated in
+ * the vec. Otherwise only the first opaque
+ * data in the array will be stored in the first
+ * HW descriptor waiting for dequeue.
+ * @param flags Bit-mask of one or more RTE_CRYPTO_HW_DP_FF_*
+ * flags.
+ *
+ * @return
+ * - Returns number of successfully processed packets. In case the returned
+ * value is smaller than 'vec->num', the vec's status array will be written
+ * the error number accordingly.
+ */
+__rte_experimental
+uint32_t
+rte_cryptodev_sym_hw_crypto_enqueue_cipher(uint8_t dev_id, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags);
+
+/**
+ * Enqueue actual auth-only symmetric crypto processing on user provided data.
+ *
+ * @param dev_id The device identifier.
+ * @param qp_id The index of the queue pair from which to
+ * retrieve processed packets. The value must be
+ * in the range [0, nb_queue_pair - 1] previously
+ * supplied to rte_cryptodev_configure().
+ * @param session Union of different session types, depends on
+ * RTE_CRYPTO_HW_DP_FF_* flag.
+ * @param ofs Start and stop offsets for auth and cipher
+ * operations.
+ * @param vec Vectorized operation descriptor.
+ * @param opaque Opaque data to be written to HW
+ * descriptor for enqueue. In case
+ * RTE_CRYPTO_HW_DP_FF_SET_OPAQUE_ARRAY flag is
+ * set this value should be an array of all
+ * 'vec->num' opaque data with the size stated in
+ * the vec. Otherwise only the first opaque
+ * data in the array will be stored in the first
+ * HW descriptor waiting for dequeue.
+ * @param flags Bit-mask of one or more RTE_CRYPTO_HW_DP_FF_*
+ * flags.
+ *
+ * @return
+ * - Returns number of successfully processed packets. In case the returned
+ * value is smaller than 'vec->num', the vec's status array will be written
+ * the error number accordingly.
+ */
+__rte_experimental
+uint32_t
+rte_cryptodev_sym_hw_crypto_enqueue_auth(uint8_t dev_id, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags);
+
+/**
+ * Enqueue actual chained symmetric crypto processing on user provided data.
+ *
+ * @param dev_id The device identifier.
+ * @param qp_id The index of the queue pair from which to
+ * retrieve processed packets. The value must be
+ * in the range [0, nb_queue_pair - 1] previously
+ * supplied to rte_cryptodev_configure().
+ * @param session Union of different session types, depends on
+ * RTE_CRYPTO_HW_DP_FF_* flag.
+ * @param ofs Start and stop offsets for auth and cipher
+ * operations.
+ * @param vec Vectorized operation descriptor.
+ * @param opaque Opaque data to be written to HW
+ * descriptor for enqueue. In case
+ * RTE_CRYPTO_HW_DP_FF_SET_OPAQUE_ARRAY flag is
+ * set this value should be an array of all
+ * 'vec->num' opaque data with the size stated in
+ * the vec. Otherwise only the first opaque
+ * data in the array will be stored in the first
+ * HW descriptor waiting for dequeue.
+ * @param flags Bit-mask of one or more RTE_CRYPTO_HW_DP_FF_*
+ * flags.
+ *
+ * @return
+ * - Returns number of successfully processed packets. In case the returned
+ * value is smaller than 'vec->num', the vec's status array will be written
+ * the error number accordingly.
+ */
+__rte_experimental
+uint32_t
+rte_cryptodev_sym_hw_crypto_enqueue_chain(uint8_t dev_id, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags);
+
+/**
+ * Dequeue symmetric crypto processing of user provided data.
+ *
+ * @param dev_id The device identifier.
+ * @param qp_id The index of the queue pair from which
+ * to retrieve processed packets. The
+ * value must be in the range [0,
+ * nb_queue_pair - 1] previously
+ * supplied to rte_cryptodev_configure().
+ * @param get_dequeue_count User provided callback function to
+ * obtain dequeue count.
+ * @param post_dequeue User provided callback function to
+ * post-process a dequeued operation.
+ * @param out_opaque Opaque data to be retrieve from HW
+ * queue. In case of the flag
+ * RTE_CRYPTO_HW_DP_FF_GET_OPAQUE_ARRAY
+ * is set every dequeued operation
+ * will be written its stored opaque data
+ * into this array, otherwise only the
+ * first dequeued operation will be
+ * written the opaque data.
+ * @param n_success_jobs Driver written value to specific the
+ * total successful operations count.
+ * @param flags Bit-mask of one or more
+ * RTE_CRYPTO_HW_DP_FF_* flags.
+ *
+ * @return
+ * - Returns number of dequeued packets.
+ */
+__rte_experimental
+uint32_t
+rte_cryptodev_sym_hw_crypto_dequeue(uint8_t dev_id, uint16_t qp_id,
+ rte_cryptodev_get_dequeue_count_t get_dequeue_count,
+ rte_cryptodev_post_dequeue_t post_dequeue,
+ void **out_opaque,
+ uint32_t *n_success_jobs, uint32_t flags);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 81975d72b..7ece9f8e9 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -316,6 +316,88 @@ typedef uint32_t (*cryptodev_sym_cpu_crypto_process_t)
(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess,
union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec);
+/**
+ * Enqueue actual symmetric crypto processing on user provided data.
+ *
+ * @param dev Crypto device pointer
+ * @param qp_id The index of the queue pair from which to
+ * retrieve processed packets. The value must be
+ * in the range [0, nb_queue_pair - 1] previously
+ * supplied to rte_cryptodev_configure().
+ * @param session Union of different session types, depends on
+ * RTE_CRYPTO_HW_DP_FF_* flag.
+ * @param ofs Start and stop offsets for auth and cipher
+ * operations.
+ * @param vec Vectorized operation descriptor.
+ * @param opaque Opaque data to be written to HW
+ * descriptor for enqueue. In case
+ * RTE_CRYPTO_HW_DP_FF_SET_OPAQUE_ARRAY flag is
+ * set this value should be an array of all
+ * 'vec->num' opaque data with the size stated in
+ * the vec. Otherwise only the first opaque
+ * data in the array will be stored in the first
+ * HW descriptor waiting for dequeue.
+ * @param flags Bit-mask of one or more RTE_CRYPTO_HW_DP_FF_*
+ * flags.
+ *
+ * @return
+ * - Returns the number of successfully enqueued operations. If the returned
+ * value is smaller than 'vec->num', the error numbers are written to the
+ * corresponding entries of the vec's status array.
+ */
+typedef uint32_t (*cryptodev_sym_hw_crypto_enqueue_t)
+ (struct rte_cryptodev *dev, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags);
+
+/**
+ * Dequeue symmetric crypto processing of user provided data.
+ *
+ * @param dev Crypto device pointer
+ * @param qp_id The index of the queue pair from which
+ * to retrieve processed packets. The
+ * value must be in the range [0,
+ * nb_queue_pair - 1] previously
+ * supplied to rte_cryptodev_configure().
+ * @param get_dequeue_count User provided callback function to
+ * obtain dequeue count.
+ * @param post_dequeue User provided callback function to
+ * post-process a dequeued operation.
+ * @param out_opaque Opaque data to be retrieved from the HW
+ * queue. If the flag
+ * RTE_CRYPTO_HW_DP_FF_GET_OPAQUE_ARRAY
+ * is set, the stored opaque data of every
+ * dequeued operation is written into this
+ * array; otherwise only the opaque data of
+ * the first dequeued operation is written.
+ * @param n_success_jobs Driver-written value specifying the
+ * total count of successful operations.
+ * @param flags Bit-mask of one or more
+ * RTE_CRYPTO_HW_DP_FF_* flags.
+ *
+ * @return
+ * - Returns number of dequeued packets.
+ */
+typedef uint32_t (*cryptodev_sym_hw_crypto_dequeue_t)
+ (struct rte_cryptodev *dev, uint16_t qp_id,
+ rte_cryptodev_get_dequeue_count_t get_dequeue_count,
+ rte_cryptodev_post_dequeue_t post_dequeue,
+ void **out_opaque,
+ uint32_t *n_success_jobs, uint32_t flags);
+
+/**
+ * Structure of HW crypto Data-plane APIs.
+ */
+struct rte_crytodev_sym_hw_dp_ops {
+ cryptodev_sym_hw_crypto_enqueue_t enqueue_aead;
+ cryptodev_sym_hw_crypto_enqueue_t enqueue_cipher;
+ cryptodev_sym_hw_crypto_enqueue_t enqueue_auth;
+ cryptodev_sym_hw_crypto_enqueue_t enqueue_chain;
+ cryptodev_sym_hw_crypto_dequeue_t dequeue;
+ void *reserved[3];
+};
/** Crypto device operations function pointer table */
struct rte_cryptodev_ops {
@@ -348,8 +430,12 @@ struct rte_cryptodev_ops {
/**< Clear a Crypto sessions private data. */
cryptodev_asym_free_session_t asym_session_clear;
/**< Clear a Crypto sessions private data. */
- cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
- /**< process input data synchronously (cpu-crypto). */
+ union {
+ cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
+ /**< process input data synchronously (cpu-crypto). */
+ struct rte_crytodev_sym_hw_dp_ops *sym_hw_enq_deq;
+ /**< Get HW crypto data-path call back functions and data */
+ };
};
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index a7a78dc41..fb7ddb50c 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -106,4 +106,9 @@ EXPERIMENTAL {
# added in 20.08
rte_cryptodev_get_qp_status;
+ rte_cryptodev_sym_hw_crypto_enqueue_aead;
+ rte_cryptodev_sym_hw_crypto_enqueue_cipher;
+ rte_cryptodev_sym_hw_crypto_enqueue_auth;
+ rte_cryptodev_sym_hw_crypto_enqueue_chain;
+ rte_cryptodev_sym_hw_crypto_dequeue;
};
--
2.20.1
* [dpdk-dev] [dpdk-dev v5 2/4] crypto/qat: add support to direct data-path APIs
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 1/4] cryptodev: add " Fan Zhang
@ 2020-07-13 16:57 ` Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 3/4] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 4/4] doc: add cryptodev direct APIs guide Fan Zhang
3 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-13 16:57 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang
This patch adds symmetric crypto data-path API support to the QAT-SYM PMD.
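For illustration, a minimal caller-side sketch of the enqueue path backed by
this patch is shown below. It assumes dev_id/qp_id are already configured and
'sess' is an initialised QAT symmetric session; the buffer and IV variables
(buf_va, buf_iova, buf_len, iv_va, iv_iova, iv_len, user_cookie) are
hypothetical placeholders and error handling is omitted:

  /* Sketch only: one in-place cipher job on a contiguous buffer. */
  struct rte_crypto_vec data = {
          .base = buf_va, .iova = buf_iova, .len = buf_len };
  struct rte_crypto_vec iv = {
          .base = iv_va, .iova = iv_iova, .len = iv_len };
  struct rte_crypto_sgl sgl = { .vec = &data, .num = 1 };
  int32_t status;
  struct rte_crypto_sym_vec vec = {
          .sgl = &sgl, .iv_vec = &iv, .status = &status, .num = 1 };
  union rte_crypto_sym_ofs ofs = { .raw = 0 };
  union rte_cryptodev_hw_session_ctx session = { .crypto_sess = sess };
  void *opaque[1] = { user_cookie };
  uint32_t flags = RTE_CRYPTO_HW_DP_FF_CRYPTO_SESSION |
          RTE_CRYPTO_HW_DP_FF_KICK_QUEUE;

  if (rte_cryptodev_sym_hw_crypto_enqueue_cipher(dev_id, qp_id, session,
                  ofs, &vec, opaque, flags) != vec.num) {
          /* 'status' now holds the error code of the failed job */
  }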
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
drivers/common/qat/Makefile | 1 +
drivers/common/qat/qat_qp.h | 1 +
drivers/crypto/qat/meson.build | 1 +
drivers/crypto/qat/qat_sym.h | 3 +
drivers/crypto/qat/qat_sym_hw_dp.c | 850 +++++++++++++++++++++++++++++
drivers/crypto/qat/qat_sym_pmd.c | 7 +-
6 files changed, 861 insertions(+), 2 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
index 85d420709..1b71bbbab 100644
--- a/drivers/common/qat/Makefile
+++ b/drivers/common/qat/Makefile
@@ -42,6 +42,7 @@ endif
SRCS-y += qat_sym.c
SRCS-y += qat_sym_session.c
SRCS-y += qat_sym_pmd.c
+ SRCS-y += qat_sym_hw_dp.c
build_qat = yes
endif
endif
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 575d69059..ea40f2050 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -79,6 +79,7 @@ struct qat_qp {
/**< qat device this qp is on */
uint32_t enqueued;
uint32_t dequeued __rte_aligned(4);
+ uint16_t cached;
uint16_t max_inflights;
uint16_t min_enq_burst_threshold;
} __rte_cache_aligned;
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index a225f374a..bc90ec44c 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -15,6 +15,7 @@ if dep.found()
qat_sources += files('qat_sym_pmd.c',
'qat_sym.c',
'qat_sym_session.c',
+ 'qat_sym_hw_dp.c',
'qat_asym_pmd.c',
'qat_asym.c')
qat_ext_deps += dep
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index dbca74efb..383e3c3f7 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -212,11 +212,14 @@ qat_sym_process_response(void **op, uint8_t *resp)
}
*op = (void *)rx_op;
}
+
+extern struct rte_crytodev_sym_hw_dp_ops qat_hw_dp_ops;
#else
static inline void
qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
{
}
+
#endif
#endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
new file mode 100644
index 000000000..8a946c563
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -0,0 +1,850 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym.h"
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+#include "qat_qp.h"
+
+static __rte_always_inline int32_t
+qat_sym_dp_fill_sgl(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_sgl *sgl)
+{
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sgl *list;
+ uint32_t i;
+ uint32_t total_len = 0;
+
+ if (!sgl)
+ return -EINVAL;
+ if (sgl->num < 2 || sgl->num > QAT_SYM_SGL_MAX_NUMBER || !sgl->vec)
+ return -EINVAL;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
+ list = (struct qat_sgl *)&cookie->qat_sgl_src;
+
+ for (i = 0; i < sgl->num; i++) {
+ list->buffers[i].len = sgl->vec[i].len;
+ list->buffers[i].resrvd = 0;
+ list->buffers[i].addr = sgl->vec[i].iova;
+ if (total_len + sgl->vec[i].len > UINT32_MAX) {
+ QAT_DP_LOG(ERR, "Message too long");
+ return -ENOMEM;
+ }
+ total_len += sgl->vec[i].len;
+ }
+
+ list->num_bufs = i;
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ req->comn_mid.src_length = req->comn_mid.dst_length = 0;
+ return total_len;
+}
+
+static __rte_always_inline void
+set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_vec *iv, uint32_t iv_len,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
+ /* copy IV into request if it fits */
+ if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+ rte_memcpy(cipher_param->u.cipher_IV_array, iv->base, iv_len);
+ else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr = iv->iova;
+ }
+}
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+ (ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+#define QAT_SYM_DP_IS_VEC_VALID(qp, flags, n) \
+ (((qp)->service_type == QAT_SERVICE_SYMMETRIC) && \
+ (flags & RTE_CRYPTO_HW_DP_FF_SESSIONLESS) == 0 && \
+ (flags & RTE_CRYPTO_HW_DP_FF_SECURITY_SESSION) == 0 && \
+ ((qp)->enqueued + (qp)->cached + (n) < qp->nb_descriptors - 1))
+
+static __rte_always_inline void
+qat_sym_dp_update_tx_queue(struct qat_qp *qp, struct qat_queue *tx_queue,
+ uint32_t tail, uint32_t n, uint32_t flags)
+{
+ if (unlikely((flags & RTE_CRYPTO_HW_DP_FF_KICK_QUEUE) ||
+ qp->cached + n > QAT_CSR_HEAD_WRITE_THRESH)) {
+ qp->enqueued += n;
+ qp->stats.enqueued_count += n;
+
+ tx_queue->tail = tail;
+
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number, tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ qp->cached = 0;
+
+ return;
+ }
+
+ qp->cached += n;
+}
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+ uint32_t i;
+
+ for (i = 0; i < n; i++)
+ sta[i] = status;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_dp_enqueue_aead(struct rte_cryptodev *dev, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags)
+{
+ struct qat_qp *qp = dev->data->queue_pairs[qp_id];
+ struct rte_cryptodev_sym_session *sess;
+ struct qat_queue *tx_queue;
+ struct qat_sym_session *ctx;
+ uint32_t i;
+ register uint32_t tail;
+
+ if (unlikely(QAT_SYM_DP_IS_VEC_VALID(qp, flags, vec->num) == 0)) {
+ QAT_DP_LOG(ERR, "Operation not supported");
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ return 0;
+ }
+
+ sess = session.crypto_sess;
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(sess,
+ dev->driver_id);
+ tx_queue = &qp->tx_q;
+ tail = (tx_queue->tail + qp->cached * tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+
+ for (i = 0; i < vec->num; i++) {
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ struct rte_crypto_sgl *sgl = &vec->sgl[i];
+ struct rte_crypto_vec *iv_vec = &vec->iv_vec[i];
+ struct rte_crypto_vec *aad_vec = &vec->aad_vec[i];
+ struct rte_crypto_vec *digest_vec = &vec->digest_vec[i];
+ uint8_t *aad_data;
+ uint8_t aad_ccm_real_len;
+ uint8_t aad_len_field_sz;
+ uint32_t aead_len, msg_len_be;
+ rte_iova_t aad_iova = 0;
+ uint8_t q;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req,
+ (const uint8_t *)&(ctx->fw_req));
+
+ if (i == 0 || (flags & RTE_CRYPTO_HW_DP_FF_SET_OPAQUE_ARRAY))
+ req->comn_mid.opaque_data = (uint64_t)opaque[i];
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ sgl->vec[0].iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ sgl->vec[0].len;
+
+ aead_len = sgl->vec[0].len - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ iv_vec->base, ctx->cipher_iv.length);
+ aad_iova = aad_vec->iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ aad_data = aad_vec->base;
+ aad_iova = aad_vec->iova;
+ aad_ccm_real_len = 0;
+ aad_len_field_sz = 0;
+ msg_len_be = rte_bswap32(aead_len);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ aad_data = iv_vec->base;
+ aad_iova = iv_vec->iova;
+ }
+
+ q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+ aad_len_field_sz, ctx->digest_length, q);
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be +
+ (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)
+ &aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+ rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len +
+ aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len +
+ aad_len_field_sz;
+ memset(&aad_data[pad_idx], 0, pad_len);
+ }
+
+ rte_memcpy(((uint8_t *)cipher_param->
+ u.cipher_IV_array) +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)iv_vec->base +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+ *(uint8_t *)&cipher_param->
+ u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ rte_memcpy((uint8_t *)aad_vec->base +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)iv_vec->base +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+ }
+ break;
+ default:
+ if (flags & RTE_CRYPTO_HW_DP_FF_ENQUEUE_EXHAUST)
+ break;
+ /* Give up enqueue if exhaust enqueue is not set */
+ QAT_DP_LOG(ERR, "Operation not supported");
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ return 0;
+ }
+
+ cipher_param->cipher_offset = ofs.ofs.cipher.head;
+ cipher_param->cipher_length = aead_len;
+ auth_param->auth_off = ofs.ofs.cipher.head;
+ auth_param->auth_len = aead_len;
+ auth_param->auth_res_addr = digest_vec->iova;
+ auth_param->u1.aad_adr = aad_iova;
+
+ /* SGL processing */
+ if (unlikely(sgl->num > 1)) {
+ int total_len = qat_sym_dp_fill_sgl(qp, req, sgl);
+
+ if (total_len < 0) {
+ if (flags & RTE_CRYPTO_HW_DP_FF_ENQUEUE_EXHAUST)
+ break;
+ /* Give up enqueue if exhaust is not set */
+ QAT_DP_LOG(ERR, "Operation not supported");
+ qat_sym_dp_fill_vec_status(vec->status, -1,
+ vec->num);
+ return 0;
+ }
+
+ cipher_param->cipher_length = auth_param->auth_len =
+ total_len - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+ }
+
+ if (ctx->is_single_pass) {
+ cipher_param->spc_aad_addr = aad_iova;
+ cipher_param->spc_auth_res_addr = digest_vec->iova;
+ }
+
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ }
+
+ if (unlikely(i < vec->num))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, vec->num - i);
+
+ qat_sym_dp_update_tx_queue(qp, tx_queue, tail, i, flags);
+
+ return i;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_dp_enqueue_cipher(struct rte_cryptodev *dev, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags)
+{
+ struct qat_qp *qp = dev->data->queue_pairs[qp_id];
+ struct rte_cryptodev_sym_session *sess;
+ struct qat_queue *tx_queue;
+ struct qat_sym_session *ctx;
+ uint32_t i;
+ register uint32_t tail;
+
+ if (unlikely(QAT_SYM_DP_IS_VEC_VALID(qp, flags, vec->num) == 0)) {
+ QAT_DP_LOG(ERR, "Operation not supported");
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ return 0;
+ }
+
+ sess = session.crypto_sess;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(sess,
+ dev->driver_id);
+
+ tx_queue = &qp->tx_q;
+ tail = (tx_queue->tail + qp->cached * tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+
+ for (i = 0; i < vec->num; i++) {
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct rte_crypto_sgl *sgl = &vec->sgl[i];
+ struct rte_crypto_vec *iv_vec = &vec->iv_vec[i];
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req,
+ (const uint8_t *)&(ctx->fw_req));
+
+ if (i == 0 || (flags & RTE_CRYPTO_HW_DP_FF_SET_OPAQUE_ARRAY))
+ req->comn_mid.opaque_data = (uint64_t)opaque[i];
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ sgl->vec[0].iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ sgl->vec[0].len;
+
+ /* cipher IV */
+ set_cipher_iv(cipher_param, iv_vec, ctx->cipher_iv.length, req);
+ cipher_param->cipher_offset = ofs.ofs.cipher.head;
+ cipher_param->cipher_length = sgl->vec[0].len -
+ ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+ /* SGL processing */
+ if (unlikely(sgl->num > 1)) {
+ int total_len = qat_sym_dp_fill_sgl(qp, req, sgl);
+
+ if (total_len < 0) {
+ if (flags & RTE_CRYPTO_HW_DP_FF_ENQUEUE_EXHAUST)
+ break;
+ /* Give up enqueue if exhaust is not set */
+ QAT_DP_LOG(ERR, "Operation not supported");
+ qat_sym_dp_fill_vec_status(vec->status, -1,
+ vec->num);
+ return 0;
+ }
+
+ cipher_param->cipher_length = total_len -
+ ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+ }
+
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ }
+
+ if (unlikely(i < vec->num))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, vec->num - i);
+
+ qat_sym_dp_update_tx_queue(qp, tx_queue, tail, i, flags);
+
+ return i;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_dp_enqueue_auth(struct rte_cryptodev *dev, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags)
+{
+ struct qat_qp *qp = dev->data->queue_pairs[qp_id];
+ struct rte_cryptodev_sym_session *sess;
+ struct qat_queue *tx_queue;
+ struct qat_sym_session *ctx;
+ uint32_t i;
+ register uint32_t tail;
+
+ if (unlikely(QAT_SYM_DP_IS_VEC_VALID(qp, flags, vec->num) == 0)) {
+ QAT_DP_LOG(ERR, "Operation not supported");
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ return 0;
+ }
+
+ sess = session.crypto_sess;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(sess,
+ dev->driver_id);
+
+ tx_queue = &qp->tx_q;
+ tail = (tx_queue->tail + qp->cached * tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+
+ for (i = 0; i < vec->num; i++) {
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ struct rte_crypto_sgl *sgl = &vec->sgl[i];
+ struct rte_crypto_vec *iv_vec = &vec->iv_vec[i];
+ struct rte_crypto_vec *digest_vec = &vec->digest_vec[i];
+ int total_len;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req,
+ (const uint8_t *)&(ctx->fw_req));
+
+ if (i == 0 || (flags & RTE_CRYPTO_HW_DP_FF_SET_OPAQUE_ARRAY))
+ req->comn_mid.opaque_data = (uint64_t)opaque[i];
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ sgl->vec[0].iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ sgl->vec[0].len;
+
+ auth_param->auth_off = ofs.ofs.auth.head;
+ auth_param->auth_len = sgl->vec[0].len - ofs.ofs.auth.head -
+ ofs.ofs.auth.tail;
+ auth_param->auth_res_addr = digest_vec->iova;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = iv_vec->iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ iv_vec->base, ctx->cipher_iv.length);
+ break;
+ default:
+ break;
+ }
+
+ /* SGL processing */
+ if (unlikely(sgl->num > 1)) {
+ total_len = qat_sym_dp_fill_sgl(qp, req, sgl);
+
+ if (total_len < 0) {
+ if (flags & RTE_CRYPTO_HW_DP_FF_ENQUEUE_EXHAUST)
+ break;
+ /* Give up enqueue if exhaust is not set */
+ QAT_DP_LOG(ERR, "Operation not supported");
+ qat_sym_dp_fill_vec_status(vec->status, -1,
+ vec->num);
+ return 0;
+ }
+
+ cipher_param->cipher_length = auth_param->auth_len =
+ total_len - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+ }
+
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ }
+
+ if (unlikely(i < vec->num))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, vec->num - i);
+
+ qat_sym_dp_update_tx_queue(qp, tx_queue, tail, i, flags);
+
+ return i;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_dp_enqueue_chain(struct rte_cryptodev *dev, uint16_t qp_id,
+ union rte_cryptodev_hw_session_ctx session,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec,
+ void **opaque, uint32_t flags)
+{
+ struct qat_qp *qp = dev->data->queue_pairs[qp_id];
+ struct rte_cryptodev_sym_session *sess;
+ struct qat_queue *tx_queue;
+ struct qat_sym_session *ctx;
+ uint32_t i;
+ register uint32_t tail;
+
+ if (unlikely(QAT_SYM_DP_IS_VEC_VALID(qp, flags, vec->num) == 0)) {
+ QAT_DP_LOG(ERR, "Operation not supported");
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ return 0;
+ }
+
+ sess = session.crypto_sess;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(sess,
+ dev->driver_id);
+
+ tx_queue = &qp->tx_q;
+ tail = (tx_queue->tail + qp->cached * tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+
+ for (i = 0; i < vec->num; i++) {
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ struct rte_crypto_sgl *sgl = &vec->sgl[i];
+ struct rte_crypto_vec *iv_vec = &vec->iv_vec[i];
+ struct rte_crypto_vec *digest_vec = &vec->digest_vec[i];
+ rte_iova_t auth_iova_end;
+ int total_len;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req,
+ (const uint8_t *)&(ctx->fw_req));
+
+ if (i == 0 || (flags & RTE_CRYPTO_HW_DP_FF_SET_OPAQUE_ARRAY))
+ req->comn_mid.opaque_data = (uint64_t)opaque[i];
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ sgl->vec[0].iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ sgl->vec[0].len;
+
+ cipher_param->cipher_offset = ofs.ofs.cipher.head;
+ cipher_param->cipher_length = sgl->vec[0].len -
+ ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+ set_cipher_iv(cipher_param, iv_vec, ctx->cipher_iv.length, req);
+
+ auth_param->auth_off = ofs.ofs.auth.head;
+ auth_param->auth_len = sgl->vec[0].len -
+ ofs.ofs.auth.head - ofs.ofs.auth.tail;
+ auth_param->auth_res_addr = digest_vec->iova;
+
+ /* SGL processing */
+ if (unlikely(sgl->num > 1)) {
+ total_len = qat_sym_dp_fill_sgl(qp, req, sgl);
+
+ if (total_len < 0) {
+ if (flags & RTE_CRYPTO_HW_DP_FF_ENQUEUE_EXHAUST)
+ break;
+ /* Give up enqueue if exhaust is not set */
+ QAT_DP_LOG(ERR, "Operation not supported");
+ qat_sym_dp_fill_vec_status(vec->status, -1,
+ vec->num);
+ return 0;
+ }
+
+ cipher_param->cipher_length = auth_param->auth_len =
+ total_len - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+ }
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = iv_vec->iova;
+
+ if (unlikely(sgl->num > 1)) {
+ int auth_end_get = 0, i = sgl->num - 1;
+ struct rte_crypto_vec *cvec = &sgl->vec[i];
+ uint32_t len;
+
+ if (total_len - ofs.ofs.auth.tail < 0) {
+ if (flags &
+ RTE_CRYPTO_HW_DP_FF_ENQUEUE_EXHAUST)
+ break;
+ /* Give up enqueue if exhaust not set */
+ QAT_DP_LOG(ERR, "Incorrect length");
+ qat_sym_dp_fill_vec_status(vec->status,
+ -1, vec->num);
+ return 0;
+ }
+
+ len = total_len - ofs.ofs.auth.tail;
+
+ while (i >= 0 && len > 0) {
+ if (cvec->len >= len) {
+ auth_iova_end = cvec->iova +
+ (cvec->len - len);
+ len = 0;
+ auth_end_get = 1;
+ break;
+ }
+ len -= cvec->len;
+ i--;
+ cvec--;
+ }
+
+ if (!auth_end_get) {
+ QAT_DP_LOG(ERR, "Failed to get end");
+ if (flags &
+ RTE_CRYPTO_HW_DP_FF_ENQUEUE_EXHAUST)
+ break;
+ /* Give up enqueue if exhaust not set */
+ QAT_DP_LOG(ERR, "Incorrect length");
+ qat_sym_dp_fill_vec_status(vec->status,
+ -1, vec->num);
+ return 0;
+ }
+ } else
+ auth_iova_end = digest_vec->iova +
+ digest_vec->len;
+
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_param->auth_off + auth_param->auth_len <
+ cipher_param->cipher_offset +
+ cipher_param->cipher_length) &&
+ (digest_vec->iova == auth_iova_end)) {
+ /* Handle partial digest encryption */
+ if (cipher_param->cipher_offset +
+ cipher_param->cipher_length <
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length)
+ req->comn_mid.dst_length =
+ req->comn_mid.src_length =
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length;
+ struct icp_qat_fw_comn_req_hdr *header =
+ &req->comn_hdr;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as auth algo not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ }
+
+ if (unlikely(i < vec->num))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, vec->num - i);
+
+ qat_sym_dp_update_tx_queue(qp, tx_queue, tail, i, flags);
+
+ return i;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_dp_dequeue(struct rte_cryptodev *dev, uint16_t qp_id,
+ rte_cryptodev_get_dequeue_count_t get_dequeue_count,
+ rte_cryptodev_post_dequeue_t post_dequeue,
+ void **out_opaque,
+ uint32_t *n_success_jobs, uint32_t flags)
+{
+ struct qat_qp *qp = dev->data->queue_pairs[qp_id];
+ register struct qat_queue *rx_queue;
+ struct icp_qat_fw_comn_resp *resp, *last_resp = 0;
+ void *resp_opaque;
+ uint32_t i, n;
+ uint32_t head;
+ uint8_t status;
+
+ *n_success_jobs = 0;
+ rx_queue = &qp->rx_q;
+ head = rx_queue->head;
+
+ resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+ head);
+ /* no operation ready */
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+ return 0;
+
+ resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+ /* get the dequeue count */
+ n = get_dequeue_count(resp_opaque);
+ assert(n > 0);
+
+ out_opaque[0] = resp_opaque;
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+ post_dequeue(resp_opaque, 0, status);
+ *n_success_jobs += status;
+
+ /* we already finished dequeue when n == 1 */
+ if (unlikely(n == 1)) {
+ i = 1;
+ goto update_rx_queue;
+ }
+
+ last_resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + ((head + rx_queue->msg_size *
+ (n - 2)) & rx_queue->modulo_mask));
+
+ /* if EXHAUST is set, dequeue as many responses as are ready */
+ if (flags & RTE_CRYPTO_HW_DP_FF_DEQUEUE_EXHAUST) {
+ if (flags & RTE_CRYPTO_HW_DP_FF_GET_OPAQUE_ARRAY) {
+ for (i = 1; i < n - 1; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ if (unlikely(*(uint32_t *)resp ==
+ ADF_RING_EMPTY_SIG))
+ goto update_rx_queue;
+ out_opaque[i] = (void *)(uintptr_t)
+ resp->opaque_data;
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+ *n_success_jobs += status;
+ post_dequeue(out_opaque[i], i, status);
+ head = (head + rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+ }
+
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(last_resp);
+ out_opaque[i] = (void *)(uintptr_t)
+ last_resp->opaque_data;
+ post_dequeue(out_opaque[i], i, status);
+ *n_success_jobs += status;
+ i++;
+ head = (head + rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+ goto update_rx_queue;
+ }
+
+ /* (flags & RTE_CRYPTO_HW_DP_FF_GET_OPAQUE_ARRAY) == 0 */
+ for (i = 1; i < n - 1; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+ goto update_rx_queue;
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+ head = (head + rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+ post_dequeue(resp_opaque, i, status);
+ *n_success_jobs += status;
+ }
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(last_resp);
+ post_dequeue(resp_opaque, i, status);
+ *n_success_jobs += status;
+ i++;
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ goto update_rx_queue;
+ }
+
+ /* not all operations are ready */
+ if (unlikely(*(uint32_t *)last_resp == ADF_RING_EMPTY_SIG))
+ return 0;
+
+ if (flags & RTE_CRYPTO_HW_DP_FF_GET_OPAQUE_ARRAY) {
+ for (i = 1; i < n - 1; i++) {
+ uint8_t status;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ out_opaque[i] = (void *)(uintptr_t)resp->opaque_data;
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+ *n_success_jobs += status;
+ post_dequeue(out_opaque[i], i, status);
+ head = (head + rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+ }
+ out_opaque[i] = (void *)(uintptr_t)last_resp->opaque_data;
+ post_dequeue(out_opaque[i], i,
+ QAT_SYM_DP_IS_RESP_SUCCESS(last_resp));
+ i++;
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ goto update_rx_queue;
+ }
+
+ /* (flags & RTE_CRYPTO_HW_DP_FF_GET_OPAQUE_ARRAY) == 0 */
+ for (i = 1; i < n - 1; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+ *n_success_jobs += status;
+ post_dequeue(resp_opaque, i, status);
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ }
+
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(last_resp);
+ *n_success_jobs += status;
+ post_dequeue(resp_opaque, i, status);
+ i++;
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+update_rx_queue:
+ rx_queue->head = head;
+ rx_queue->nb_processed_responses += i;
+ qp->dequeued += i;
+ qp->stats.dequeued_count += i;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+ uint32_t old_head, new_head;
+ uint32_t max_head;
+
+ old_head = rx_queue->csr_head;
+ new_head = rx_queue->head;
+ max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+ /* write out free descriptors */
+ void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+ if (new_head < old_head) {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+ max_head - old_head);
+ memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+ new_head);
+ } else {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+ old_head);
+ }
+ rx_queue->nb_processed_responses = 0;
+ rx_queue->csr_head = new_head;
+
+ /* write current head to CSR */
+ WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+ rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+ new_head);
+ }
+
+ return i;
+}
+
+struct rte_crytodev_sym_hw_dp_ops qat_hw_dp_ops = {
+ .enqueue_aead = qat_sym_dp_enqueue_aead,
+ .enqueue_cipher = qat_sym_dp_enqueue_cipher,
+ .enqueue_auth = qat_sym_dp_enqueue_auth,
+ .enqueue_chain = qat_sym_dp_enqueue_chain,
+ .dequeue = qat_sym_dp_dequeue
+};
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index c7e323cce..ba6c2130f 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -259,7 +259,9 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
/* Crypto related operations */
.sym_session_get_size = qat_sym_session_get_private_size,
.sym_session_configure = qat_sym_session_configure,
- .sym_session_clear = qat_sym_session_clear
+ .sym_session_clear = qat_sym_session_clear,
+
+ .sym_hw_enq_deq = &qat_hw_dp_ops
};
#ifdef RTE_LIBRTE_SECURITY
@@ -382,7 +384,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
- RTE_CRYPTODEV_FF_SECURITY;
+ RTE_CRYPTODEV_FF_SECURITY |
+ RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
--
2.20.1
* [dpdk-dev] [dpdk-dev v5 3/4] test/crypto: add unit-test for cryptodev direct APIs
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 1/4] cryptodev: add " Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 2/4] crypto/qat: add support to direct " Fan Zhang
@ 2020-07-13 16:57 ` Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 4/4] doc: add cryptodev direct APIs guide Fan Zhang
3 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-13 16:57 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang
This patch adds QAT test cases that exercise the cryptodev symmetric crypto
direct APIs.
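For reference, the new suite is registered as the
"cryptodev_qat_sym_api_autotest" command; assuming the usual DPDK unit-test
workflow, it can be launched from the dpdk-test application's interactive
prompt, for example:

    RTE>> cryptodev_qat_sym_api_autotest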
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
app/test/test_cryptodev.c | 367 ++++++++++++++++++++++++--
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 ++--
3 files changed, 386 insertions(+), 37 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index e71e73ae1..5e168c124 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -57,6 +57,8 @@ static int gbl_driver_id;
static enum rte_security_session_action_type gbl_action_type =
RTE_SECURITY_ACTION_TYPE_NONE;
+int hw_dp_test;
+
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
struct rte_mempool *large_mbuf_pool;
@@ -147,6 +149,168 @@ ceil_byte_length(uint32_t num_bits)
return (num_bits >> 3);
}
+static uint32_t
+get_dequeue_count(void *opaque __rte_unused)
+{
+ return 1;
+}
+
+static void
+write_status(void *opaque __rte_unused, uint32_t index __rte_unused,
+ uint8_t is_op_success)
+{
+ struct rte_crypto_op *op = opaque;
+ op->status = is_op_success ? RTE_CRYPTO_OP_STATUS_SUCCESS :
+ RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+void
+process_sym_hw_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits)
+{
+ int32_t n;
+ struct rte_crypto_sym_op *sop;
+ struct rte_crypto_sym_vec vec;
+ struct rte_crypto_sgl sgl;
+ struct rte_crypto_op *ret_op = NULL;
+ struct rte_crypto_vec data_vec[UINT8_MAX], iv_vec, aad_vec, digest_vec;
+ union rte_crypto_sym_ofs ofs;
+ int32_t status;
+ uint32_t min_ofs, max_len, nb_ops;
+ uint32_t n_success_ops;
+ union rte_cryptodev_hw_session_ctx sess;
+ enum {
+ cipher = 0,
+ auth,
+ chain,
+ aead
+ } hw_dp_test_type;
+ uint32_t count = 0;
+ uint32_t flags = RTE_CRYPTO_HW_DP_FF_CRYPTO_SESSION |
+ RTE_CRYPTO_HW_DP_FF_SET_OPAQUE_ARRAY |
+ RTE_CRYPTO_HW_DP_FF_KICK_QUEUE;
+
+ memset(&vec, 0, sizeof(vec));
+
+ vec.sgl = &sgl;
+ vec.iv_vec = &iv_vec;
+ vec.aad_vec = &aad_vec;
+ vec.digest_vec = &digest_vec;
+ vec.status = &status;
+ vec.num = 1;
+
+ sop = op->sym;
+
+ sess.crypto_sess = sop->session;
+
+ if (is_cipher && is_auth) {
+ hw_dp_test_type = chain;
+ min_ofs = RTE_MIN(sop->cipher.data.offset,
+ sop->auth.data.offset);
+ max_len = RTE_MAX(sop->cipher.data.length,
+ sop->auth.data.length);
+ } else if (is_cipher) {
+ hw_dp_test_type = cipher;
+ min_ofs = sop->cipher.data.offset;
+ max_len = sop->cipher.data.length;
+ } else if (is_auth) {
+ hw_dp_test_type = auth;
+ min_ofs = sop->auth.data.offset;
+ max_len = sop->auth.data.length;
+ } else { /* aead */
+ hw_dp_test_type = aead;
+ min_ofs = sop->aead.data.offset;
+ max_len = sop->aead.data.length;
+ }
+
+ if (len_in_bits) {
+ max_len = max_len >> 3;
+ min_ofs = min_ofs >> 3;
+ }
+
+ n = rte_crypto_mbuf_to_vec(sop->m_src, 0, min_ofs + max_len,
+ data_vec, RTE_DIM(data_vec));
+ if (n < 0 || n != sop->m_src->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ sgl.vec = data_vec;
+ sgl.num = n;
+
+ ofs.raw = 0;
+
+ iv_vec.base = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ iv_vec.iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+
+ switch (hw_dp_test_type) {
+ case aead:
+ ofs.ofs.cipher.head = sop->cipher.data.offset;
+ aad_vec.base = (void *)sop->aead.aad.data;
+ aad_vec.iova = sop->aead.aad.phys_addr;
+ digest_vec.base = (void *)sop->aead.digest.data;
+ digest_vec.iova = sop->aead.digest.phys_addr;
+ if (len_in_bits) {
+ ofs.ofs.cipher.head >>= 3;
+ ofs.ofs.cipher.tail >>= 3;
+ }
+ nb_ops = rte_cryptodev_sym_hw_crypto_enqueue_aead(dev_id, qp_id,
+ sess, ofs, &vec, (void **)&op, flags);
+ break;
+ case cipher:
+ ofs.ofs.cipher.head = sop->cipher.data.offset;
+ if (len_in_bits) {
+ ofs.ofs.cipher.head >>= 3;
+ ofs.ofs.cipher.tail >>= 3;
+ }
+ nb_ops = rte_cryptodev_sym_hw_crypto_enqueue_cipher(dev_id,
+ qp_id, sess, ofs, &vec, (void **)&op, flags);
+ break;
+ case auth:
+ ofs.ofs.auth.head = sop->auth.data.offset;
+ digest_vec.base = (void *)sop->auth.digest.data;
+ digest_vec.iova = sop->auth.digest.phys_addr;
+ nb_ops = rte_cryptodev_sym_hw_crypto_enqueue_auth(dev_id, qp_id,
+ sess, ofs, &vec, (void **)&op, flags);
+ break;
+ case chain:
+ ofs.ofs.cipher.head =
+ sop->cipher.data.offset - sop->auth.data.offset;
+ ofs.ofs.cipher.tail =
+ (sop->auth.data.offset + sop->auth.data.length) -
+ (sop->cipher.data.offset + sop->cipher.data.length);
+ if (len_in_bits) {
+ ofs.ofs.cipher.head >>= 3;
+ ofs.ofs.cipher.tail >>= 3;
+ }
+ digest_vec.base = (void *)sop->auth.digest.data;
+ digest_vec.iova = sop->auth.digest.phys_addr;
+ nb_ops = rte_cryptodev_sym_hw_crypto_enqueue_chain(dev_id,
+ qp_id, sess, ofs, &vec, (void **)&op, flags);
+ break;
+ }
+
+ if (nb_ops < vec.num) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ nb_ops = 0;
+ flags = RTE_CRYPTO_HW_DP_FF_GET_OPAQUE_ARRAY |
+ RTE_CRYPTO_HW_DP_FF_DEQUEUE_EXHAUST;
+ while (count++ < 1024 && nb_ops < vec.num) {
+ nb_ops = rte_cryptodev_sym_hw_crypto_dequeue(dev_id, qp_id,
+ get_dequeue_count, write_status, (void **)&ret_op,
+ &n_success_ops, flags);
+ }
+
+ if (count == 1024 || n_success_ops == 0 || nb_ops == 0 ||
+ ret_op != op) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+}
+
static void
process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
{
@@ -2456,7 +2620,11 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -2535,7 +2703,11 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2605,6 +2777,9 @@ test_kasumi_authentication(const struct kasumi_hash_test_data *tdata)
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_crypt_auth_op(ts_params->valid_devs[0],
ut_params->op);
+ else if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
else
ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
@@ -2676,7 +2851,11 @@ test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2883,8 +3062,12 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
- ut_params->op);
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -2969,7 +3152,11 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3292,7 +3479,11 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3367,7 +3558,11 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3742,7 +3937,11 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -3910,7 +4109,11 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4005,7 +4208,11 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4141,7 +4348,11 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4330,7 +4541,11 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4512,7 +4727,11 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4702,7 +4921,11 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4843,7 +5066,11 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4930,7 +5157,11 @@ test_zuc_encryption(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5017,7 +5248,11 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5105,7 +5340,11 @@ test_zuc_authentication(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5237,7 +5476,11 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5423,7 +5666,11 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -7029,6 +7276,9 @@ test_authenticated_encryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (hw_dp_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -8521,6 +8771,9 @@ test_authenticated_decryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (hw_dp_test == 1)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -11461,6 +11714,9 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
if (oop == IN_PLACE &&
gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (oop == IN_PLACE && hw_dp_test == 1)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -13022,6 +13278,75 @@ test_cryptodev_nitrox(void)
return unit_test_suite_runner(&cryptodev_nitrox_testsuite);
}
+static struct unit_test_suite cryptodev_sym_direct_api_testsuite = {
+ .suite_name = "Crypto Sym direct API Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_cipheronly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_chain_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_qat_sym_direct_api(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ int ret;
+
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check that both "
+ "CONFIG_RTE_LIBRTE_PMD_QAT and CONFIG_RTE_LIBRTE_PMD_QAT_SYM "
+ "are enabled in config file to run this testsuite.\n");
+ return TEST_SKIPPED;
+ }
+
+ hw_dp_test = 1;
+ ret = unit_test_suite_runner(&cryptodev_sym_direct_api_testsuite);
+ hw_dp_test = 0;
+
+ return ret;
+}
+
+REGISTER_TEST_COMMAND(cryptodev_qat_sym_api_autotest, test_qat_sym_direct_api);
REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_mb_autotest,
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 41542e055..c382c12c4 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -71,6 +71,8 @@
#define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
+extern int hw_dp_test;
+
/**
* Write (spread) data from buffer to mbuf data
*
@@ -209,4 +211,8 @@ create_segmented_mbuf(struct rte_mempool *mbuf_pool, int pkt_len,
return NULL;
}
+void
+process_sym_hw_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits);
+
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_blockcipher.c b/app/test/test_cryptodev_blockcipher.c
index 642b54971..26f1c41c9 100644
--- a/app/test/test_cryptodev_blockcipher.c
+++ b/app/test/test_cryptodev_blockcipher.c
@@ -461,25 +461,43 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
/* Process crypto operation */
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Error sending packet for encryption");
- status = TEST_FAILED;
- goto error_exit;
- }
+ if (hw_dp_test) {
+ uint8_t is_cipher = 0, is_auth = 0;
+
+ if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
+ RTE_LOG(DEBUG, USER1,
+ "QAT direct API does not support OOP, Test Skipped.\n");
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "SKIPPED");
+ status = TEST_SUCCESS;
+ goto error_exit;
+ }
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
+ is_cipher = 1;
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
+ is_auth = 1;
+
+ process_sym_hw_api_op(dev_id, 0, op, is_cipher, is_auth, 0);
+ } else {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for encryption");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
- op = NULL;
+ op = NULL;
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
- rte_pause();
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
+ rte_pause();
- if (!op) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Failed to process sym crypto op");
- status = TEST_FAILED;
- goto error_exit;
+ if (!op) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process sym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
}
debug_hexdump(stdout, "m_src(after):",
--
2.20.1
* [dpdk-dev] [dpdk-dev v5 4/4] doc: add cryptodev direct APIs guide
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
` (2 preceding siblings ...)
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 3/4] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
@ 2020-07-13 16:57 ` Fan Zhang
3 siblings, 0 replies; 39+ messages in thread
From: Fan Zhang @ 2020-07-13 16:57 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, Fan Zhang
This patch updates the programmer's guide to demonstrate the usage
and limitations of the cryptodev symmetric crypto data-path APIs.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
doc/guides/prog_guide/cryptodev_lib.rst | 53 +++++++++++++++++++++++++
doc/guides/rel_notes/release_20_08.rst | 8 ++++
2 files changed, 61 insertions(+)
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index c14f750fa..6316fd1a4 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -631,6 +631,59 @@ a call argument. Status different than zero must be treated as error.
For more details, e.g. how to convert an mbuf to an SGL, please refer to an
example usage in the IPsec library implementation.
+Cryptodev Direct Symmetric Crypto Data-plane APIs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The direct symmetric crypto data-path APIs are a set of APIs provided
+specifically for symmetric HW crypto PMDs that support fast data-path
+enqueue/dequeue operations. The direct data-path APIs take advantage of the
+existing Cryptodev APIs for device, queue pair, and session management. They
+are provided as an advanced feature, as an alternative to
+``rte_cryptodev_enqueue_burst`` and ``rte_cryptodev_dequeue_burst``, and are
+designed to let the user develop a close-to-native performance symmetric
+crypto data-path implementation for applications that do not necessarily
+depend on cryptodev operations, cryptodev operation mempools, or mbufs.
+
+Cryptodev PMDs that support this feature present the
+``RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API`` feature flag. On such PMDs the user
+can call ``rte_cryptodev_sym_hw_crypto_enqueue_aead``,
+``rte_cryptodev_sym_hw_crypto_enqueue_cipher``,
+``rte_cryptodev_sym_hw_crypto_enqueue_auth`` or
+``rte_cryptodev_sym_hw_crypto_enqueue_chain`` to enqueue crypto jobs of the
+respective operation type, and ``rte_cryptodev_sym_hw_crypto_dequeue`` to
+retrieve the processed jobs from the hardware queue.
+
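+A minimal sketch of this flow is shown below, assuming a configured and
+started device; the exact signature of ``rte_cryptodev_sym_get_hw_ops`` used
+here is illustrative only:
+
+.. code-block:: c
+
+    struct rte_cryptodev_info info;
+    struct rte_crypto_hw_ops hw_ops;
+    uint8_t dev_id = 0;   /* cryptodev already configured and started */
+    uint16_t qp_id = 0;   /* queue pair already set up */
+
+    /* Only devices advertising the feature flag expose the direct APIs. */
+    rte_cryptodev_info_get(dev_id, &info);
+    if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API))
+        return -ENOTSUP;
+
+    /* Ask the driver to fill hw_ops with its queue pair data pointer and
+     * enqueue/dequeue function pointers (illustrative signature).
+     */
+    if (rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops) < 0)
+        return -1;
+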
+The direct data-plane APIs share the same ``struct rte_crypto_sym_vec``
+structure as the synchronous mode. However, to pass IOVA addresses the user
+is required to provide ``struct rte_crypto_vec`` arrays for the IV, AAD, and
+digests, instead of the void pointers used in synchronous mode.
+
+Unlike a Cryptodev operation, the ``rte_crypto_sym_vec`` structure
+holds only the data fields required by the crypto PMD to execute a single
+job, and is not meant to be stored as opaque data. The user can freely
+allocate the structure on the stack and reuse it to describe all jobs.
+
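+A hypothetical sketch of describing a single job is shown below; the buffers
+(``buf``, ``iv``, ``aad``, ``digest``) and their IOVAs are assumed to be
+prepared by the application, and the member names used for the IV, AAD, and
+digest arrays are placeholders:
+
+.. code-block:: c
+
+    struct rte_crypto_vec data_vec, iv_vec, aad_vec, digest_vec;
+    struct rte_crypto_sgl sgl;
+    struct rte_crypto_sym_vec vec;
+
+    /* Data described as a one-segment scatter-gather list. */
+    data_vec.base = buf;
+    data_vec.iova = buf_iova;
+    data_vec.len = buf_len;
+    sgl.vec = &data_vec;
+    sgl.num = 1;
+
+    /* IV, AAD and digest carry both virtual and IOVA addresses. */
+    iv_vec.base = iv;         iv_vec.iova = iv_iova;         iv_vec.len = iv_len;
+    aad_vec.base = aad;       aad_vec.iova = aad_iova;       aad_vec.len = aad_len;
+    digest_vec.base = digest; digest_vec.iova = digest_iova; digest_vec.len = digest_len;
+
+    vec.num = 1;
+    vec.sgl = &sgl;
+    vec.iv = &iv_vec;         /* placeholder member names */
+    vec.aad = &aad_vec;
+    vec.digest = &digest_vec;
+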
+In addition, to maximize the flexibility of the enqueue/dequeue operations,
+the data-plane APIs support special behaviours selected through the flag
+parameters of both the enqueue and dequeue functions. For example, the flag
+``RTE_CRYPTO_HW_DP_FF_ENQUEUE_EXHAUST`` changes how the PMD behaves: when the
+flag is set the PMD attempts to enqueue as many jobs from the
+``struct rte_crypto_sym_vec`` as the queue allows, whereas when it is unset
+the PMD enqueues either all ``num`` operations or none, depending on the
+queue status.
+
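+A sketch of an enqueue call using this flag follows; the enqueue function
+pointer name, its argument list, and the session/offset variables are
+illustrative only:
+
+.. code-block:: c
+
+    uint32_t n;
+
+    /* Attempt to enqueue as many of vec.num jobs as the queue can take;
+     * returns the number actually enqueued (illustrative call).
+     */
+    n = hw_ops.enqueue_aead(hw_ops.qp, sess, &vec, ofs, opaque,
+            RTE_CRYPTO_HW_DP_FF_ENQUEUE_EXHAUST);
+
+    /* Without the flag, the PMD enqueues either all vec.num jobs or none,
+     * depending on the queue status.
+     */
+    n = hw_ops.enqueue_aead(hw_ops.qp, sess, &vec, ofs, opaque, 0);
+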
+To use the direct symmetric crypto APIs safely, the user has to carefully
+set the correct fields in the ``rte_crypto_sym_vec`` structure, otherwise the
+application or the system may crash. There are also a few limitations to the
+direct symmetric crypto APIs:
+
+* Only in-place operations are supported.
+* The APIs are NOT thread-safe.
+* The direct APIs' enqueue CANNOT be mixed with
+  ``rte_cryptodev_enqueue_burst``, or vice versa.
+
+See the *DPDK API Reference* for details on each API definition.
+
Sample code
-----------
diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
index f19b74872..a24797529 100644
--- a/doc/guides/rel_notes/release_20_08.rst
+++ b/doc/guides/rel_notes/release_20_08.rst
@@ -225,6 +225,14 @@ New Features
See the :doc:`../sample_app_ug/l2_forward_real_virtual` for more
details of this parameter usage.
+* **Added Cryptodev data-path APIs for non-mbuf-centric data paths.**
+
+  Cryptodev gained a set of data-path APIs that are not based on
+  cryptodev operations. The APIs are designed for external applications
+  or libraries that want to use cryptodev but whose data-path
+  implementations are not mbuf-centric. The QAT symmetric PMD is also
+  updated to support these APIs.
+
Removed Items
-------------
--
2.20.1
Thread overview: 39+ messages
2020-06-12 14:39 [dpdk-dev] [PATCH] crypto/qat: add data-path APIs Fan Zhang
2020-06-18 17:50 ` Trahe, Fiona
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 0/3] crypto/qat: add symmetric crypto " Fan Zhang
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 1/3] crypto/qat: add " Fan Zhang
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 2/3] test/crypto: add unit-test for QAT direct APIs Fan Zhang
2020-06-30 17:47 ` Trahe, Fiona
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 3/3] doc: add QAT direct APIs guide Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 1/4] " Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 2/4] " Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 3/4] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 4/4] doc: add cryptodev direct APIs guide Fan Zhang
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 1/4] " Fan Zhang
2020-07-04 18:16 ` Akhil Goyal
2020-07-06 10:02 ` Zhang, Roy Fan
2020-07-06 12:13 ` Akhil Goyal
2020-07-07 12:37 ` Zhang, Roy Fan
2020-07-07 20:37 ` Akhil Goyal
2020-07-08 15:09 ` Zhang, Roy Fan
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 2/4] crypto/qat: add support to direct " Fan Zhang
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 3/4] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-03 12:49 ` [dpdk-dev] [dpdk-dev v4 4/4] doc: add cryptodev direct APIs guide Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 1/4] cryptodev: add " Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 2/4] crypto/qat: add support to direct " Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 3/4] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-13 16:57 ` [dpdk-dev] [dpdk-dev v5 4/4] doc: add cryptodev direct APIs guide Fan Zhang
2020-06-26 6:55 ` [dpdk-dev] [PATCH] crypto/qat: add data-path APIs Jerin Jacob
2020-06-26 10:38 ` [dpdk-dev] [dpdk-techboard] " Thomas Monjalon
2020-06-30 20:33 ` Honnappa Nagarahalli
2020-06-30 21:00 ` Thomas Monjalon