DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH] crypto/qat: add data-path APIs
@ 2020-06-12 14:39 Fan Zhang
  2020-06-18 17:50 ` Trahe, Fiona
                   ` (2 more replies)
  0 siblings, 3 replies; 39+ messages in thread
From: Fan Zhang @ 2020-06-12 14:39 UTC (permalink / raw)
  To: dev; +Cc: akhil.goyal, fiona.trahe, roy.fan.zhang, Piotr Bronowski

This patch adds data-path APIs to the QAT symmetric driver to support
raw data as input.

For applications/libraries that want to benefit from the data-path
encryption acceleration provided by QAT but do not necessarily depend
on DPDK data-path structures (such as VPP), some performance
degradation is unavoidable when converting between their specific data
structures and DPDK cryptodev operations as well as mbufs.

This patch takes advantage of existing QAT implementations to form
symmetric data-path enqueue and dequeue APIs that support raw data
as input so that they can have wider usability towards those
applications/libraries without performance drop caused by the data
structure conversions. In the meantime the less performance-sensitive
cryptodev device and session management remains intact so that DPDK
cryptodev remains to be unified control path library for QAT.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
---
 drivers/common/qat/Makefile                  |   4 +-
 drivers/common/qat/qat_qp.c                  |   4 +-
 drivers/common/qat/qat_qp.h                  |   3 +
 drivers/compress/qat/rte_pmd_qat_version.map |  11 +
 drivers/crypto/qat/meson.build               |   5 +
 drivers/crypto/qat/qat_sym_frame.c           | 294 +++++++++++++++++++
 drivers/crypto/qat/qat_sym_frame.h           | 237 +++++++++++++++
 7 files changed, 555 insertions(+), 3 deletions(-)
 create mode 100644 drivers/crypto/qat/qat_sym_frame.c
 create mode 100644 drivers/crypto/qat/qat_sym_frame.h

diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
index 28bd5668f..3874f75ab 100644
--- a/drivers/common/qat/Makefile
+++ b/drivers/common/qat/Makefile
@@ -39,6 +39,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
 	SRCS-y += qat_sym.c
 	SRCS-y += qat_sym_session.c
 	SRCS-y += qat_sym_pmd.c
+	SRCS-y += qat_sym_frame.c
+
 	build_qat = yes
 endif
 endif
@@ -62,7 +64,7 @@ ifdef build_qat
 	LDLIBS += -lrte_pci -lrte_bus_pci
 
 	# export include files
-	SYMLINK-y-include +=
+	SYMLINK-y-include += qat_sym_frame.h
 
 	# versioning export map
 	EXPORT_MAP := ../../compress/qat/rte_pmd_qat_version.map
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 8e6dd04eb..06e2d8c8a 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,8 @@ txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
 	q->csr_tail = q->tail;
 }
 
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
 {
 	uint32_t old_head, new_head;
 	uint32_t max_head;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 575d69059..8add1b049 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -116,4 +116,7 @@ qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
 			  void *op_cookie __rte_unused,
 			  uint64_t *dequeue_err_count __rte_unused);
 
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q);
+
 #endif /* _QAT_QP_H_ */
diff --git a/drivers/compress/qat/rte_pmd_qat_version.map b/drivers/compress/qat/rte_pmd_qat_version.map
index f9f17e4f6..a9160b157 100644
--- a/drivers/compress/qat/rte_pmd_qat_version.map
+++ b/drivers/compress/qat/rte_pmd_qat_version.map
@@ -1,3 +1,14 @@
 DPDK_20.0 {
 	local: *;
 };
+
+EXPERIMENTAL {
+	global:
+
+	qat_sym_get_qp;
+	qat_sym_enqueue_frame_aead;
+	qat_sym_enqueue_frame_cipher;
+	qat_sym_enqueue_frame_auth;
+	qat_sym_enqueue_frame_chain;
+	qat_sym_dequeue_frame;
+};
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index fc65923a7..8d53debcf 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -13,9 +13,14 @@ if dep.found()
 	qat_sources += files('qat_sym_pmd.c',
 			     'qat_sym.c',
 			     'qat_sym_session.c',
+			     'qat_sym_frame.c',
 			     'qat_asym_pmd.c',
 			     'qat_asym.c')
 	qat_ext_deps += dep
 	qat_cflags += '-DBUILD_QAT_SYM'
 	qat_cflags += '-DBUILD_QAT_ASYM'
+	headers = files(
+		'qat_sym_frame.h',
+	)
+	use_function_versioning = true
 endif
diff --git a/drivers/crypto/qat/qat_sym_frame.c b/drivers/crypto/qat/qat_sym_frame.c
new file mode 100644
index 000000000..27656c970
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_frame.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+#include "qat_sym_frame.h"
+#include "qat_qp.h"
+
+/*
+ * Look up the QAT symmetric queue pair for (dev_id, qp_id).
+ * Returns the qp pointer, or NULL when the device is not a QAT sym
+ * PMD, the qp index is out of range, or the qp is not configured for
+ * the symmetric service.
+ */
+void *
+qat_sym_get_qp(uint8_t dev_id, uint16_t qp_id)
+{
+	struct rte_cryptodev *dev;
+	struct qat_qp *qp;
+	const char *drv_name;
+
+	/* make sure it is a QAT device */
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
+		return NULL;
+	dev = rte_cryptodev_pmd_get_dev(dev_id);
+	drv_name = rte_cryptodev_driver_name_get(dev->driver_id);
+	/* valid queue pair indexes are 0..nb_queue_pairs-1, reject >= */
+	if ((strncmp(drv_name, RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD),
+			sizeof(RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD))) != 0) ||
+			(qp_id >= dev->data->nb_queue_pairs))
+		return NULL;
+
+	qp = dev->data->queue_pairs[qp_id];
+	/* the qp may not have been set up yet */
+	if (qp == NULL || qp->service_type != QAT_SERVICE_SYMMETRIC)
+		return NULL;
+
+	return (void *)qp;
+}
+
+/*
+ * Enqueue a single AEAD element of a frame into the QAT TX ring.
+ * Only GCM is supported; the element is not submitted to HW until the
+ * element with is_last set kicks the ring tail.
+ */
+int
+qat_sym_enqueue_frame_aead(void *qat_sym_qp,
+		struct rte_cryptodev_sym_session *session,
+		rte_iova_t data_iova, uint32_t cipher_ofs, uint32_t cipher_len,
+		struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
+		uint8_t *iv, rte_iova_t tag_iova, rte_iova_t aad_iova,
+		uint8_t is_first, uint8_t is_last, void *frame)
+{
+	struct qat_qp *qp = qat_sym_qp;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	uint8_t *msg = (uint8_t *)tx_queue->base_addr + tx_queue->tail;
+	struct qat_sym_session *ctx;
+	struct icp_qat_fw_la_bulk_req *req =
+			(struct icp_qat_fw_la_bulk_req *)msg;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	struct icp_qat_fw_la_auth_req_params *auth_param;
+
+	ctx = (struct qat_sym_session *)get_sym_session_private_data(
+			session, cryptodev_qat_driver_id);
+
+	/* TODO: add support to non-gcm algorithms.
+	 * Validate before the ring slot is written so a rejected call
+	 * leaves the queue untouched.
+	 */
+	if (ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_GALOIS_128 &&
+		ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_GALOIS_64)
+		return -1;
+
+	/* copy the session's prebuilt request template into the slot */
+	rte_mov128(msg, (const uint8_t *)&(ctx->fw_req));
+
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr = data_iova;
+	req->comn_mid.src_length = req->comn_mid.dst_length = cipher_ofs +
+			cipher_len;
+
+	/* since we know it is GCM, iv has to be 12 bytes */
+	ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+
+	/* only the first element carries the frame pointer so that
+	 * dequeue can recover the frame from opaque_data
+	 */
+	if (unlikely(is_first != 0))
+		req->comn_mid.opaque_data = (uintptr_t)frame;
+
+	/* AEAD sessions store the IV in cipher_iv, not auth_iv */
+	rte_memcpy_generic(cipher_param->u.cipher_IV_array, iv,
+			ctx->cipher_iv.length);
+
+	if (ctx->is_single_pass) {
+		cipher_param->spc_aad_addr = aad_iova;
+		cipher_param->spc_auth_res_addr = tag_iova;
+	}
+
+	if (sgl) {
+		if (!n_sgl_vecs)
+			return -1;
+		/* TODO: sgl process. Until implemented, fail instead of
+		 * enqueuing a request with no SGL set up.
+		 */
+		return -1;
+	} else {
+		cipher_param->cipher_offset = cipher_ofs;
+		cipher_param->cipher_length = cipher_len;
+		auth_param->auth_off = cipher_ofs;
+		auth_param->auth_len = cipher_len;
+		auth_param->u1.aad_adr = aad_iova;
+		auth_param->auth_res_addr = tag_iova;
+	}
+
+	tx_queue->tail = (tx_queue->tail + tx_queue->msg_size) &
+			tx_queue->modulo_mask;
+
+	/* the last element of the frame rings the doorbell once for all */
+	if (unlikely(is_last != 0)) {
+		qp->enqueued++;
+		qp->stats.enqueued_count++;
+		WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+				tx_queue->hw_bundle_number,
+				tx_queue->hw_queue_number,
+				tx_queue->tail);
+		tx_queue->csr_tail = tx_queue->tail;
+	}
+
+	return 0;
+}
+
+/* Enqueue one cipher+hash chaining element. Not implemented yet. */
+int
+qat_sym_enqueue_frame_chain(__rte_unused void *qat_sym_qp,
+		__rte_unused struct rte_cryptodev_sym_session *session,
+		__rte_unused rte_iova_t data_iova,
+		__rte_unused uint32_t cipher_ofs,
+		__rte_unused uint32_t cipher_len,
+		__rte_unused uint32_t auth_ofs,
+		__rte_unused uint32_t auth_len,
+		__rte_unused struct rte_crypto_vec *sgl,
+		__rte_unused uint32_t n_sgl_vecs,
+		__rte_unused uint8_t *iv, __rte_unused rte_iova_t digest_iova,
+		__rte_unused uint8_t is_first,
+		__rte_unused uint8_t is_last, __rte_unused void *frame)
+{
+	/* TODO: implement the body.
+	 * Report failure rather than claiming success for a no-op.
+	 */
+	return -1;
+}
+
+/* Enqueue one cipher-only element. Not implemented yet. */
+int
+qat_sym_enqueue_frame_cipher(__rte_unused void *qat_sym_qp,
+		__rte_unused struct rte_cryptodev_sym_session *session,
+		__rte_unused rte_iova_t data_iova,
+		__rte_unused uint32_t cipher_ofs,
+		__rte_unused uint32_t cipher_len,
+		__rte_unused struct rte_crypto_vec *sgl,
+		__rte_unused uint32_t n_sgl_vecs,
+		__rte_unused uint8_t *iv,
+		__rte_unused uint8_t is_first,
+		__rte_unused uint8_t is_last, __rte_unused void *frame)
+{
+	/* TODO: implement the body.
+	 * Report failure rather than claiming success for a no-op.
+	 */
+	return -1;
+}
+
+/* Enqueue one auth-only element. Not implemented yet. */
+int
+qat_sym_enqueue_frame_auth(__rte_unused void *qat_sym_qp,
+		__rte_unused struct rte_cryptodev_sym_session *session,
+		__rte_unused rte_iova_t data_iova,
+		__rte_unused uint32_t auth_ofs,
+		__rte_unused uint32_t auth_len,
+		__rte_unused struct rte_crypto_vec *sgl,
+		__rte_unused uint32_t n_sgl_vecs,
+		__rte_unused uint8_t *iv, __rte_unused rte_iova_t digest_iova,
+		__rte_unused uint8_t is_first,
+		__rte_unused uint8_t is_last, __rte_unused void *frame)
+{
+	/* TODO: implement the body.
+	 * Report failure rather than claiming success for a no-op.
+	 */
+	return -1;
+}
+
+/* address of the (i)-th response message after ring offset (h) in (q);
+ * all macro arguments parenthesized to avoid precedence surprises
+ */
+#define get_rx_queue_message_at_index(q, h, i) \
+	(void *)((uint8_t *)(q)->base_addr + (((h) + (q)->msg_size * (i)) & \
+	(q)->modulo_mask))
+
+/* return non-zero when firmware flagged the response as OK */
+static __rte_always_inline int
+qat_is_rx_msg_ok(struct icp_qat_fw_comn_resp *resp_msg)
+{
+	return ICP_QAT_FW_COMN_STATUS_FLAG_OK ==
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+					resp_msg->comn_hdr.comn_status);
+}
+
+/*
+ * Dequeue one frame's worth of responses from the RX ring, writing a
+ * per-element status byte into the frame at
+ * first_status_offset + k * element_interval.
+ * Returns -1 when no frame is available, 0 when all elements passed,
+ * or the negated number of failed elements.
+ */
+int
+qat_sym_dequeue_frame(void *qat_sym_qp, void **frame,
+		qat_qp_get_frame_n_element_t get_frame_n_elt,
+		uint32_t first_status_offset, uint32_t element_interval,
+		uint8_t element_status_success, uint8_t element_status_error)
+{
+	struct qat_qp *qp = qat_sym_qp;
+	struct qat_queue *rx_queue = &qp->rx_q;
+	struct icp_qat_fw_comn_resp *resp, *resp1, *resp2, *resp3;
+	void *f = NULL;
+	uint32_t n_elts, i;
+	uint8_t *status, *status1, *status2, *status3;
+	int n_fail = 0, n_fail1 = 0, n_fail2 = 0, n_fail3 = 0;
+	uint32_t head = rx_queue->head;
+
+	resp = (struct icp_qat_fw_comn_resp *)(
+			(uint8_t *)rx_queue->base_addr + head);
+
+	/* if no response has been produced yet, report "no frame" */
+	if (*(uint32_t *)resp == ADF_RING_EMPTY_SIG) {
+		*frame = NULL;
+		return -1;
+	}
+
+	/* the first element of a frame carries the frame pointer */
+	f = (void *)(uintptr_t)resp->opaque_data;
+	if (unlikely(f == NULL)) {
+		*frame = NULL;
+		return -1;
+	}
+
+	*frame = f;
+	status = (uint8_t *)f + first_status_offset;
+
+	n_elts = (*get_frame_n_elt)(f);
+	if (unlikely(n_elts == 0))
+		return -1;
+
+	/* NOTE(review): assumes all n_elts responses are already in the
+	 * ring once the first one is - confirm HW completes frames in
+	 * order before relying on this.
+	 */
+
+	/* process the first message */
+	if (qat_is_rx_msg_ok(resp))
+		*status = element_status_success;
+	else {
+		*status = element_status_error;
+		n_fail--;
+	}
+
+	status += element_interval;
+	/* advance past the first message before the unrolled loop */
+	head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+	/* process 4 messages per iteration; i counts handled elements.
+	 * "i + 4 <= n_elts" avoids the uint32 underflow of
+	 * "n_elts - 4" for frames shorter than 4 elements.
+	 */
+	for (i = 1; i + 4 <= n_elts; i += 4) {
+		resp = get_rx_queue_message_at_index(rx_queue, head, 0);
+		resp1 = get_rx_queue_message_at_index(rx_queue, head, 1);
+		resp2 = get_rx_queue_message_at_index(rx_queue, head, 2);
+		resp3 = get_rx_queue_message_at_index(rx_queue, head, 3);
+
+		status1 = status + element_interval;
+		status2 = status + element_interval * 2;
+		status3 = status + element_interval * 3;
+
+		if (qat_is_rx_msg_ok(resp))
+			*status = element_status_success;
+		else {
+			*status = element_status_error;
+			n_fail--;
+		}
+
+		if (qat_is_rx_msg_ok(resp1))
+			*status1 = element_status_success;
+		else {
+			*status1 = element_status_error;
+			n_fail1--;
+		}
+
+		if (qat_is_rx_msg_ok(resp2))
+			*status2 = element_status_success;
+		else {
+			*status2 = element_status_error;
+			n_fail2--;
+		}
+
+		if (qat_is_rx_msg_ok(resp3))
+			*status3 = element_status_success;
+		else {
+			*status3 = element_status_error;
+			n_fail3--;
+		}
+
+		status = status3 + element_interval;
+		head = (head + rx_queue->msg_size * 4) & rx_queue->modulo_mask;
+	}
+
+	/* handle the remaining (up to 3) messages one at a time */
+	for (; i < n_elts; i++) {
+		resp = get_rx_queue_message_at_index(rx_queue, head, 0);
+		if (qat_is_rx_msg_ok(resp))
+			*status = element_status_success;
+		else {
+			*status = element_status_error;
+			n_fail--;
+		}
+		status += element_interval;
+		head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+	}
+
+	/* update queue pair head; i == n_elts messages were consumed */
+	rx_queue->head = (rx_queue->head + i * rx_queue->msg_size) &
+			rx_queue->modulo_mask;
+	rx_queue->nb_processed_responses += i;
+	qp->dequeued += i;
+	qp->stats.dequeued_count += i;
+	if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+		rxq_free_desc(qp, rx_queue);
+
+	return n_fail + n_fail1 + n_fail2 + n_fail3;
+}
diff --git a/drivers/crypto/qat/qat_sym_frame.h b/drivers/crypto/qat/qat_sym_frame.h
new file mode 100644
index 000000000..e378cacb8
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_frame.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_FRAME_H_
+#define _QAT_SYM_FRAME_H_
+
+#include <rte_common.h>
+
+/**
+ * Get the QAT queue pair based on device id and queue pair id.
+ * Checks if passed arguments are valid.
+ *
+ * @param dev_id
+ *   cryptodev device id.
+ * @param qp_id
+ *   queue pair id
+ * @return
+ *   pointer to queue pair if passed parameters are valid.
+ *   NULL pointer otherwise.
+ **/
+__rte_experimental
+void *
+qat_sym_get_qp(uint8_t dev_id, uint16_t qp_id);
+
+/**
+ * enqueue one AEAD operation into QAT queue
+ *
+ * @param qat_sym_qp
+ *   queue pair data got from qat_sym_get_qp().
+ * @param session
+ *   configured cryptodev symmetric session data.
+ * @param data_iova
+ *   iova address of data.
+ * @param cipher_ofs
+ *   cipher offset start from data_iova.
+ * @param cipher_len
+ *   cipher total length.
+ * @param sgl
+ *   in case of SGL data, pointer to an array of sgl structure.
+ * @param n_sgl_vecs
+ *   number of SGL vectors in sgl array, 0 for non-sgl input.
+ * @param iv
+ *   pointer to iv data.
+ * @param tag_iova
+ *   Tag iova address.
+ * @param aad_iova
+ *   AAD iova address.
+ * @param is_first
+ *   1 if it is the first operation in the frame.
+ *   0 otherwise.
+ * @param is_last
+ *   1 if the data is the last element in the frame.
+ *   0 otherwise.
+ * @param frame
+ *   if is_first is set the frame pointer will be written in to the message.
+ *
+ * @return
+ *   0 if operation is successful, negative value if otherwise.
+ **/
+
+__rte_experimental
+int
+qat_sym_enqueue_frame_aead(void *qat_sym_qp,
+		struct rte_cryptodev_sym_session *session,
+		rte_iova_t data_iova, uint32_t cipher_ofs, uint32_t cipher_len,
+		struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
+		uint8_t *iv, rte_iova_t tag_iova, rte_iova_t aad_iova,
+		uint8_t is_first, uint8_t is_last, void *frame);
+
+/**
+ * enqueue one chaining operation (cipher and hash) into QAT queue
+ *
+ * @param qat_sym_qp
+ *   queue pair data got from qat_sym_get_qp().
+ * @param session
+ *   configured cryptodev symmetric session data.
+ * @param data_iova
+ *   iova address of data.
+ * @param cipher_ofs
+ *   cipher offset start from data_iova.
+ * @param cipher_len
+ *   cipher total length.
+ * @param sgl
+ *   in case of SGL data, pointer to an array of sgl structure.
+ * @param n_sgl_vecs
+ *   number of SGL vectors in sgl array, 0 for non-sgl input.
+ * @param iv
+ *   pointer to iv data.
+ * @param digest_iova
+ *   Digest iova address.
+ * @param is_first
+ *   1 if it is the first operation in the frame so that opaque is to written
+ *     into QAT queue message that can be retrieved upon dequeue.
+ *   0 otherwise.
+ * @param is_last
+ *   1 if the data is the last element in the frame, so that QAT queue tail
+ *     is kicked and the HW will start processing
+ *   0 otherwise.
+ * @param frame
+ *   if is_first is set the frame pointer will be written in to the message.
+ *
+ * @return
+ *   0 if operation is successful, negative value if otherwise.
+ **/
+__rte_experimental
+int
+qat_sym_enqueue_frame_chain(void *qat_sym_qp,
+		struct rte_cryptodev_sym_session *session,
+		rte_iova_t data_iova, uint32_t cipher_ofs, uint32_t cipher_len,
+		uint32_t auth_ofs, uint32_t auth_len,
+		struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
+		uint8_t *iv, rte_iova_t digest_iova,
+		uint8_t is_first, uint8_t is_last, void *frame);
+
+/**
+ * enqueue one cipher-only operation into QAT queue
+ *
+ * @param qat_sym_qp
+ *   queue pair data got from qat_sym_get_qp().
+ * @param session
+ *   configured cryptodev symmetric session data.
+ * @param data_iova
+ *   iova address of data.
+ * @param cipher_ofs
+ *   cipher offset start from data_iova.
+ * @param cipher_len
+ *   cipher total length.
+ * @param sgl
+ *   in case of SGL data, pointer to an array of sgl structure.
+ * @param n_sgl_vecs
+ *   number of SGL vectors in sgl array, 0 for non-sgl input.
+ * @param iv
+ *   pointer to iv data.
+ * @param is_first
+ *   1 if it is the first operation in the frame.
+ *   0 otherwise.
+ * @param is_last
+ *   1 if the data is the last element in the frame.
+ *   0 otherwise.
+ * @param frame
+ *   if is_first is set the frame pointer will be written in to the message.
+ *
+ * @return
+ *   0 if operation is successful, negative value if otherwise.
+ **/
+
+__rte_experimental
+int
+qat_sym_enqueue_frame_cipher(void *qat_sym_qp,
+		struct rte_cryptodev_sym_session *session,
+		rte_iova_t data_iova, uint32_t cipher_ofs, uint32_t cipher_len,
+		struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
+		uint8_t *iv, uint8_t is_first, uint8_t is_last, void *frame);
+
+/**
+ * enqueue one auth-only operation into QAT queue
+ *
+ * @param qat_sym_qp
+ *   queue pair data got from qat_sym_get_qp().
+ * @param session
+ *   configured cryptodev symmetric session data.
+ * @param data_iova
+ *   iova address of data.
+ * @param auth_ofs
+ *   authentication offset start from data_iova.
+ * @param auth_len
+ *   authentication total length.
+ * @param sgl
+ *   in case of SGL data, pointer to an array of sgl structure.
+ * @param n_sgl_vecs
+ *   number of SGL vectors in sgl array, 0 for non-sgl input.
+ * @param iv
+ *   pointer to iv data.
+ * @param digest_iova
+ *   digest iova address.
+ * @param is_first
+ *   1 if it is the first operation in the frame.
+ *   0 otherwise.
+ * @param is_last
+ *   1 if the data is the last element in the frame.
+ *   0 otherwise.
+ * @param frame
+ *   if is_first is set the frame pointer will be written in to the message.
+ *
+ * @return
+ *   0 if operation is successful, negative value if otherwise.
+ **/
+
+__rte_experimental
+int
+qat_sym_enqueue_frame_auth(void *qat_sym_qp,
+		struct rte_cryptodev_sym_session *session,
+		rte_iova_t data_iova, uint32_t auth_ofs, uint32_t auth_len,
+		struct rte_crypto_vec *sgl, uint32_t n_sgl_vecs,
+		uint8_t *iv, rte_iova_t digest_iova,
+		uint8_t is_first, uint8_t is_last, void *frame);
+
+/**
+ * Function prototype to get the number of elements in a frame in dequeue.
+ * This function should be provided by the user.
+ **/
+typedef uint32_t (*qat_qp_get_frame_n_element_t)(void *frame);
+
+/**
+ * Dequeue a frame from QAT queue
+ *
+ * @param qat_sym_qp
+ *   queue pair data got from qat_sym_get_qp().
+ * @param frame
+ *   return the frame dequeued.
+ * @param get_frame_n_elt
+ *   callback function that returns the number of elements in a frame.
+ * @param first_status_offset
+ *   the offset to status field of first frame element.
+ * @param element_interval
+ *   the size of frame element in the frame data, used to compute next
+ *   status field.
+ * @param element_status_success
+ *   value to set for successfully processed frame element.
+ * @param element_status_error
+ *   value to set for unsuccessfully processed frame element.
+ *
+ * @return
+ *   if a frame is retrieved from the queue pair it will be written
+ *   into "frame" parameter, otherwise "frame" will be written as NULL and
+ *   -1 will be returned. If all elements are successful 0 will be returned.
+ *   Negative number of failed elements will be returned.
+ **/
+__rte_experimental
+int
+qat_sym_dequeue_frame(void *qat_sym_qp, void **frame,
+		qat_qp_get_frame_n_element_t get_frame_n_elt,
+		uint32_t first_status_offset, uint32_t element_interval,
+		uint8_t element_status_success, uint8_t element_status_error);
+
+#endif /* _QAT_SYM_FRAME_H_ */
-- 
2.20.1


^ permalink raw reply	[flat|nested] 39+ messages in thread

end of thread, other threads:[~2020-07-13 16:58 UTC | newest]

Thread overview: 39+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-12 14:39 [dpdk-dev] [PATCH] crypto/qat: add data-path APIs Fan Zhang
2020-06-18 17:50 ` Trahe, Fiona
2020-06-25 13:31 ` [dpdk-dev] [dpdk-dev v2 0/3] crypto/qat: add symmetric crypto " Fan Zhang
2020-06-25 13:31   ` [dpdk-dev] [dpdk-dev v2 1/3] crypto/qat: add " Fan Zhang
2020-06-25 13:31   ` [dpdk-dev] [dpdk-dev v2 2/3] test/crypto: add unit-test for QAT direct APIs Fan Zhang
2020-06-30 17:47     ` Trahe, Fiona
2020-06-25 13:31   ` [dpdk-dev] [dpdk-dev v2 3/3] doc: add QAT direct APIs guide Fan Zhang
2020-07-03 10:14   ` [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 10:14     ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
2020-07-03 10:14     ` [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-03 10:14     ` [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide Fan Zhang
2020-07-03 11:09   ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 11:09     ` [dpdk-dev] [dpdk-dev v3 1/4] " Fan Zhang
2020-07-03 11:09     ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
2020-07-03 11:09     ` [dpdk-dev] [dpdk-dev v3 2/4] " Fan Zhang
2020-07-03 11:09     ` [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-03 11:09     ` [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide Fan Zhang
2020-07-03 11:09     ` [dpdk-dev] [dpdk-dev v3 3/4] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-03 11:09     ` [dpdk-dev] [dpdk-dev v3 4/4] doc: add cryptodev direct APIs guide Fan Zhang
2020-07-03 12:49     ` [dpdk-dev] [dpdk-dev v4 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 12:49       ` [dpdk-dev] [dpdk-dev v4 1/4] " Fan Zhang
2020-07-04 18:16         ` Akhil Goyal
2020-07-06 10:02           ` Zhang, Roy Fan
2020-07-06 12:13             ` Akhil Goyal
2020-07-07 12:37               ` Zhang, Roy Fan
2020-07-07 20:37                 ` Akhil Goyal
2020-07-08 15:09                   ` Zhang, Roy Fan
2020-07-03 12:49       ` [dpdk-dev] [dpdk-dev v4 2/4] crypto/qat: add support to direct " Fan Zhang
2020-07-03 12:49       ` [dpdk-dev] [dpdk-dev v4 3/4] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-03 12:49       ` [dpdk-dev] [dpdk-dev v4 4/4] doc: add cryptodev direct APIs guide Fan Zhang
2020-07-13 16:57       ` [dpdk-dev] [dpdk-dev v5 0/4] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-13 16:57         ` [dpdk-dev] [dpdk-dev v5 1/4] cryptodev: add " Fan Zhang
2020-07-13 16:57         ` [dpdk-dev] [dpdk-dev v5 2/4] crypto/qat: add support to direct " Fan Zhang
2020-07-13 16:57         ` [dpdk-dev] [dpdk-dev v5 3/4] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-13 16:57         ` [dpdk-dev] [dpdk-dev v5 4/4] doc: add cryptodev direct APIs guide Fan Zhang
2020-06-26  6:55 ` [dpdk-dev] [PATCH] crypto/qat: add data-path APIs Jerin Jacob
2020-06-26 10:38   ` [dpdk-dev] [dpdk-techboard] " Thomas Monjalon
2020-06-30 20:33     ` Honnappa Nagarahalli
2020-06-30 21:00       ` Thomas Monjalon

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).