* [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs
@ 2020-07-03 10:12 Fan Zhang
2020-07-03 10:12 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
` (2 more replies)
0 siblings, 3 replies; 6+ messages in thread
From: Fan Zhang @ 2020-07-03 10:12 UTC (permalink / raw)
To: dev
Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang,
Piotr Bronowski
This patch adds symmetric crypto data-path APIs for Cryptodev. Direct
symmetric crypto data-path APIs are a set of APIs that provide
more HW-friendly enqueue/dequeue data-path functions as an alternative
approach to ``rte_cryptodev_enqueue_burst`` and
``rte_cryptodev_dequeue_burst``. The APIs are designed for external
libraries/applications that want to use Cryptodev as a symmetric crypto
data-path accelerator but are not necessarily mbuf data-path centric. With
these APIs the cycle cost of converting their data structures into DPDK
cryptodev operations/mbufs can be reduced, and the dependency on the DPDK
crypto operation mempool can be removed.
It is expected that the user can develop close-to-native performance
symmetric crypto data-path implementations with the functions provided
in this patchset.
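For illustration, a minimal sketch of the intended calling pattern is shown
below. It assumes the ``rte_crypto_hw_ops`` structure and the flags introduced
in this patchset; dev_id, qp_id, sess, iv, opaque, data_iova and data_len are
placeholders prepared beforehand, and error handling is trimmed:

    struct rte_crypto_hw_ops hw_ops;
    struct rte_crypto_sym_job job = { 0 };
    uint64_t drv_data;
    uint64_t flags = RTE_CRYPTO_HW_ENQ_FLAG_START |
        RTE_CRYPTO_HW_ENQ_FLAG_END | RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE;
    int status;

    /* Fetch the direct API function pointers and queue pair data. */
    if (rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops) < 0)
        return;

    /* Describe one cipher-only job; no crypto op or mbuf is needed. */
    job.data_iova = data_iova;
    job.iv = iv;
    job.cipher_only.cipher_ofs = 0;
    job.cipher_only.cipher_len = data_len;

    if (hw_ops.enqueue_cipher(hw_ops.qp, sess, &job, opaque, &drv_data,
            flags) < 0)
        return;

    /* Poll until the job completes, then fetch the stored opaque pointer. */
    while (hw_ops.query_processed(hw_ops.qp, 1) == 0)
        rte_pause();
    opaque = hw_ops.dequeue_one(hw_ops.qp, &drv_data,
            RTE_CRYPTO_HW_DEQ_FLAG_START, &status);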
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
v3:
- Moved the APIs from being QAT-only to cryptodev.
- Added cryptodev feature flags.
v2:
- Used a structure to simplify parameters.
- Added unit tests.
- Added documentation.
Fan Zhang (3):
crypto/qat: add support to direct data-path APIs
test/crypto: add unit-test for cryptodev direct APIs
doc: add cryptodev direct APIs guide
app/test/test_cryptodev.c | 353 ++++++++++++-
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 +-
doc/guides/prog_guide/cryptodev_lib.rst | 266 ++++++++++
doc/guides/rel_notes/release_20_08.rst | 8 +
drivers/common/qat/Makefile | 2 +
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/crypto/qat/meson.build | 1 +
drivers/crypto/qat/qat_sym.c | 1 -
drivers/crypto/qat/qat_sym_job.c | 661 ++++++++++++++++++++++++
drivers/crypto/qat/qat_sym_job.h | 12 +
drivers/crypto/qat/qat_sym_pmd.c | 7 +-
13 files changed, 1332 insertions(+), 42 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_job.c
create mode 100644 drivers/crypto/qat/qat_sym_job.h
--
2.20.1
* [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct data-path APIs
2020-07-03 10:12 [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs Fan Zhang
@ 2020-07-03 10:12 ` Fan Zhang
2020-07-03 10:12 ` [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-03 10:12 ` [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide Fan Zhang
2 siblings, 0 replies; 6+ messages in thread
From: Fan Zhang @ 2020-07-03 10:12 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch adds symmetric crypto data-path API support to the QAT-SYM PMD.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
drivers/common/qat/Makefile | 2 +
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/crypto/qat/meson.build | 1 +
drivers/crypto/qat/qat_sym.c | 1 -
drivers/crypto/qat/qat_sym_job.c | 661 +++++++++++++++++++++++++++++++
drivers/crypto/qat/qat_sym_job.h | 12 +
drivers/crypto/qat/qat_sym_pmd.c | 7 +-
8 files changed, 686 insertions(+), 5 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_job.c
create mode 100644 drivers/crypto/qat/qat_sym_job.h
diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
index 28bd5668f..6655fd2bc 100644
--- a/drivers/common/qat/Makefile
+++ b/drivers/common/qat/Makefile
@@ -39,6 +39,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
SRCS-y += qat_sym.c
SRCS-y += qat_sym_session.c
SRCS-y += qat_sym_pmd.c
+ SRCS-y += qat_sym_job.c
+
build_qat = yes
endif
endif
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 8e6dd04eb..06e2d8c8a 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,8 @@ txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
q->csr_tail = q->tail;
}
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
uint32_t old_head, new_head;
uint32_t max_head;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 575d69059..8add1b049 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -116,4 +116,7 @@ qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
void *op_cookie __rte_unused,
uint64_t *dequeue_err_count __rte_unused);
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q);
+
#endif /* _QAT_QP_H_ */
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index fc65923a7..8a3921293 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -13,6 +13,7 @@ if dep.found()
qat_sources += files('qat_sym_pmd.c',
'qat_sym.c',
'qat_sym_session.c',
+ 'qat_sym_job.c',
'qat_asym_pmd.c',
'qat_asym.c')
qat_ext_deps += dep
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 25b6dd5f4..609180d3f 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -336,7 +336,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
set_cipher_iv(ctx->cipher_iv.length,
ctx->cipher_iv.offset,
cipher_param, op, qat_req);
-
} else if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
diff --git a/drivers/crypto/qat/qat_sym_job.c b/drivers/crypto/qat/qat_sym_job.c
new file mode 100644
index 000000000..7c0913459
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.c
@@ -0,0 +1,661 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym.h"
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+#include "qat_qp.h"
+#include "qat_sym_job.h"
+
+static __rte_always_inline int
+qat_sym_frame_fill_sgl(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_sgl *sgl, uint32_t max_len)
+{
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sgl *list;
+ int64_t len = max_len;
+ uint32_t i;
+
+ if (!sgl)
+ return -EINVAL;
+ if (sgl->num < 2 || sgl->num > QAT_SYM_SGL_MAX_NUMBER || !sgl->vec)
+ return -EINVAL;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
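+ /* Use this descriptor slot's pre-allocated op cookie to hold the SGL table */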
+ cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
+ list = (struct qat_sgl *)&cookie->qat_sgl_src;
+
+ for (i = 0; i < sgl->num && len > 0; i++) {
+ list->buffers[i].len = RTE_MIN(sgl->vec[i].len, len);
+ list->buffers[i].resrvd = 0;
+ list->buffers[i].addr = sgl->vec[i].iova;
+ len -= list->buffers[i].len;
+ }
+
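+ /* The SGL was too short to cover max_len bytes */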
+ if (unlikely(len > 0))
+ return -1;
+
+ list->num_bufs = i;
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ req->comn_mid.src_length = req->comn_mid.dst_length = 0;
+ return 0;
+}
+
+static __rte_always_inline void
+qat_sym_set_cipher_param(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ uint32_t cipher_ofs, uint32_t cipher_len)
+{
+ cipher_param->cipher_offset = cipher_ofs;
+ cipher_param->cipher_length = cipher_len;
+}
+
+static __rte_always_inline void
+qat_sym_set_auth_param(struct icp_qat_fw_la_auth_req_params *auth_param,
+ uint32_t auth_ofs, uint32_t auth_len,
+ rte_iova_t digest_iova, rte_iova_t aad_iova)
+{
+ auth_param->auth_off = auth_ofs;
+ auth_param->auth_len = auth_len;
+ auth_param->auth_res_addr = digest_iova;
+ auth_param->u1.aad_adr = aad_iova;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_aead(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+ /* In case of AES-CCM this may point to user-selected
+ * memory or the IV offset in the crypto op
+ */
+ uint8_t *aad_data;
+ /* This is the true AAD length; it does not include the 18
+ * bytes of preceding data
+ */
+ uint8_t aad_ccm_real_len;
+ uint8_t aad_len_field_sz;
+ uint32_t msg_len_be;
+ rte_iova_t aad_iova;
+ uint8_t q;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
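+ /*
+ * The first job of a frame starts at the queue tail and records the
+ * opaque pointer; subsequent jobs resume from the ring offset the
+ * previous call stored in drv_data.
+ */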
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->aead.aead_ofs + job->aead.aead_len;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ aad_iova = job->aead.aad_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ aad_data = job->aead.aad;
+ aad_iova = job->aead.aad_iova;
+ aad_ccm_real_len = 0;
+ aad_len_field_sz = 0;
+ msg_len_be = rte_bswap32(job->aead.aead_len);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ aad_data = job->iv;
+ aad_iova = job->iv_iova;
+ }
+
+ q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+ ctx->digest_length, q);
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET +
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+ rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx], 0, pad_len);
+ }
+
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+ + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(job->aead.aad +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ qat_sym_set_cipher_param(cipher_param, job->aead.aead_ofs,
+ job->aead.aead_len);
+ qat_sym_set_auth_param(auth_param, job->aead.aead_ofs,
+ job->aead.aead_len, job->aead.tag_iova, aad_iova);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->aead.aead_ofs + job->aead.aead_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ if (ctx->is_single_pass) {
+ cipher_param->spc_aad_addr = aad_iova;
+ cipher_param->spc_auth_res_addr = job->aead.tag_iova;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
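+ /*
+ * Only the last job of the frame moves the queue tail and writes the
+ * ring CSR; intermediate jobs merely advance the offset in drv_data.
+ */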
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+
+static __rte_always_inline int
+qat_sym_job_enqueue_cipher(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->cipher_only.cipher_ofs,
+ job->cipher_only.cipher_len);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_auth(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ auth_param = (void *)((uint8_t *)&req->serv_specif_rqpars +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->auth_only.auth_ofs + job->auth_only.auth_len;
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->auth_only.auth_ofs,
+ job->auth_only.auth_len, job->auth_only.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->auth_only.auth_ofs +
+ job->auth_only.auth_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_chain(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t min_ofs = RTE_MIN(job->chain.cipher_ofs, job->chain.auth_ofs);
+ uint32_t max_len = RTE_MAX(job->chain.cipher_len, job->chain.auth_len);
+ rte_iova_t auth_iova_end;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr =
+ req->comn_mid.dest_data_addr = job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length = min_ofs + max_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->chain.cipher_ofs,
+ job->chain.cipher_len);
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->chain.auth_ofs,
+ job->chain.auth_len, job->chain.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+
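+ /*
+ * Find the IOVA just past the auth region; it is needed for
+ * the digest-encrypted check below.
+ */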
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ uint32_t len = job->chain.auth_ofs +
+ job->chain.auth_len;
+ struct rte_crypto_vec *vec = job->sgl->vec;
+ int auth_end_get = 0;
+ while (len) {
+ if (len <= vec->len) {
+ auth_iova_end = vec->iova + len;
+ auth_end_get = 1;
+ break;
+ }
+ len -= vec->len;
+ vec++;
+ }
+ if (!auth_end_get) {
+ QAT_DP_LOG(ERR, "Failed to get auth end");
+ return -1;
+ }
+ } else
+ auth_iova_end = job->data_iova + job->chain.auth_ofs +
+ job->chain.auth_len;
+
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_param->auth_off + auth_param->auth_len <
+ cipher_param->cipher_offset +
+ cipher_param->cipher_length) &&
+ (job->chain.digest_iova == auth_iova_end)) {
+ /* Handle partial digest encryption */
+ if (cipher_param->cipher_offset +
+ cipher_param->cipher_length <
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length)
+ req->comn_mid.dst_length =
+ req->comn_mid.src_length =
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length;
+ struct icp_qat_fw_comn_req_hdr *header =
+ &req->comn_hdr;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ min_ofs + max_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+#define get_rx_queue_message_at_index(q, h, i) \
+ (void *)((uint8_t *)q->base_addr + ((h + q->msg_size * (i)) & \
+ q->modulo_mask))
+
+static __rte_always_inline int
+qat_is_rx_msg_ok(struct icp_qat_fw_comn_resp *resp_msg)
+{
+ return ICP_QAT_FW_COMN_STATUS_FLAG_OK ==
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status);
+}
+
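+/*
+ * Check whether nb_jobs responses are ready by peeking at the slot of the
+ * last expected response; responses are written to the ring in order.
+ */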
+static __rte_always_inline int
+qat_sym_query_processed_jobs(void *qat_sym_qp, uint32_t nb_jobs)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head = (rx_queue->head + (nb_jobs - 1) * rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ if (*(uint32_t *)resp == ADF_RING_EMPTY_SIG)
+ return 0;
+
+ return 1;
+}
+
+static __rte_always_inline void *
+qat_sym_job_dequeue_one(void *qat_sym_qp, uint64_t *drv_data, uint64_t flags,
+ int *is_op_success)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head;
+ void *opaque;
+
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+ head = rx_queue->head;
+ else
+ head = (uint32_t)*drv_data;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+ *is_op_success = 0;
+ return NULL;
+ }
+
+ if (unlikely(qat_is_rx_msg_ok(resp) == 0))
+ *is_op_success = -1;
+ else
+ *is_op_success = 1;
+
+ opaque = (void *)(uintptr_t)resp->opaque_data;
+
+ rx_queue->head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ rx_queue->nb_processed_responses++;
+ qp->dequeued++;
+ qp->stats.dequeued_count++;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+
+ return opaque;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_job_dequeue_n(void *qat_sym_qp, uint64_t *drv_data,
+ void *user_data, rte_crpyto_hw_user_post_deq_cb_fn cb,
+ uint32_t n, uint64_t flags, uint32_t *n_failed_jobs)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head, i;
+ uint32_t status, total_fail = 0;
+
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+ head = rx_queue->head;
+ else
+ head = (uint32_t)*drv_data;
+
+ for (i = 0; i < n; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
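+		/*
+		 * Empty slot: with the EXHAUST flag accept the partial
+		 * dequeue, otherwise abort and report the negated dequeued
+		 * count without updating the queue head.
+		 */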
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_EXHAUST)
+ break;
+ return -i;
+ }
+
+ status = qat_is_rx_msg_ok(resp);
+ total_fail += !status;
+ cb(user_data, i, status);
+
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ }
+
+ rx_queue->head = head;
+ rx_queue->nb_processed_responses += i;
+ qp->dequeued += i;
+ qp->stats.dequeued_count += i;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+ *n_failed_jobs = total_fail;
+
+ return i;
+}
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops)
+{
+ struct qat_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp->service_type != QAT_SERVICE_SYMMETRIC)
+ return -EINVAL;
+
+ hw_ops->qp = (void *)qp;
+ hw_ops->enqueue_aead = qat_sym_job_enqueue_aead;
+ hw_ops->enqueue_cipher = qat_sym_job_enqueue_cipher;
+ hw_ops->enqueue_auth = qat_sym_job_enqueue_auth;
+ hw_ops->enqueue_chain = qat_sym_job_enqueue_chain;
+ hw_ops->dequeue_one = qat_sym_job_dequeue_one;
+ hw_ops->dequeue_many = qat_sym_job_dequeue_n;
+ hw_ops->query_processed = qat_sym_query_processed_jobs;
+
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_sym_job.h b/drivers/crypto/qat/qat_sym_job.h
new file mode 100644
index 000000000..b11aeb841
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_FRAME_H_
+#define _QAT_SYM_FRAME_H_
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops);
+
+#endif /* _QAT_SYM_FRAME_H_ */
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index e887c880f..be9d73c0a 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -13,6 +13,7 @@
#include "qat_sym.h"
#include "qat_sym_session.h"
#include "qat_sym_pmd.h"
+#include "qat_sym_job.h"
#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
@@ -234,7 +235,8 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
/* Crypto related operations */
.sym_session_get_size = qat_sym_session_get_private_size,
.sym_session_configure = qat_sym_session_configure,
- .sym_session_clear = qat_sym_session_clear
+ .sym_session_clear = qat_sym_session_clear,
+ .sym_get_hw_ops = qat_sym_get_ops,
};
static uint16_t
@@ -308,7 +310,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
- RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
+ RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
+ RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API;
internals = cryptodev->data->dev_private;
internals->qat_dev = qat_pci_dev;
--
2.20.1
* [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs
2020-07-03 10:12 [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 10:12 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
@ 2020-07-03 10:12 ` Fan Zhang
2020-07-03 10:12 ` [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide Fan Zhang
2 siblings, 0 replies; 6+ messages in thread
From: Fan Zhang @ 2020-07-03 10:12 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch adds a QAT test that uses the cryptodev symmetric crypto
direct APIs.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
app/test/test_cryptodev.c | 353 ++++++++++++++++++++++++--
app/test/test_cryptodev.h | 6 +
app/test/test_cryptodev_blockcipher.c | 50 ++--
3 files changed, 372 insertions(+), 37 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 8f631468b..9fbbe1d6c 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -55,6 +55,8 @@ static int gbl_driver_id;
static enum rte_security_session_action_type gbl_action_type =
RTE_SECURITY_ACTION_TYPE_NONE;
+int qat_api_test;
+
struct crypto_testsuite_params {
struct rte_mempool *mbuf_pool;
struct rte_mempool *large_mbuf_pool;
@@ -142,6 +144,154 @@ ceil_byte_length(uint32_t num_bits)
return (num_bits >> 3);
}
+void
+process_sym_hw_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits)
+{
+ int32_t n;
+ struct rte_crypto_hw_ops hw_ops;
+ struct rte_crypto_op *op_ret;
+ struct rte_crypto_sym_op *sop;
+ struct rte_crypto_sym_job job;
+ struct rte_crypto_sgl sgl;
+ struct rte_crypto_vec vec[UINT8_MAX] = { {0} };
+ int ret;
+ uint32_t min_ofs = 0, max_len = 0;
+ uint64_t drv_data;
+ uint64_t flags = RTE_CRYPTO_HW_ENQ_FLAG_START |
+ RTE_CRYPTO_HW_ENQ_FLAG_END |
+ RTE_CRYPTO_HW_ENQ_FLAG_SET_OPAQUE;
+ enum {
+ cipher = 0,
+ auth,
+ chain,
+ aead
+ } qat_api_test_type;
+ uint32_t count = 0;
+
+ memset(&job, 0, sizeof(job));
+
+ ret = rte_cryptodev_sym_get_hw_ops(dev_id, qp_id, &hw_ops);
+ if (ret) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ return;
+ }
+
+ sop = op->sym;
+
+ if (is_cipher && is_auth) {
+ qat_api_test_type = chain;
+ min_ofs = RTE_MIN(sop->cipher.data.offset,
+ sop->auth.data.offset);
+ max_len = RTE_MAX(sop->cipher.data.length,
+ sop->auth.data.length);
+ } else if (is_cipher) {
+ qat_api_test_type = cipher;
+ min_ofs = sop->cipher.data.offset;
+ max_len = sop->cipher.data.length;
+ } else if (is_auth) {
+ qat_api_test_type = auth;
+ min_ofs = sop->auth.data.offset;
+ max_len = sop->auth.data.length;
+ } else { /* aead */
+ qat_api_test_type = aead;
+ min_ofs = sop->aead.data.offset;
+ max_len = sop->aead.data.length;
+ }
+
+ if (len_in_bits) {
+ max_len = max_len >> 3;
+ min_ofs = min_ofs >> 3;
+ }
+
+ n = rte_crypto_mbuf_to_vec(sop->m_src, min_ofs, max_len,
+ vec, RTE_DIM(vec));
+ if (n < 0 || n != sop->m_src->nb_segs) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ if (n > 1) {
+ sgl.vec = vec;
+ sgl.num = n;
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ job.sgl = &sgl;
+ } else
+ job.data_iova = rte_pktmbuf_iova(sop->m_src);
+
+
+ switch (qat_api_test_type) {
+ case aead:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+ job.aead.aead_ofs = min_ofs;
+ job.aead.aead_len = max_len;
+ job.aead.aad = sop->aead.aad.data;
+ job.aead.aad_iova = sop->aead.aad.phys_addr;
+ job.aead.tag_iova = sop->aead.digest.phys_addr;
+ ret = hw_ops.enqueue_aead(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case cipher:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.cipher_only.cipher_ofs = min_ofs;
+ job.cipher_only.cipher_len = max_len;
+ ret = hw_ops.enqueue_cipher(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case auth:
+ job.auth_only.auth_ofs = min_ofs;
+ job.auth_only.auth_len = max_len;
+ job.auth_only.digest_iova = sop->auth.digest.phys_addr;
+ ret = hw_ops.enqueue_auth(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ case chain:
+ job.iv = rte_crypto_op_ctod_offset(op, void *, IV_OFFSET);
+ job.iv_iova = rte_crypto_op_ctophys_offset(op, IV_OFFSET);
+ job.chain.cipher_ofs = sop->cipher.data.offset;
+ job.chain.cipher_len = sop->cipher.data.length;
+ if (len_in_bits) {
+ job.chain.cipher_len = job.chain.cipher_len >> 3;
+ job.chain.cipher_ofs = job.chain.cipher_ofs >> 3;
+ }
+ job.chain.auth_ofs = sop->auth.data.offset;
+ job.chain.auth_len = sop->auth.data.length;
+ if (len_in_bits) {
+ job.chain.auth_len = job.chain.auth_len >> 3;
+ job.chain.auth_ofs = job.chain.auth_ofs >> 3;
+ }
+ job.chain.digest_iova = sop->auth.digest.phys_addr;
+ ret = hw_ops.enqueue_chain(hw_ops.qp, sop->session, &job,
+ (void *)op, &drv_data, flags);
+ break;
+ }
+
+ if (ret < 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ ret = 0;
+
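+	/* Poll for completion for up to ~1024 ms before giving up. */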
+ while (ret == 0 && count++ < 1024) {
+ ret = hw_ops.query_processed(hw_ops.qp, 1);
+ if (!ret)
+ rte_delay_ms(1);
+ }
+ if (ret < 0 || count >= 1024) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ flags = RTE_CRYPTO_HW_DEQ_FLAG_START;
+ op_ret = hw_ops.dequeue_one(hw_ops.qp, &drv_data, flags, &ret);
+ if (op_ret != op || ret != 1)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+}
+
static void
process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
{
@@ -2451,7 +2601,11 @@ test_snow3g_authentication(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -2530,7 +2684,11 @@ test_snow3g_authentication_verify(const struct snow3g_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2600,6 +2758,9 @@ test_kasumi_authentication(const struct kasumi_hash_test_data *tdata)
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_crypt_auth_op(ts_params->valid_devs[0],
ut_params->op);
+ else if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
else
ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
@@ -2671,7 +2832,11 @@ test_kasumi_authentication_verify(const struct kasumi_hash_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -2878,8 +3043,12 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
- ut_params->op);
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -2964,7 +3133,11 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3287,7 +3460,11 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3362,7 +3539,11 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -3737,7 +3918,11 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_dst;
@@ -3905,7 +4090,11 @@ test_zuc_cipher_auth(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4000,7 +4189,11 @@ test_snow3g_cipher_auth(const struct snow3g_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
ut_params->obuf = ut_params->op->sym->m_src;
@@ -4136,7 +4329,11 @@ test_snow3g_auth_cipher(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4325,7 +4522,11 @@ test_snow3g_auth_cipher_sgl(const struct snow3g_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4507,7 +4708,11 @@ test_kasumi_auth_cipher(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4697,7 +4902,11 @@ test_kasumi_auth_cipher_sgl(const struct kasumi_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4838,7 +5047,11 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -4925,7 +5138,11 @@ test_zuc_encryption(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5012,7 +5229,11 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 0, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5100,7 +5321,11 @@ test_zuc_authentication(const struct wireless_test_data *tdata)
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
ut_params->obuf = ut_params->op->sym->m_src;
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5232,7 +5457,11 @@ test_zuc_auth_cipher(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -5418,7 +5647,11 @@ test_zuc_auth_cipher_sgl(const struct wireless_test_data *tdata,
if (retval < 0)
return retval;
- ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 1, 1, 1);
+ else
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
ut_params->op);
TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
@@ -7024,6 +7257,9 @@ test_authenticated_encryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (qat_api_test)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -7993,6 +8229,9 @@ test_authenticated_decryption(const struct aead_test_data *tdata)
/* Process crypto operation */
if (gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (qat_api_test == 1)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -11284,6 +11523,9 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
if (oop == IN_PLACE &&
gbl_action_type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)
process_cpu_aead_op(ts_params->valid_devs[0], ut_params->op);
+ else if (oop == IN_PLACE && qat_api_test == 1)
+ process_sym_hw_api_op(ts_params->valid_devs[0], 0,
+ ut_params->op, 0, 0, 0);
else
TEST_ASSERT_NOT_NULL(
process_crypto_request(ts_params->valid_devs[0],
@@ -13241,6 +13483,75 @@ test_cryptodev_nitrox(void)
return unit_test_suite_runner(&cryptodev_nitrox_testsuite);
}
+static struct unit_test_suite cryptodev_sym_direct_api_testsuite = {
+ .suite_name = "Crypto Sym direct API Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_auth_cipher_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_generate_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_hash_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_cipheronly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_chain_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_encryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_CCM_authenticated_decryption_test_case_128_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_authenticated_decryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_192_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_decryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_qat_sym_direct_api(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ int ret;
+
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check that both "
+ "CONFIG_RTE_LIBRTE_PMD_QAT and CONFIG_RTE_LIBRTE_PMD_QAT_SYM "
+ "are enabled in config file to run this testsuite.\n");
+ return TEST_SKIPPED;
+ }
+
+ qat_api_test = 1;
+ ret = unit_test_suite_runner(&cryptodev_sym_direct_api_testsuite);
+ qat_api_test = 0;
+
+ return ret;
+}
+
+REGISTER_TEST_COMMAND(cryptodev_qat_sym_api_autotest, test_qat_sym_direct_api);
REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
REGISTER_TEST_COMMAND(cryptodev_cpu_aesni_mb_autotest,
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index 41542e055..2854115aa 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -71,6 +71,8 @@
#define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
+extern int qat_api_test;
+
/**
* Write (spread) data from buffer to mbuf data
*
@@ -209,4 +211,8 @@ create_segmented_mbuf(struct rte_mempool *mbuf_pool, int pkt_len,
return NULL;
}
+void
+process_sym_hw_api_op(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op *op,
+ uint8_t is_cipher, uint8_t is_auth, uint8_t len_in_bits);
+
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_blockcipher.c b/app/test/test_cryptodev_blockcipher.c
index 642b54971..dfa74a449 100644
--- a/app/test/test_cryptodev_blockcipher.c
+++ b/app/test/test_cryptodev_blockcipher.c
@@ -461,25 +461,43 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
/* Process crypto operation */
- if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Error sending packet for encryption");
- status = TEST_FAILED;
- goto error_exit;
- }
+ if (qat_api_test) {
+ uint8_t is_cipher = 0, is_auth = 0;
+
+ if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
+ RTE_LOG(DEBUG, USER1,
+ "QAT direct API does not support OOP, Test Skipped.\n");
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "SKIPPED");
+ status = TEST_SUCCESS;
+ goto error_exit;
+ }
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
+ is_cipher = 1;
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
+ is_auth = 1;
+
+ process_sym_hw_api_op(dev_id, 0, op, is_cipher, is_auth, 0);
+ } else {
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for encryption");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
- op = NULL;
+ op = NULL;
- while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
- rte_pause();
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
+ rte_pause();
- if (!op) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Failed to process sym crypto op");
- status = TEST_FAILED;
- goto error_exit;
+ if (!op) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process sym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
}
debug_hexdump(stdout, "m_src(after):",
--
2.20.1
* [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide
2020-07-03 10:12 [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 10:12 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
2020-07-03 10:12 ` [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
@ 2020-07-03 10:12 ` Fan Zhang
2 siblings, 0 replies; 6+ messages in thread
From: Fan Zhang @ 2020-07-03 10:12 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch updates the programmer's guide to demonstrate the usage
and limitations of the cryptodev symmetric crypto data-path APIs.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
doc/guides/prog_guide/cryptodev_lib.rst | 266 ++++++++++++++++++++++++
doc/guides/rel_notes/release_20_08.rst | 8 +
2 files changed, 274 insertions(+)
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index c14f750fa..9900a593a 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -861,6 +861,272 @@ using one of the crypto PMDs available in DPDK.
num_dequeued_ops);
} while (total_num_dequeued_ops < num_enqueued_ops);
+Cryptodev Direct Symmetric Crypto Data-path APIs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Direct symmetric crypto data-path APIs are a set of APIs provided especially
+for symmetric HW crypto PMDs that support fast data-path enqueue/dequeue
+operations. The direct data-path APIs take advantage of existing Cryptodev
+APIs for device, queue pair, and session management. In addition, the user is
+required to fetch the queue pair data and the function pointers. The APIs are
+provided as an advanced feature, as an alternative to
+``rte_cryptodev_enqueue_burst`` and ``rte_cryptodev_dequeue_burst``. They are
+designed to let the user develop close-to-native performance symmetric crypto
+data-path implementations for applications that do not necessarily depend on
+cryptodev operations, cryptodev operation mempools, or mbufs.
+
+Cryptodev PMDs that support this feature advertise the
+``RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API`` feature flag. The user calls
+``rte_cryptodev_sym_get_hw_ops`` to get all the function pointers for the
+different enqueue and dequeue operations, plus the device-specific queue pair
+data. After the ``rte_crypto_hw_ops`` structure is properly set by the driver,
+the user can use its function pointers and queue data pointer to enqueue and
+dequeue crypto jobs.
+
+To simplify the enqueue APIs, a symmetric job structure is defined:
+
+.. code-block:: c
+
+ /**
+ * Asynchronous operation job descriptor.
+ * Used by HW crypto devices direct API call that supports such activity
+ **/
+ struct rte_crypto_sym_job {
+ union {
+ /**
+ * When RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL bit is set in flags, sgl
+ * field is used as input data. Otherwise data_iova is
+ * used.
+ **/
+ rte_iova_t data_iova;
+ struct rte_crypto_sgl *sgl;
+ };
+ union {
+ /**
+ * Unlike cryptodev ops, all ofs and len fields are in
+ * bytes (including SNOW 3G/KASUMI/ZUC).
+ **/
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ } cipher_only;
+ struct {
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } auth_only;
+ struct {
+ uint32_t aead_ofs;
+ uint32_t aead_len;
+ rte_iova_t tag_iova;
+ uint8_t *aad;
+ rte_iova_t aad_iova;
+ } aead;
+ struct {
+ uint32_t cipher_ofs;
+ uint32_t cipher_len;
+ uint32_t auth_ofs;
+ uint32_t auth_len;
+ rte_iova_t digest_iova;
+ } chain;
+ };
+ uint8_t *iv;
+ rte_iova_t iv_iova;
+ };
+
+Unlike the Cryptodev operation, the ``rte_crypto_sym_job`` structure focuses
+only on the data fields required for the crypto PMD to execute a single job,
+and is not meant to be stored as opaque data. The user can freely allocate
+the structure on the stack and reuse it to fill all jobs.
+
+To use the direct symmetric crypto APIs safely, the user has to carefully
+set the correct fields in the ``rte_crypto_sym_job`` structure, otherwise the
+application or the system may crash. There are also a few limitations to the
+direct symmetric crypto APIs:
+
+* Only in-place operations are supported.
+* The APIs are NOT thread-safe.
+* The direct APIs' enqueue CANNOT be mixed with
+  ``rte_cryptodev_enqueue_burst`` on the same queue pair, or vice versa.
+
+The following sample code shows how to use the Cryptodev direct APIs to
+process a user-defined frame of up to 32 buffers with a chained AES-CBC and
+HMAC-SHA algorithm.
+
+See the *DPDK API Reference* for details on each API definition.
+
+.. code-block:: c
+
+ #include <rte_cryptodev.h>
+
+ #define FRAME_ELT_OK 0
+ #define FRAME_ELT_FAIL 1
+ #define FRAME_OK 0
+ #define FRAME_SOME_ELT_ERROR 1
+ #define FRAME_SIZE 32
+
+ /* Sample frame element struct */
+ struct sample_frame_elt {
+ /* The status field of frame element */
+ uint8_t status;
+ /* Pre-created and initialized cryptodev session */
+ struct rte_cryptodev_sym_session *session;
+ union {
+ rte_iova_t data;
+ struct rte_crypto_sgl sgl;
+ };
+ uint32_t data_len;
+ rte_iova_t digest;
+ uint8_t *iv;
+ uint8_t is_sgl;
+ };
+
+ /* Sample frame struct to describe up to 32 crypto jobs */
+ struct sample_frame {
+ struct sample_frame_elt elts[FRAME_SIZE]; /**< All frame elements */
+ uint32_t n_elts; /**< Number of elements */
+ };
+
+ /* Global Cryptodev Direct API structure */
+ static struct rte_crypto_hw_ops hw_ops;
+
+ /* Initialization */
+ static int
+ frame_operation_init(
+ uint8_t cryptodev_id, /**< Initialized cryptodev ID */
+ uint16_t qp_id /**< Initialized queue pair ID */)
+ {
+ int ret;
+
+ /* Get APIs */
+ ret = rte_cryptodev_sym_get_hw_ops(cryptodev_id, qp_id, &hw_ops);
+ /* A negative value is returned if the device does not support this
+ feature or the queue pair is not initialized */
+ if (ret < 0)
+ return -1;
+ return 0;
+ }
+
+ /* Frame enqueue function use direct AES-CBC-* + HMAC-SHA* API */
+ static int
+ enqueue_frame_to_direct_api(
+ struct sample_frame *frame /**< Initialized user frame struct */)
+ {
+ struct rte_crypto_sym_job job;
+ uint64_t drv_data, flags = 0;
+ uint32_t i;
+ int ret;
+
+ /* Fill all sample frame element data into HW queue pair */
+ for (i = 0; i < frame->n_elts; i++) {
+ struct sample_frame_elt *fe = &frame->elts[i];
+
+ /* If it is the first element in the frame, set the START flag to
+ let the driver know it is the first job of the frame and fill drv_data. */
+ if (i == 0)
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_START;
+ else
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_START;
+
+ /* If it is the last element in the frame, set the END flag to
+ kick the HW queue */
+ if (i == frame->n_elts - 1)
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_END;
+ else
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_END;
+
+ /* Fill the job data with frame element data */
+ if (fe->is_sgl != 0) {
+ /* The buffer is a SGL buffer */
+ job.sgl = &fe->sgl;
+ /* Set SGL flag */
+ flags |= RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ } else {
+ job.data_iova = fe->data;
+ /* Unset SGL flag in the job */
+ flags &= ~RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL;
+ }
+
+ job.chain.cipher_ofs = job.chain.auth_ofs = 0;
+ job.chain.cipher_len = job.chain.auth_len = fe->data_len;
+ job.chain.digest_iova = fe->digest;
+
+ job.iv = fe->iv;
+
+ /* Call direct data-path enqueue chaining op API */
+ ret = hw_ops.enqueue_chain(hw_ops.qp, fe->session, &job,
+ (void *)frame, &drv_data, flags);
+ /**
+ * In case one element is failed to be enqueued, simply abandon
+ * enqueuing the whole frame.
+ **/
+ if (ret < 0)
+ return -1;
+
+ /**
+ * To this point the frame is enqueued. The job buffer can be
+ * safely reused for enqueuing next frame element.
+ **/
+ }
+
+ return 0;
+ }
+
+ /**
+ * Sample function to write the frame element status field based on the
+ * driver-returned operation result. Its return type and parameters
+ * should follow the prototype rte_crpyto_hw_user_post_deq_cb_fn() in
+ * rte_cryptodev.h
+ **/
+ static __rte_always_inline void
+ write_frame_elt_status(void *data, uint32_t index, uint8_t is_op_success)
+ {
+ struct sample_frame *frame = data;
+ frame->elts[index + 1].status = is_op_success ? FRAME_ELT_OK :
+ FRAME_ELT_FAIL;
+ }
+
+ /* Frame dequeue function use direct dequeue API */
+ static struct sample_frame *
+ dequeue_frame_with_direct_api(void)
+ {
+ struct sample_frame *ret_frame;
+ uint64_t flags, drv_data;
+ uint32_t n, n_fail, n_fail_first = 0;
+ int ret;
+
+ /* Dequeue first job, which should have frame data stored in opaque */
+ flags = RTE_CRYPTO_HW_DEQ_FLAG_START;
+ ret_frame = hw_ops.dequeue_one(hw_ops.qp, &drv_data, flags, &ret);
+ if (ret == 0) {
+ /* ret == 0, means it is still under processing */
+ return NULL;
+ } else if (ret == 1) {
+ /* ret_frame is successfully retrieved, the ret stores the
+ operation result */
+ ret_frame->elts[0].status = FRAME_ELT_OK;
+ } else {
+ ret_frame->elts[0].status = FRAME_ELT_FAIL;
+ n_fail_first = 1;
+ }
+
+ /* Query if n_elts has been processed, if not return NULL */
+ if (!hw_ops.query_processed(hw_ops.qp, frame->n_elts))
+ return NULL;
+
+ /* We are sure all elements have been processed, dequeue them all */
+ flag = 0;
+ ret = hw_ops.dequeue_many(hw_ops.qp, &drv_data, (void *)ret_frame,
+ write_frame_elt_status, ret_frame->n_elts - 1, flag, &n_fail);
+
+ if (n_fail + n_fail_first > 0)
+ ret_frame->status = FRAME_SOME_ELT_ERROR;
+ else
+ ret_frame->status = FRAME_OK;
+
+ return ret_frame;
+ }
+
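+The sample functions above can be combined into a simple poll-mode loop.
+The sketch below is illustrative only: ``get_next_frame()``,
+``requeue_frame()``, ``consume_frame()`` and the ``stop`` flag are
+hypothetical application hooks, not DPDK APIs, and a production loop would
+also need to bound the number of frames in flight on the queue pair.
+
+.. code-block:: c
+
+ /* Minimal illustrative loop built on the sample functions above */
+ static void
+ frame_processing_loop(uint8_t cryptodev_id, uint16_t qp_id)
+ {
+     struct sample_frame *frame;
+
+     if (frame_operation_init(cryptodev_id, qp_id) < 0)
+         return;
+
+     while (!stop) {
+         /* Enqueue a new frame if the application has one ready */
+         frame = get_next_frame();
+         if (frame && enqueue_frame_to_direct_api(frame) < 0)
+             /* Enqueue failed, let the application retry it later */
+             requeue_frame(frame);
+
+         /* Poll for a completed frame; NULL means none is ready yet */
+         frame = dequeue_frame_with_direct_api();
+         if (frame)
+             consume_frame(frame);
+     }
+ }
+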
Asymmetric Cryptography
-----------------------
diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
index 39064afbe..eb973693d 100644
--- a/doc/guides/rel_notes/release_20_08.rst
+++ b/doc/guides/rel_notes/release_20_08.rst
@@ -56,6 +56,14 @@ New Features
Also, make sure to start the actual text at the margin.
=========================================================
+ * **Added Cryptodev data-path APIs for non mbuf-centric data-paths.**
+
+   Cryptodev now has a set of data-path APIs that are not based on
+   cryptodev operations. These APIs are designed for external applications
+   or libraries that want to use cryptodev but whose data-path
+   implementations are not mbuf-centric. The QAT symmetric PMD is also
+   updated to support these APIs.
+
Removed Items
-------------
--
2.20.1
^ permalink raw reply [flat|nested] 6+ messages in thread
* [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct data-path APIs
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto " Fan Zhang
@ 2020-07-03 11:09 ` Fan Zhang
0 siblings, 0 replies; 6+ messages in thread
From: Fan Zhang @ 2020-07-03 11:09 UTC (permalink / raw)
To: dev; +Cc: fiona.trahe, akhil.goyal, thomas, jerinjacobk, Fan Zhang
This patch adds symmetric crypto data-path API support to the QAT-SYM PMD.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
drivers/common/qat/Makefile | 2 +
drivers/common/qat/qat_qp.c | 4 +-
drivers/common/qat/qat_qp.h | 3 +
drivers/crypto/qat/meson.build | 1 +
drivers/crypto/qat/qat_sym.c | 1 -
drivers/crypto/qat/qat_sym_job.c | 661 +++++++++++++++++++++++++++++++
drivers/crypto/qat/qat_sym_job.h | 12 +
drivers/crypto/qat/qat_sym_pmd.c | 7 +-
8 files changed, 686 insertions(+), 5 deletions(-)
create mode 100644 drivers/crypto/qat/qat_sym_job.c
create mode 100644 drivers/crypto/qat/qat_sym_job.h
diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
index 28bd5668f..6655fd2bc 100644
--- a/drivers/common/qat/Makefile
+++ b/drivers/common/qat/Makefile
@@ -39,6 +39,8 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
SRCS-y += qat_sym.c
SRCS-y += qat_sym_session.c
SRCS-y += qat_sym_pmd.c
+ SRCS-y += qat_sym_job.c
+
build_qat = yes
endif
endif
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 8e6dd04eb..06e2d8c8a 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -547,8 +547,8 @@ txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
q->csr_tail = q->tail;
}
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
{
uint32_t old_head, new_head;
uint32_t max_head;
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index 575d69059..8add1b049 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -116,4 +116,7 @@ qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
void *op_cookie __rte_unused,
uint64_t *dequeue_err_count __rte_unused);
+void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q);
+
#endif /* _QAT_QP_H_ */
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index fc65923a7..8a3921293 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -13,6 +13,7 @@ if dep.found()
qat_sources += files('qat_sym_pmd.c',
'qat_sym.c',
'qat_sym_session.c',
+ 'qat_sym_job.c',
'qat_asym_pmd.c',
'qat_asym.c')
qat_ext_deps += dep
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 25b6dd5f4..609180d3f 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -336,7 +336,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
set_cipher_iv(ctx->cipher_iv.length,
ctx->cipher_iv.offset,
cipher_param, op, qat_req);
-
} else if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
diff --git a/drivers/crypto/qat/qat_sym_job.c b/drivers/crypto/qat/qat_sym_job.c
new file mode 100644
index 000000000..7c0913459
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.c
@@ -0,0 +1,661 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2019 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym.h"
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+#include "qat_qp.h"
+#include "qat_sym_job.h"
+
+static __rte_always_inline int
+qat_sym_frame_fill_sgl(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_sgl *sgl, uint32_t max_len)
+{
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sgl *list;
+ int64_t len = max_len;
+ uint32_t i;
+
+ if (!sgl)
+ return -EINVAL;
+ if (sgl->num < 2 || sgl->num > QAT_SYM_SGL_MAX_NUMBER || !sgl->vec)
+ return -EINVAL;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
+ list = (struct qat_sgl *)&cookie->qat_sgl_src;
+
+ for (i = 0; i < sgl->num && len > 0; i++) {
+ list->buffers[i].len = RTE_MIN(sgl->vec[i].len, len);
+ list->buffers[i].resrvd = 0;
+ list->buffers[i].addr = sgl->vec[i].iova;
+ len -= list->buffers[i].len;
+ }
+
+ if (unlikely(len > 0))
+ return -1;
+
+ list->num_bufs = i;
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ req->comn_mid.src_length = req->comn_mid.dst_length = 0;
+ return 0;
+}
+
+static __rte_always_inline void
+qat_sym_set_cipher_param(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ uint32_t cipher_ofs, uint32_t cipher_len)
+{
+ cipher_param->cipher_offset = cipher_ofs;
+ cipher_param->cipher_length = cipher_len;
+}
+
+static __rte_always_inline void
+qat_sym_set_auth_param(struct icp_qat_fw_la_auth_req_params *auth_param,
+ uint32_t auth_ofs, uint32_t auth_len,
+ rte_iova_t digest_iova, rte_iova_t aad_iova)
+{
+ auth_param->auth_off = auth_ofs;
+ auth_param->auth_len = auth_len;
+ auth_param->auth_res_addr = digest_iova;
+ auth_param->u1.aad_adr = aad_iova;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_aead(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ register struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+ /* In case of AES-CCM this may point to user selected
+ * memory or the IV offset in the crypto_op
+ */
+ uint8_t *aad_data;
+ /* This is the true AAD length; it does not include the
+ * 18 bytes of preceding data
+ */
+ uint8_t aad_ccm_real_len;
+ uint8_t aad_len_field_sz;
+ uint32_t msg_len_be;
+ rte_iova_t aad_iova;
+ uint8_t q;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->aead.aead_ofs + job->aead.aead_len;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ aad_iova = job->aead.aad_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ aad_data = job->aead.aad;
+ aad_iova = job->aead.aad_iova;
+ aad_ccm_real_len = 0;
+ aad_len_field_sz = 0;
+ msg_len_be = rte_bswap32(job->aead.aead_len);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ aad_data = job->iv;
+ aad_iova = job->iv_iova;
+ }
+
+ q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+ ctx->digest_length, q);
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET +
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+ rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx], 0, pad_len);
+ }
+
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+ + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(job->aead.aad +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ job->iv + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+
+ }
+ break;
+ default:
+ return -1;
+ }
+
+ qat_sym_set_cipher_param(cipher_param, job->aead.aead_ofs,
+ job->aead.aead_len);
+ qat_sym_set_auth_param(auth_param, job->aead.aead_ofs,
+ job->aead.aead_len, job->aead.tag_iova, aad_iova);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->aead.aead_ofs + job->aead.aead_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ if (ctx->is_single_pass) {
+ cipher_param->spc_aad_addr = aad_iova;
+ cipher_param->spc_auth_res_addr = job->aead.tag_iova;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+
+static __rte_always_inline int
+qat_sym_job_enqueue_cipher(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->cipher_only.cipher_ofs,
+ job->cipher_only.cipher_len);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->cipher_only.cipher_ofs +
+ job->cipher_only.cipher_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_auth(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ auth_param = (void *)((uint8_t *)&req->serv_specif_rqpars +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ job->auth_only.auth_ofs + job->auth_only.auth_len;
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->auth_only.auth_ofs,
+ job->auth_only.auth_len, job->auth_only.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ job->auth_only.auth_ofs +
+ job->auth_only.auth_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+static __rte_always_inline int
+qat_sym_job_enqueue_chain(void *qat_sym_qp,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_crypto_sym_job *job, void *opaque, uint64_t *drv_data,
+ uint64_t flags)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_bulk_req *req;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ uint32_t min_ofs = RTE_MIN(job->chain.cipher_ofs, job->chain.auth_ofs);
+ uint32_t max_len = RTE_MAX(job->chain.cipher_len, job->chain.auth_len);
+ rte_iova_t auth_iova_end;
+ uint32_t t;
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session, cryptodev_qat_driver_id);
+ if (unlikely(ctx->bpi_ctx)) {
+ QAT_DP_LOG(ERR, "DOCSIS is not supported");
+ return -1;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_START) != 0)) {
+ t = tx_queue->tail;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ req->comn_mid.opaque_data = (uintptr_t)opaque;
+ } else {
+ t = (uint32_t)*drv_data;
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + t);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ }
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ req->comn_mid.src_data_addr =
+ req->comn_mid.dest_data_addr = job->data_iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length = min_ofs + max_len;
+
+ /* cipher IV */
+ rte_memcpy_generic(cipher_param->u.cipher_IV_array,
+ job->iv, ctx->cipher_iv.length);
+ qat_sym_set_cipher_param(cipher_param, job->chain.cipher_ofs,
+ job->chain.cipher_len);
+
+ /* auth */
+ qat_sym_set_auth_param(auth_param, job->chain.auth_ofs,
+ job->chain.auth_len, job->chain.digest_iova, 0);
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = job->iv_iova;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ uint32_t len = job->chain.auth_ofs +
+ job->chain.auth_len;
+ struct rte_crypto_vec *vec = job->sgl->vec;
+ int auth_end_get = 0;
+ while (len) {
+ if (len <= vec->len) {
+ auth_iova_end = vec->iova + len;
+ auth_end_get = 1;
+ break;
+ }
+ len -= vec->len;
+ vec++;
+ }
+ if (!auth_end_get) {
+ QAT_DP_LOG(ERR, "Failed to get auth end");
+ return -1;
+ }
+ } else
+ auth_iova_end = job->data_iova + job->chain.auth_ofs +
+ job->chain.auth_len;
+
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_param->auth_off + auth_param->auth_len <
+ cipher_param->cipher_offset +
+ cipher_param->cipher_length) &&
+ (job->chain.digest_iova == auth_iova_end)) {
+ /* Handle partial digest encryption */
+ if (cipher_param->cipher_offset +
+ cipher_param->cipher_length <
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length)
+ req->comn_mid.dst_length =
+ req->comn_mid.src_length =
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length;
+ struct icp_qat_fw_comn_req_hdr *header =
+ &req->comn_hdr;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ QAT_DP_LOG(ERR, "GMAC as chained auth algo is not supported");
+ return -1;
+ default:
+ break;
+ }
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_IS_SGL) != 0)) {
+ int ret = qat_sym_frame_fill_sgl(qp, req, job->sgl,
+ min_ofs + max_len);
+ if (unlikely(ret < 0))
+ return -1;
+ }
+
+ qp->enqueued++;
+ qp->stats.enqueued_count++;
+
+ if (unlikely((flags & RTE_CRYPTO_HW_ENQ_FLAG_END) != 0)) {
+ tx_queue->tail = (t + tx_queue->msg_size) &
+ tx_queue->modulo_mask;
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number,
+ tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+ } else
+ *drv_data = (t + tx_queue->msg_size) & tx_queue->modulo_mask;
+
+ return 0;
+}
+
+#define get_rx_queue_message_at_index(q, h, i) \
+ (void *)((uint8_t *)q->base_addr + ((h + q->msg_size * (i)) & \
+ q->modulo_mask))
+
+static __rte_always_inline int
+qat_is_rx_msg_ok(struct icp_qat_fw_comn_resp *resp_msg)
+{
+ return ICP_QAT_FW_COMN_STATUS_FLAG_OK ==
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status);
+}
+
+static __rte_always_inline int
+qat_sym_query_processed_jobs(void *qat_sym_qp, uint32_t nb_jobs)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head = (rx_queue->head + (nb_jobs - 1) * rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ if (*(uint32_t *)resp == ADF_RING_EMPTY_SIG)
+ return 0;
+
+ return 1;
+}
+
+static __rte_always_inline void *
+qat_sym_job_dequeue_one(void *qat_sym_qp, uint64_t *drv_data, uint64_t flags,
+ int *is_op_success)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head;
+ void *opaque;
+
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+ head = rx_queue->head;
+ else
+ head = (uint32_t)*drv_data;
+
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+ *is_op_success = 0;
+ return NULL;
+ }
+
+ if (unlikely(qat_is_rx_msg_ok(resp) == 0))
+ *is_op_success = -1;
+ else
+ *is_op_success = 1;
+
+ opaque = (void *)(uintptr_t)resp->opaque_data;
+
+ rx_queue->head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ rx_queue->nb_processed_responses++;
+ qp->dequeued++;
+ qp->stats.dequeued_count++;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+
+ return opaque;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_job_dequeue_n(void *qat_sym_qp, uint64_t *drv_data,
+ void *user_data, rte_crpyto_hw_user_post_deq_cb_fn cb,
+ uint32_t n, uint64_t flags, uint32_t *n_failed_jobs)
+{
+ struct qat_qp *qp = qat_sym_qp;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ uint32_t head, i;
+ uint32_t status, total_fail = 0;
+
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_START)
+ head = rx_queue->head;
+ else
+ head = (uint32_t)*drv_data;
+
+ for (i = 0; i < n; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG)) {
+ if (flags & RTE_CRYPTO_HW_DEQ_FLAG_EXHAUST)
+ break;
+ return -i;
+ }
+
+ status = qat_is_rx_msg_ok(resp);
+ total_fail += !status;
+ cb(user_data, i, status);
+
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+ }
+
+ rx_queue->head = head;
+ rx_queue->nb_processed_responses += i;
+ qp->dequeued += i;
+ qp->stats.dequeued_count += i;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(qp, rx_queue);
+ *n_failed_jobs = total_fail;
+
+ return i;
+}
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops)
+{
+ struct qat_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp->service_type != QAT_SERVICE_SYMMETRIC)
+ return -EINVAL;
+
+ hw_ops->qp = (void *)qp;
+ hw_ops->enqueue_aead = qat_sym_job_enqueue_aead;
+ hw_ops->enqueue_cipher = qat_sym_job_enqueue_cipher;
+ hw_ops->enqueue_auth = qat_sym_job_enqueue_auth;
+ hw_ops->enqueue_chain = qat_sym_job_enqueue_chain;
+ hw_ops->dequeue_one = qat_sym_job_dequeue_one;
+ hw_ops->dequeue_many = qat_sym_job_dequeue_n;
+ hw_ops->query_processed = qat_sym_query_processed_jobs;
+
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_sym_job.h b/drivers/crypto/qat/qat_sym_job.h
new file mode 100644
index 000000000..b11aeb841
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_job.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_JOB_H_
+#define _QAT_SYM_JOB_H_
+
+int
+qat_sym_get_ops(struct rte_cryptodev *dev,
+ uint16_t qp_id, struct rte_crypto_hw_ops *hw_ops);
+
+#endif /* _QAT_SYM_JOB_H_ */
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index e887c880f..be9d73c0a 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -13,6 +13,7 @@
#include "qat_sym.h"
#include "qat_sym_session.h"
#include "qat_sym_pmd.h"
+#include "qat_sym_job.h"
#define MIXED_CRYPTO_MIN_FW_VER 0x04090000
@@ -234,7 +235,8 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
/* Crypto related operations */
.sym_session_get_size = qat_sym_session_get_private_size,
.sym_session_configure = qat_sym_session_configure,
- .sym_session_clear = qat_sym_session_clear
+ .sym_session_clear = qat_sym_session_clear,
+ .sym_get_hw_ops = qat_sym_get_ops,
};
static uint16_t
@@ -308,7 +310,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
- RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
+ RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
+ RTE_CRYPTODEV_FF_SYM_HW_DIRECT_API;
internals = cryptodev->data->dev_private;
internals->qat_dev = qat_pci_dev;
--
2.20.1
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2020-07-03 11:09 UTC | newest]
Thread overview: 6+ messages
-- links below jump to the message on this page --
2020-07-03 10:12 [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 10:12 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
2020-07-03 10:12 ` [dpdk-dev] [dpdk-dev v3 2/3] test/crypto: add unit-test for cryptodev direct APIs Fan Zhang
2020-07-03 10:12 ` [dpdk-dev] [dpdk-dev v3 3/3] doc: add cryptodev direct APIs guide Fan Zhang
-- strict thread matches above, loose matches on Subject: below --
2020-06-25 13:31 [dpdk-dev] [dpdk-dev v2 0/3] crypto/qat: add symmetric crypto data-path APIs Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 0/3] cryptodev: " Fan Zhang
2020-07-03 10:14 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 0/4] cryptodev: add symmetric crypto " Fan Zhang
2020-07-03 11:09 ` [dpdk-dev] [dpdk-dev v3 1/3] crypto/qat: add support to direct " Fan Zhang