DPDK patches and discussions
From: Arek Kusztal <arkadiuszx.kusztal@intel.com>
To: dev@dpdk.org
Cc: gakhil@marvell.com, fiona.trahe@intel.com, roy.fan.zhang@intel.com
Subject: [dpdk-dev] [PATCH 13/15] crypto/qat: update raw dp api
Date: Mon, 31 May 2021 15:10:25 +0100
Message-ID: <20210531141027.13289-14-arkadiuszx.kusztal@intel.com>
In-Reply-To: <20210531141027.13289-1-arkadiuszx.kusztal@intel.com>

From: Fan Zhang <roy.fan.zhang@intel.com>

This commit updates the QAT raw data-path API to support the
changes made to the device and session code. The raw data-path
API now works on QAT Generation 1 to 3 devices.
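
For reference, a rough application-side sketch of how this raw data-path
API is driven is shown below. It assumes the cryptodev, queue pair and an
AEAD session have already been configured; the helper name and the
single-job flow are illustrative only and are not part of this patch. A
real application would keep the raw DP context alive for the lifetime of
the queue pair and reuse it for dequeue; it is created and released here
only to keep the sketch self-contained.

#include <errno.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_malloc.h>

/* Illustrative helper: enqueue one AEAD job through the raw DP API. */
static int
raw_dp_enqueue_one_aead(uint8_t dev_id, uint16_t qp_id,
	struct rte_cryptodev_sym_session *sess,
	struct rte_crypto_vec *data, uint16_t n_vecs,
	struct rte_crypto_va_iova_ptr *iv,
	struct rte_crypto_va_iova_ptr *digest,
	struct rte_crypto_va_iova_ptr *aad,
	void *user_data)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	union rte_crypto_sym_ofs ofs = { .raw = 0 }; /* no head/tail skips */
	struct rte_crypto_raw_dp_ctx *raw_ctx;
	int size, ret;

	/* Context size is driver specific (QAT adds its own private data). */
	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (size < 0)
		return size;

	raw_ctx = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (raw_ctx == NULL)
		return -ENOMEM;

	/* Bind the context to the queue pair and session; this is where the
	 * driver (qat_sym_configure_dp_ctx) picks the AEAD enqueue handlers.
	 */
	ret = rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, raw_ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
	if (ret < 0)
		goto out;

	/* Stage one job; a return of 0 means it is cached and the queue
	 * tail must be kicked with the "done" call.
	 */
	ret = rte_cryptodev_raw_enqueue(raw_ctx, data, n_vecs, ofs,
			iv, digest, aad, user_data);
	if (ret == 0)
		ret = rte_cryptodev_raw_enqueue_done(raw_ctx, 1);
out:
	rte_free(raw_ctx);
	return ret;
}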

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/crypto/qat/qat_sym_hw_dp.c | 419 +++++++++++++++--------------
 1 file changed, 216 insertions(+), 203 deletions(-)

diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
index 2f64de44a1..4305579b54 100644
--- a/drivers/crypto/qat/qat_sym_hw_dp.c
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -101,204 +101,6 @@ qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
 #define QAT_SYM_DP_GET_MAX_ENQ(q, c, n) \
 	RTE_MIN((q->max_inflights - q->enqueued + q->dequeued - c), n)
 
-static __rte_always_inline void
-enqueue_one_aead_job(struct qat_sym_session *ctx,
-	struct icp_qat_fw_la_bulk_req *req,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	union rte_crypto_sym_ofs ofs, uint32_t data_len)
-{
-	struct icp_qat_fw_la_cipher_req_params *cipher_param =
-		(void *)&req->serv_specif_rqpars;
-	struct icp_qat_fw_la_auth_req_params *auth_param =
-		(void *)((uint8_t *)&req->serv_specif_rqpars +
-		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
-	uint8_t *aad_data;
-	uint8_t aad_ccm_real_len;
-	uint8_t aad_len_field_sz;
-	uint32_t msg_len_be;
-	rte_iova_t aad_iova = 0;
-	uint8_t q;
-
-	switch (ctx->qat_hash_alg) {
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
-	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
-			req->comn_hdr.serv_specif_flags,
-				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
-				ctx->cipher_iv.length);
-		aad_iova = aad->iova;
-		break;
-	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
-		aad_data = aad->va;
-		aad_iova = aad->iova;
-		aad_ccm_real_len = 0;
-		aad_len_field_sz = 0;
-		msg_len_be = rte_bswap32((uint32_t)data_len -
-				ofs.ofs.cipher.head);
-
-		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
-			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
-			aad_ccm_real_len = ctx->aad_len -
-				ICP_QAT_HW_CCM_AAD_B0_LEN -
-				ICP_QAT_HW_CCM_AAD_LEN_INFO;
-		} else {
-			aad_data = iv->va;
-			aad_iova = iv->iova;
-		}
-
-		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
-			aad_len_field_sz, ctx->digest_length, q);
-		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
-				(uint8_t *)&msg_len_be,
-				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
-		} else {
-			memcpy(aad_data	+ ctx->cipher_iv.length +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-				(uint8_t *)&msg_len_be +
-				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
-				- q), q);
-		}
-
-		if (aad_len_field_sz > 0) {
-			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
-				rte_bswap16(aad_ccm_real_len);
-
-			if ((aad_ccm_real_len + aad_len_field_sz)
-				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
-				uint8_t pad_len = 0;
-				uint8_t pad_idx = 0;
-
-				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
-					((aad_ccm_real_len +
-					aad_len_field_sz) %
-					ICP_QAT_HW_CCM_AAD_B0_LEN);
-				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
-					aad_ccm_real_len +
-					aad_len_field_sz;
-				memset(&aad_data[pad_idx], 0, pad_len);
-			}
-		}
-
-		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
-			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va +
-			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
-		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
-			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
-		rte_memcpy((uint8_t *)aad->va +
-				ICP_QAT_HW_CCM_NONCE_OFFSET,
-			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
-			ctx->cipher_iv.length);
-		break;
-	default:
-		break;
-	}
-
-	cipher_param->cipher_offset = ofs.ofs.cipher.head;
-	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
-			ofs.ofs.cipher.tail;
-	auth_param->auth_off = ofs.ofs.cipher.head;
-	auth_param->auth_len = cipher_param->cipher_length;
-	auth_param->auth_res_addr = digest->iova;
-	auth_param->u1.aad_adr = aad_iova;
-
-	if (ctx->is_single_pass) {
-		cipher_param->spc_aad_addr = aad_iova;
-		cipher_param->spc_auth_res_addr = digest->iova;
-	}
-}
-
-static __rte_always_inline int
-qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_vec *data, uint16_t n_data_vecs,
-	union rte_crypto_sym_ofs ofs,
-	struct rte_crypto_va_iova_ptr *iv,
-	struct rte_crypto_va_iova_ptr *digest,
-	struct rte_crypto_va_iova_ptr *aad,
-	void *user_data)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-	uint32_t tail = dp_ctx->tail;
-
-	req = (struct icp_qat_fw_la_bulk_req *)(
-		(uint8_t *)tx_queue->base_addr + tail);
-	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
-	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
-	if (unlikely(data_len < 0))
-		return -1;
-	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
-
-	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
-		(uint32_t)data_len);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue++;
-
-	return 0;
-}
-
-static __rte_always_inline uint32_t
-qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
-	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
-	void *user_data[], int *status)
-{
-	struct qat_qp *qp = qp_data;
-	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
-	struct qat_queue *tx_queue = &qp->tx_q;
-	struct qat_sym_session *ctx = dp_ctx->session;
-	uint32_t i, n;
-	uint32_t tail;
-	struct icp_qat_fw_la_bulk_req *req;
-	int32_t data_len;
-
-	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
-	if (unlikely(n == 0)) {
-		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
-		*status = 0;
-		return 0;
-	}
-
-	tail = dp_ctx->tail;
-
-	for (i = 0; i < n; i++) {
-		req  = (struct icp_qat_fw_la_bulk_req *)(
-			(uint8_t *)tx_queue->base_addr + tail);
-		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
-
-		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
-			vec->sgl[i].num);
-		if (unlikely(data_len < 0))
-			break;
-		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
-		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
-			&vec->aad[i], ofs, (uint32_t)data_len);
-		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
-	}
-
-	if (unlikely(i < n))
-		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
-
-	dp_ctx->tail = tail;
-	dp_ctx->cached_enqueue += i;
-	*status = 0;
-	return i;
-}
-
 static __rte_always_inline void
 enqueue_one_cipher_job(struct qat_sym_session *ctx,
 	struct icp_qat_fw_la_bulk_req *req,
@@ -704,6 +506,207 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
 	return i;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_la_cipher_req_params *cipher_param =
+		(void *)&req->serv_specif_rqpars;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(void *)((uint8_t *)&req->serv_specif_rqpars +
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+	uint8_t *aad_data;
+	uint8_t aad_ccm_real_len;
+	uint8_t aad_len_field_sz;
+	uint32_t msg_len_be;
+	rte_iova_t aad_iova = 0;
+	uint8_t q;
+
+	/* CPM 1.7 uses single pass to treat AEAD as cipher operation */
+	if (ctx->is_single_pass) {
+		enqueue_one_cipher_job(ctx, req, iv, ofs, data_len);
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+		return;
+	}
+
+	switch (ctx->qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		rte_memcpy(cipher_param->u.cipher_IV_array, iv->va,
+				ctx->cipher_iv.length);
+		aad_iova = aad->iova;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		aad_data = aad->va;
+		aad_iova = aad->iova;
+		aad_ccm_real_len = 0;
+		aad_len_field_sz = 0;
+		msg_len_be = rte_bswap32((uint32_t)data_len -
+				ofs.ofs.cipher.head);
+
+		if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+			aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			aad_ccm_real_len = ctx->aad_len -
+				ICP_QAT_HW_CCM_AAD_B0_LEN -
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+		} else {
+			aad_data = iv->va;
+			aad_iova = iv->iova;
+		}
+
+		q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+		aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+			aad_len_field_sz, ctx->digest_length, q);
+		if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				(uint8_t *)&msg_len_be,
+				ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+		} else {
+			memcpy(aad_data	+ ctx->cipher_iv.length +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+				(uint8_t *)&msg_len_be +
+				(ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				- q), q);
+		}
+
+		if (aad_len_field_sz > 0) {
+			*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+				rte_bswap16(aad_ccm_real_len);
+
+			if ((aad_ccm_real_len + aad_len_field_sz)
+				% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+				uint8_t pad_len = 0;
+				uint8_t pad_idx = 0;
+
+				pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len +
+					aad_len_field_sz) %
+					ICP_QAT_HW_CCM_AAD_B0_LEN);
+				pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					aad_ccm_real_len +
+					aad_len_field_sz;
+				memset(&aad_data[pad_idx], 0, pad_len);
+			}
+		}
+
+		rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+			+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va +
+			ICP_QAT_HW_CCM_NONCE_OFFSET, ctx->cipher_iv.length);
+		*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+		rte_memcpy((uint8_t *)aad->va +
+				ICP_QAT_HW_CCM_NONCE_OFFSET,
+			(uint8_t *)iv->va + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			ctx->cipher_iv.length);
+		break;
+	default:
+		break;
+	}
+
+	cipher_param->cipher_offset = ofs.ofs.cipher.head;
+	cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+			ofs.ofs.cipher.tail;
+	auth_param->auth_off = ofs.ofs.cipher.head;
+	auth_param->auth_len = cipher_param->cipher_length;
+	auth_param->auth_res_addr = digest->iova;
+	auth_param->u1.aad_adr = aad_iova;
+}
+
+static __rte_always_inline int
+qat_sym_dp_enqueue_single_aead(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_vec *data, uint16_t n_data_vecs,
+	union rte_crypto_sym_ofs ofs,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	void *user_data)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+	uint32_t tail = dp_ctx->tail;
+
+	req = (struct icp_qat_fw_la_bulk_req *)(
+		(uint8_t *)tx_queue->base_addr + tail);
+	tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+	rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+	data_len = qat_sym_dp_parse_data_vec(qp, req, data, n_data_vecs);
+	if (unlikely(data_len < 0))
+		return -1;
+	req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data;
+
+	enqueue_one_aead_job(ctx, req, iv, digest, aad, ofs,
+		(uint32_t)data_len);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue++;
+
+	return 0;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
+	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+	void *user_data[], int *status)
+{
+	struct qat_qp *qp = qp_data;
+	struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
+	struct qat_queue *tx_queue = &qp->tx_q;
+	struct qat_sym_session *ctx = dp_ctx->session;
+	uint32_t i, n;
+	uint32_t tail;
+	struct icp_qat_fw_la_bulk_req *req;
+	int32_t data_len;
+
+	n = QAT_SYM_DP_GET_MAX_ENQ(qp, dp_ctx->cached_enqueue, vec->num);
+	if (unlikely(n == 0)) {
+		qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+		*status = 0;
+		return 0;
+	}
+
+	tail = dp_ctx->tail;
+
+	for (i = 0; i < n; i++) {
+		req  = (struct icp_qat_fw_la_bulk_req *)(
+			(uint8_t *)tx_queue->base_addr + tail);
+		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+		data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
+			vec->sgl[i].num);
+		if (unlikely(data_len < 0))
+			break;
+		req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
+		enqueue_one_aead_job(ctx, req, &vec->iv[i], &vec->digest[i],
+			&vec->aad[i], ofs, (uint32_t)data_len);
+		tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+	}
+
+	if (unlikely(i < n))
+		qat_sym_dp_fill_vec_status(vec->status + i, -1, n - i);
+
+	dp_ctx->tail = tail;
+	dp_ctx->cached_enqueue += i;
+	*status = 0;
+	return i;
+}
+
 static __rte_always_inline uint32_t
 qat_sym_dp_dequeue_burst(void *qp_data, uint8_t *drv_ctx,
 	rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
@@ -937,8 +940,9 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 	raw_dp_ctx->dequeue = qat_sym_dp_dequeue;
 	raw_dp_ctx->dequeue_done = qat_sym_dp_update_head;
 
-	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
-			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
 		/* AES-GCM or AES-CCM */
 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
@@ -954,12 +958,21 @@ qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
 					qat_sym_dp_enqueue_chain_jobs;
 			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_chain;
 		}
-	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
 		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs;
 		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth;
 	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
-		raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_cipher_jobs;
-		raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
+		if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_aead_jobs;
+			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_aead;
+		} else {
+			raw_dp_ctx->enqueue_burst =
+					qat_sym_dp_enqueue_cipher_jobs;
+			raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_cipher;
+		}
 	} else
 		return -1;
 
-- 
2.25.1
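
The vector form of the API corresponds to the qat_sym_dp_enqueue_aead_jobs
handler in the patch above. A minimal sketch of the application-side burst
path follows; the helper name and the caller-prepared arrays (SGLs, IVs,
digests, AAD, status and opaque pointers) are assumptions made for the
example, and the raw DP context is expected to have been configured as in
the single-job sketch earlier.

#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

/* Illustrative helper: batch-enqueue "num" AEAD jobs on a raw DP context
 * already bound to a QAT queue pair and an AEAD session.
 */
static uint32_t
raw_dp_enqueue_aead_burst(struct rte_crypto_raw_dp_ctx *raw_ctx,
	struct rte_crypto_sgl *sgls,
	struct rte_crypto_va_iova_ptr *ivs,
	struct rte_crypto_va_iova_ptr *digests,
	struct rte_crypto_va_iova_ptr *aads,
	int32_t *statuses, void **opaque, uint32_t num)
{
	struct rte_crypto_sym_vec vec = {
		.num = num,
		.sgl = sgls,	/* renamed to src_sgl in later DPDK releases */
		.iv = ivs,
		.digest = digests,
		.aad = aads,
		.status = statuses,
	};
	union rte_crypto_sym_ofs ofs = { .raw = 0 };
	int enq_status = 0;
	uint32_t done;

	done = rte_cryptodev_raw_enqueue_burst(raw_ctx, &vec, ofs,
			opaque, &enq_status);
	/* QAT caches the descriptors; the queue tail is only written to
	 * hardware once the batch is confirmed.
	 */
	if (done > 0 && enq_status == 0)
		rte_cryptodev_raw_enqueue_done(raw_ctx, done);

	return done;
}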


Thread overview: 16+ messages
2021-05-31 14:10 [dpdk-dev] [PATCH 00/15] Add support for fourth generation of Intel QuickAssist Technology devices Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 01/15] common/qat: rework qp per service function Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 02/15] crypto/qat: add fourth generation qat devices support Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 03/15] crypto/qat: enable gen4 legacy algorithms Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 04/15] crypto/qat: add fourth generation ucs slice type, add ctr mode Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 05/15] crypto/qat: rename content descriptor functions Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 06/15] crypto/qat: add legacy gcm and ccm Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 07/15] crypto/qat: rework init common header function Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 08/15] crypto/qat: add aes gcm in ucs spc mode Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 09/15] crypto/qat: add chacha-poly " Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 10/15] crypto/qat: add gmac in legacy mode on fourth generation Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 11/15] common/qat: add pf2vf communication in qat Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 12/15] common/qat: reset ring pairs before setting pmd Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 13/15] crypto/qat: update raw dp api Arek Kusztal [this message]
2021-05-31 14:10 ` [dpdk-dev] [PATCH 14/15] crypto/qat: enable RAW API on QAT GEN1-3 only Arek Kusztal
2021-05-31 14:10 ` [dpdk-dev] [PATCH 15/15] test/crypto: check if RAW API is supported Arek Kusztal
