* [dpdk-dev] [PATCH 2/3] common/cpt: redesign propagation of error
2019-03-01 18:42 [dpdk-dev] [PATCH 1/3] common/cpt: improve structures used in datapath Anoob Joseph
@ 2019-03-01 18:42 ` Anoob Joseph
2019-03-01 18:42 ` [dpdk-dev] [PATCH 3/3] crypto/octeontx: move device specific code to driver Anoob Joseph
2019-03-19 4:36 ` [dpdk-dev] [PATCH 1/3] common/cpt: improve structures used in datapath Anoob Joseph
2 siblings, 0 replies; 7+ messages in thread
From: Anoob Joseph @ 2019-03-01 18:42 UTC (permalink / raw)
To: Akhil Goyal, Pablo de Lara
Cc: Anoob Joseph, Jerin Jacob Kollanukkaran,
Narayana Prasad Raju Athreya, Suheil Chandran, Ankur Dwivedi,
dev, Archana Muniganti
The check for prep_req is good enough to flag an error. The return var
passed around is redundant. Fixing this. Also making the functions
return correct error values in case of various failures.
In addition, adding unlikely flag for all error checks.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
---
drivers/common/cpt/cpt_request_mgr.h | 12 +-
drivers/common/cpt/cpt_ucode.h | 318 ++++++++++++++++++++---------------
2 files changed, 189 insertions(+), 141 deletions(-)
diff --git a/drivers/common/cpt/cpt_request_mgr.h b/drivers/common/cpt/cpt_request_mgr.h
index 4463cfb..81bddf4 100644
--- a/drivers/common/cpt/cpt_request_mgr.h
+++ b/drivers/common/cpt/cpt_request_mgr.h
@@ -111,17 +111,17 @@ cpt_pmd_crypto_operation(struct cpt_instance *instance,
cpt_op = sess->cpt_op;
- mdata = &(cptvf->meta_info);
-
if (likely(cpt_op & CPT_OP_CIPHER_MASK))
- prep_req = fill_fc_params(op, sess, &mdata, &ret);
+ ret = fill_fc_params(op, sess, &cptvf->meta_info, &mdata,
+ &prep_req);
else
- prep_req = fill_digest_params(op, sess, &mdata, &ret);
+ ret = fill_digest_params(op, sess, &cptvf->meta_info,
+ &mdata, &prep_req);
- if (unlikely(!prep_req)) {
+ if (unlikely(ret)) {
CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
"ret 0x%x", op, (unsigned int)cpt_op, ret);
- goto req_fail;
+ return ret;
}
/* Enqueue prepared instruction to HW */
diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
index 5933ea7..37c74f9 100644
--- a/drivers/common/cpt/cpt_ucode.h
+++ b/drivers/common/cpt/cpt_ucode.h
@@ -458,7 +458,7 @@ fill_sg_comp_from_iov(sg_comp_t *list,
return (uint32_t)i;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_digest_gen_prep(uint32_t flags,
uint64_t d_lens,
digest_params_t *params,
@@ -483,18 +483,9 @@ cpt_digest_gen_prep(uint32_t flags,
uint64_t c_dma, m_dma;
opcode_info_t opcode;
- if (!params || !params->ctx_buf.vaddr)
- return ERR_BAD_INPUT_ARG;
-
ctx = params->ctx_buf.vaddr;
meta_p = &params->meta_buf;
- if (!meta_p->vaddr || !meta_p->dma_addr)
- return ERR_BAD_INPUT_ARG;
-
- if (meta_p->size < sizeof(struct cpt_request_info))
- return ERR_BAD_INPUT_ARG;
-
m_vaddr = meta_p->vaddr;
m_dma = meta_p->dma_addr;
m_size = meta_p->size;
@@ -582,10 +573,10 @@ cpt_digest_gen_prep(uint32_t flags,
if (size) {
i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov,
0, &size, NULL, 0);
- if (size) {
+ if (unlikely(size)) {
CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short"
" by %dB", size);
- return ERR_BAD_INPUT_ARG;
+ return;
}
} else {
/*
@@ -605,8 +596,10 @@ cpt_digest_gen_prep(uint32_t flags,
scatter_comp = (sg_comp_t *)((uint8_t *)gather_comp + g_size_bytes);
if (flags & VALID_MAC_BUF) {
- if (params->mac_buf.size < mac_len)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(params->mac_buf.size < mac_len)) {
+ CPT_LOG_DP_ERR("Insufficient MAC size");
+ return;
+ }
size = mac_len;
i = fill_sg_comp_from_buf_min(scatter_comp, i,
@@ -616,10 +609,10 @@ cpt_digest_gen_prep(uint32_t flags,
i = fill_sg_comp_from_iov(scatter_comp, i,
params->src_iov, data_len,
&size, NULL, 0);
- if (size) {
- CPT_LOG_DP_DEBUG("Insufficient dst IOV size, short by"
- " %dB", size);
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
+ " %dB", size);
+ return;
}
}
@@ -663,10 +656,10 @@ cpt_digest_gen_prep(uint32_t flags,
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_enc_hmac_prep(uint32_t flags,
uint64_t d_offs,
uint64_t d_lens,
@@ -922,7 +915,7 @@ cpt_enc_hmac_prep(uint32_t flags,
if (unlikely(size)) {
CPT_LOG_DP_ERR("Insufficient buffer space,"
" size %d needed", size);
- return ERR_BAD_INPUT_ARG;
+ return;
}
}
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
@@ -964,8 +957,12 @@ cpt_enc_hmac_prep(uint32_t flags,
aad_buf,
aad_offset);
}
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return;
+ }
}
/* mac_data */
if (mac_len) {
@@ -998,7 +995,7 @@ cpt_enc_hmac_prep(uint32_t flags,
CPT_LOG_DP_ERR("Insufficient buffer"
" space, size %d needed",
size);
- return ERR_BAD_INPUT_ARG;
+ return;
}
}
}
@@ -1048,10 +1045,10 @@ cpt_enc_hmac_prep(uint32_t flags,
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_dec_hmac_prep(uint32_t flags,
uint64_t d_offs,
uint64_t d_lens,
@@ -1303,8 +1300,12 @@ cpt_dec_hmac_prep(uint32_t flags,
aad_buf,
aad_offset);
}
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return;
+ }
}
/* mac data */
@@ -1325,8 +1326,10 @@ cpt_dec_hmac_prep(uint32_t flags,
uint32_t aad_offset = aad_len ?
passthrough_len : 0;
- if (!fc_params->src_iov)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(!fc_params->src_iov)) {
+ CPT_LOG_DP_ERR("Bad input args");
+ return;
+ }
i = fill_sg_comp_from_iov(
gather_comp, i,
@@ -1336,8 +1339,12 @@ cpt_dec_hmac_prep(uint32_t flags,
aad_offset);
}
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return;
+ }
}
}
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
@@ -1370,8 +1377,10 @@ cpt_dec_hmac_prep(uint32_t flags,
uint32_t aad_offset = aad_len ?
passthrough_len : 0;
- if (!fc_params->dst_iov)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(!fc_params->dst_iov)) {
+ CPT_LOG_DP_ERR("Bad input args");
+ return;
+ }
i = fill_sg_comp_from_iov(scatter_comp, i,
fc_params->dst_iov, 0,
@@ -1379,8 +1388,11 @@ cpt_dec_hmac_prep(uint32_t flags,
aad_offset);
}
- if (unlikely(size))
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
@@ -1430,10 +1442,10 @@ cpt_dec_hmac_prep(uint32_t flags,
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
uint64_t d_offs,
uint64_t d_lens,
@@ -1654,8 +1666,11 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
i = fill_sg_comp_from_iov(gather_comp, i,
params->src_iov,
0, &size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
@@ -1686,8 +1701,11 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
params->dst_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
/* mac data */
@@ -1703,8 +1721,11 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
params->dst_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
}
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
@@ -1752,10 +1773,10 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
uint64_t d_offs,
uint64_t d_lens,
@@ -1946,8 +1967,11 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
i = fill_sg_comp_from_iov(gather_comp, i,
params->src_iov,
0, &size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
@@ -1972,8 +1996,11 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
params->dst_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
@@ -2020,10 +2047,10 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_kasumi_enc_prep(uint32_t req_flags,
uint64_t d_offs,
uint64_t d_lens,
@@ -2171,8 +2198,11 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
params->src_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
@@ -2204,8 +2234,11 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
params->dst_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
/* mac data */
@@ -2221,8 +2254,11 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
params->dst_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
}
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
@@ -2269,10 +2305,10 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
-static __rte_always_inline int
+static __rte_always_inline void
cpt_kasumi_dec_prep(uint64_t d_offs,
uint64_t d_lens,
fc_params_t *params,
@@ -2393,8 +2429,11 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
i = fill_sg_comp_from_iov(gather_comp, i,
params->src_iov,
0, &size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
g_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
@@ -2417,8 +2456,11 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
i = fill_sg_comp_from_iov(scatter_comp, i,
params->dst_iov, 0,
&size, NULL, 0);
- if (size)
- return ERR_BAD_INPUT_ARG;
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed", size);
+ return;
+ }
}
((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
s_size_bytes = ((i + 3) / 4) * sizeof(sg_comp_t);
@@ -2464,7 +2506,7 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
req->op = op;
*prep_req = req;
- return 0;
+ return;
}
static __rte_always_inline void *
@@ -2472,69 +2514,57 @@ cpt_fc_dec_hmac_prep(uint32_t flags,
uint64_t d_offs,
uint64_t d_lens,
fc_params_t *fc_params,
- void *op, int *ret_val)
+ void *op)
{
struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
uint8_t fc_type;
void *prep_req = NULL;
- int ret;
fc_type = ctx->fc_type;
if (likely(fc_type == FC_GEN)) {
- ret = cpt_dec_hmac_prep(flags, d_offs, d_lens,
- fc_params, op, &prep_req);
+ cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
} else if (fc_type == ZUC_SNOW3G) {
- ret = cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens,
- fc_params, op, &prep_req);
+ cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
} else if (fc_type == KASUMI) {
- ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op,
- &prep_req);
- } else {
- /*
- * For AUTH_ONLY case,
- * MC only supports digest generation and verification
- * should be done in software by memcmp()
- */
-
- ret = ERR_EIO;
+ cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, op, &prep_req);
}
- if (unlikely(!prep_req))
- *ret_val = ret;
+ /*
+ * For AUTH_ONLY case,
+ * MC only supports digest generation and verification
+ * should be done in software by memcmp()
+ */
+
return prep_req;
}
static __rte_always_inline void *__hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
- fc_params_t *fc_params, void *op, int *ret_val)
+ fc_params_t *fc_params, void *op)
{
struct cpt_ctx *ctx = fc_params->ctx_buf.vaddr;
uint8_t fc_type;
void *prep_req = NULL;
- int ret;
fc_type = ctx->fc_type;
/* Common api for rest of the ops */
if (likely(fc_type == FC_GEN)) {
- ret = cpt_enc_hmac_prep(flags, d_offs, d_lens,
- fc_params, op, &prep_req);
+ cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
} else if (fc_type == ZUC_SNOW3G) {
- ret = cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens,
- fc_params, op, &prep_req);
+ cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
} else if (fc_type == KASUMI) {
- ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens,
- fc_params, op, &prep_req);
+ cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params, op,
+ &prep_req);
} else if (fc_type == HASH_HMAC) {
- ret = cpt_digest_gen_prep(flags, d_lens, fc_params, op,
- &prep_req);
- } else {
- ret = ERR_EIO;
+ cpt_digest_gen_prep(flags, d_lens, fc_params, op, &prep_req);
}
- if (unlikely(!prep_req))
- *ret_val = ret;
return prep_req;
}
@@ -3114,20 +3144,20 @@ prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
return 0;
}
-static __rte_always_inline void *
+static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop,
struct cpt_sess_misc *sess_misc,
+ struct cptvf_meta_info *cpt_m_info,
void **mdata_ptr,
- int *op_ret)
+ void **prep_req)
{
uint32_t space = 0;
struct rte_crypto_sym_op *sym_op = cop->sym;
- void *mdata;
+ void *mdata = NULL;
uintptr_t *op;
uint32_t mc_hash_off;
uint32_t flags = 0;
uint64_t d_offs, d_lens;
- void *prep_req = NULL;
struct rte_mbuf *m_src, *m_dst;
uint8_t cpt_op = sess_misc->cpt_op;
uint8_t zsk_flag = sess_misc->zsk_flag;
@@ -3142,8 +3172,7 @@ fill_fc_params(struct rte_crypto_op *cop,
char src[SRC_IOV_SIZE];
char dst[SRC_IOV_SIZE];
uint32_t iv_buf[4];
- struct cptvf_meta_info *cpt_m_info =
- (struct cptvf_meta_info *)(*mdata_ptr);
+ int ret;
if (likely(sess_misc->iv_length)) {
flags |= VALID_IV_BUF;
@@ -3289,8 +3318,8 @@ fill_fc_params(struct rte_crypto_op *cop,
&fc_params,
&flags))) {
CPT_LOG_DP_ERR("Prepare inplace src iov failed");
- *op_ret = -1;
- return NULL;
+ ret = -EINVAL;
+ goto err_exit;
}
} else {
@@ -3301,8 +3330,8 @@ fill_fc_params(struct rte_crypto_op *cop,
/* Store SG I/O in the api for reuse */
if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
CPT_LOG_DP_ERR("Prepare src iov failed");
- *op_ret = -1;
- return NULL;
+ ret = -EINVAL;
+ goto err_exit;
}
if (unlikely(m_dst != NULL)) {
@@ -3319,14 +3348,16 @@ fill_fc_params(struct rte_crypto_op *cop,
"m_dst %p, need %u"
" more",
m_dst, pkt_len);
- return NULL;
+ ret = -EINVAL;
+ goto err_exit;
}
}
if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
CPT_LOG_DP_ERR("Prepare dst iov failed for "
"m_dst %p", m_dst);
- return NULL;
+ ret = -EINVAL;
+ goto err_exit;
}
} else {
fc_params.dst_iov = (void *)src;
@@ -3346,7 +3377,8 @@ fill_fc_params(struct rte_crypto_op *cop,
if (unlikely(mdata == NULL)) {
CPT_LOG_DP_ERR("Error allocating meta buffer for request");
- return NULL;
+ ret = -ENOMEM;
+ goto err_exit;
}
op = (uintptr_t *)((uintptr_t)mdata & (uintptr_t)~1ull);
@@ -3361,16 +3393,26 @@ fill_fc_params(struct rte_crypto_op *cop,
/* Finally prepare the instruction */
if (cpt_op & CPT_OP_ENCODE)
- prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
- &fc_params, op, op_ret);
+ *prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
+ &fc_params, op);
else
- prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
- &fc_params, op, op_ret);
+ *prep_req = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens,
+ &fc_params, op);
+
+ if (unlikely(*prep_req == NULL)) {
+ CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
+ }
- if (unlikely(!prep_req))
- free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
*mdata_ptr = mdata;
- return prep_req;
+
+ return 0;
+
+free_mdata_and_exit:
+ free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
+err_exit:
+ return ret;
}
static __rte_always_inline void
@@ -3476,11 +3518,12 @@ find_kasumif9_direction_and_length(uint8_t *src,
/*
* This handles all auth only except AES_GMAC
*/
-static __rte_always_inline void *
+static __rte_always_inline int
fill_digest_params(struct rte_crypto_op *cop,
struct cpt_sess_misc *sess,
+ struct cptvf_meta_info *cpt_m_info,
void **mdata_ptr,
- int *op_ret)
+ void **prep_req)
{
uint32_t space = 0;
struct rte_crypto_sym_op *sym_op = cop->sym;
@@ -3490,7 +3533,6 @@ fill_digest_params(struct rte_crypto_op *cop,
uint32_t auth_range_off;
uint32_t flags = 0;
uint64_t d_offs = 0, d_lens;
- void *prep_req = NULL;
struct rte_mbuf *m_src, *m_dst;
uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
uint8_t zsk_flag = sess->zsk_flag;
@@ -3498,9 +3540,9 @@ fill_digest_params(struct rte_crypto_op *cop,
fc_params_t params;
char src[SRC_IOV_SIZE];
uint8_t iv_buf[16];
+ int ret;
+
memset(&params, 0, sizeof(fc_params_t));
- struct cptvf_meta_info *cpt_m_info =
- (struct cptvf_meta_info *)(*mdata_ptr);
m_src = sym_op->m_src;
@@ -3508,9 +3550,8 @@ fill_digest_params(struct rte_crypto_op *cop,
mdata = alloc_op_meta(NULL, &params.meta_buf, cpt_m_info->cptvf_op_mlen,
cpt_m_info->cptvf_meta_pool);
if (mdata == NULL) {
- CPT_LOG_DP_ERR("Error allocating meta buffer for request");
- *op_ret = -ENOMEM;
- return NULL;
+ ret = -ENOMEM;
+ goto err_exit;
}
mphys = params.meta_buf.dma_addr;
@@ -3597,7 +3638,8 @@ fill_digest_params(struct rte_crypto_op *cop,
if (!rte_pktmbuf_append(m_dst, space)) {
CPT_LOG_DP_ERR("Failed to extend "
"mbuf by %uB", space);
- goto err;
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
}
params.mac_buf.vaddr =
@@ -3626,18 +3668,24 @@ fill_digest_params(struct rte_crypto_op *cop,
/*Store SG I/O in the api for reuse */
if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
CPT_LOG_DP_ERR("Prepare src iov failed");
- *op_ret = -1;
- goto err;
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
+ }
+
+ *prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, op);
+ if (unlikely(*prep_req == NULL)) {
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
}
- prep_req = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens,
- &params, op, op_ret);
*mdata_ptr = mdata;
- return prep_req;
-err:
- if (unlikely(!prep_req))
- free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
- return NULL;
+
+ return 0;
+
+free_mdata_and_exit:
+ free_op_meta(mdata, cpt_m_info->cptvf_meta_pool);
+err_exit:
+ return ret;
}
#endif /*_CPT_UCODE_H_ */
--
2.7.4
^ permalink raw reply [flat|nested] 7+ messages in thread
* [dpdk-dev] [PATCH 3/3] crypto/octeontx: move device specific code to driver
2019-03-01 18:42 [dpdk-dev] [PATCH 1/3] common/cpt: improve structures used in datapath Anoob Joseph
2019-03-01 18:42 ` [dpdk-dev] [PATCH 2/3] common/cpt: redesign propagation of error Anoob Joseph
@ 2019-03-01 18:42 ` Anoob Joseph
2019-03-19 4:36 ` [dpdk-dev] [PATCH 1/3] common/cpt: improve structures used in datapath Anoob Joseph
2 siblings, 0 replies; 7+ messages in thread
From: Anoob Joseph @ 2019-03-01 18:42 UTC (permalink / raw)
To: Akhil Goyal, Pablo de Lara
Cc: Anoob Joseph, Jerin Jacob Kollanukkaran,
Narayana Prasad Raju Athreya, Suheil Chandran, Ankur Dwivedi,
dev, Archana Muniganti
Moving upper level enqueue/dequeue routines to the driver. The h/w interface
used to submit requests has enough differences to substantiate the need
for separate routines.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
---
drivers/common/cpt/cpt_common.h | 6 -
drivers/common/cpt/cpt_mcode_defines.h | 12 +
drivers/common/cpt/cpt_request_mgr.h | 185 ---------------
drivers/crypto/octeontx/Makefile | 3 +-
drivers/crypto/octeontx/meson.build | 2 +-
drivers/crypto/octeontx/otx_cryptodev_hw_access.h | 2 +
drivers/crypto/octeontx/otx_cryptodev_ops.c | 262 ++++++++++++++++++++--
7 files changed, 255 insertions(+), 217 deletions(-)
delete mode 100644 drivers/common/cpt/cpt_request_mgr.h
diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
index 8568d5b..ceb32f2 100644
--- a/drivers/common/cpt/cpt_common.h
+++ b/drivers/common/cpt/cpt_common.h
@@ -9,12 +9,6 @@
* This file defines common macros and structs
*/
-/*
- * Macros to determine CPT model. Driver makefile will define CPT_MODEL
- * accordingly
- */
-#define CRYPTO_OCTEONTX 0x1
-
#define TIME_IN_RESET_COUNT 5
/* Default command timeout in seconds */
diff --git a/drivers/common/cpt/cpt_mcode_defines.h b/drivers/common/cpt/cpt_mcode_defines.h
index becc14f..c0adbd5 100644
--- a/drivers/common/cpt/cpt_mcode_defines.h
+++ b/drivers/common/cpt/cpt_mcode_defines.h
@@ -383,4 +383,16 @@ typedef mc_hash_type_t auth_type_t;
#define SESS_PRIV(__sess) \
(void *)((uint8_t *)__sess + sizeof(struct cpt_sess_misc))
+/*
+ * Get the session size
+ *
+ * @return
+ * - session size
+ */
+static __rte_always_inline unsigned int
+cpt_get_session_size(void)
+{
+ unsigned int ctx_len = sizeof(struct cpt_ctx);
+ return (sizeof(struct cpt_sess_misc) + RTE_ALIGN_CEIL(ctx_len, 8));
+}
#endif /* _CPT_MCODE_DEFINES_H_ */
diff --git a/drivers/common/cpt/cpt_request_mgr.h b/drivers/common/cpt/cpt_request_mgr.h
deleted file mode 100644
index 81bddf4..0000000
--- a/drivers/common/cpt/cpt_request_mgr.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018 Cavium, Inc
- */
-
-#ifndef _CPT_REQUEST_MGR_H_
-#define _CPT_REQUEST_MGR_H_
-
-#include <rte_branch_prediction.h>
-#include <rte_cycles.h>
-
-#include "cpt_common.h"
-#include "cpt_mcode_defines.h"
-
-#if CPT_MODEL == CRYPTO_OCTEONTX
-#include "../../crypto/octeontx/otx_cryptodev_hw_access.h"
-#endif
-
-/*
- * This file defines the agreement between the common layer and the individual
- * crypto drivers for OCTEON TX series. Datapath in otx* directory include this
- * file and all these functions are static inlined for better performance.
- *
- */
-
-/*
- * Get the session size
- *
- * This function is used in the data path.
- *
- * @return
- * - session size
- */
-static __rte_always_inline unsigned int
-cpt_get_session_size(void)
-{
- unsigned int ctx_len = sizeof(struct cpt_ctx);
- return (sizeof(struct cpt_sess_misc) + RTE_ALIGN_CEIL(ctx_len, 8));
-}
-
-static __rte_always_inline int32_t __hot
-cpt_enqueue_req(struct cpt_instance *instance, struct pending_queue *pqueue,
- void *req)
-{
- struct cpt_request_info *user_req = (struct cpt_request_info *)req;
- int32_t ret = 0;
-
- if (unlikely(!req))
- return 0;
-
- if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
- return -EAGAIN;
-
- fill_cpt_inst(instance, req);
-
- CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);
-
- /* Fill time_out cycles */
- user_req->time_out = rte_get_timer_cycles() +
- DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
- user_req->extra_time = 0;
-
- /* Default mode of software queue */
- mark_cpt_inst(instance);
-
- pqueue->rid_queue[pqueue->enq_tail].rid =
- (uintptr_t)user_req;
- /* We will use soft queue length here to limit
- * requests
- */
- MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
- pqueue->pending_count += 1;
-
- CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
- "op: %p", user_req, user_req->op);
-
- return ret;
-}
-
-static __rte_always_inline int __hot
-cpt_pmd_crypto_operation(struct cpt_instance *instance,
- struct rte_crypto_op *op, struct pending_queue *pqueue,
- uint8_t cpt_driver_id)
-{
- struct cpt_sess_misc *sess = NULL;
- struct rte_crypto_sym_op *sym_op = op->sym;
- void *prep_req = NULL, *mdata = NULL;
- int ret = 0;
- uint64_t cpt_op;
- struct cpt_vf *cptvf = (struct cpt_vf *)instance;
-
- if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- int sess_len;
-
- sess_len = cpt_get_session_size();
-
- sess = rte_calloc(__func__, 1, sess_len, 8);
- if (!sess)
- return -ENOMEM;
-
- sess->ctx_dma_addr = rte_malloc_virt2iova(sess) +
- sizeof(struct cpt_sess_misc);
-
- ret = instance_session_cfg(sym_op->xform, (void *)sess);
- if (unlikely(ret))
- return -EINVAL;
- } else {
- sess = (struct cpt_sess_misc *)
- get_sym_session_private_data(sym_op->session,
- cpt_driver_id);
- }
-
- cpt_op = sess->cpt_op;
-
- if (likely(cpt_op & CPT_OP_CIPHER_MASK))
- ret = fill_fc_params(op, sess, &cptvf->meta_info, &mdata,
- &prep_req);
- else
- ret = fill_digest_params(op, sess, &cptvf->meta_info,
- &mdata, &prep_req);
-
- if (unlikely(ret)) {
- CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
- "ret 0x%x", op, (unsigned int)cpt_op, ret);
- return ret;
- }
-
- /* Enqueue prepared instruction to HW */
- ret = cpt_enqueue_req(instance, pqueue, prep_req);
-
- if (unlikely(ret)) {
- if (unlikely(ret == -EAGAIN))
- goto req_fail;
- CPT_LOG_DP_ERR("Error enqueing crypto request : error "
- "code %d", ret);
- goto req_fail;
- }
-
- return 0;
-
-req_fail:
- if (mdata)
- free_op_meta(mdata, cptvf->meta_info.cptvf_meta_pool);
- return ret;
-}
-
-static __rte_always_inline int32_t __hot
-cpt_dequeue_burst(struct cpt_instance *instance, uint16_t cnt,
- void *resp[], uint8_t cc[], struct pending_queue *pqueue)
-{
- struct cpt_request_info *user_req;
- struct rid *rid_e;
- int i, count, pcount;
- uint8_t ret;
-
- pcount = pqueue->pending_count;
- count = (cnt > pcount) ? pcount : cnt;
-
- for (i = 0; i < count; i++) {
- rid_e = &pqueue->rid_queue[pqueue->deq_head];
- user_req = (struct cpt_request_info *)(rid_e->rid);
-
- if (likely((i+1) < count))
- rte_prefetch_non_temporal((void *)rid_e[1].rid);
-
- ret = check_nb_command_id(user_req, instance);
-
- if (unlikely(ret == ERR_REQ_PENDING)) {
- /* Stop checking for completions */
- break;
- }
-
- /* Return completion code and op handle */
- cc[i] = (uint8_t)ret;
- resp[i] = user_req->op;
- CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
- user_req, user_req->op, ret);
-
- MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
- pqueue->pending_count -= 1;
- }
-
- return i;
-}
-
-#endif /* _CPT_REQUEST_MGR_H_ */
diff --git a/drivers/crypto/octeontx/Makefile b/drivers/crypto/octeontx/Makefile
index d6412d1..2752cbc 100644
--- a/drivers/crypto/octeontx/Makefile
+++ b/drivers/crypto/octeontx/Makefile
@@ -20,8 +20,9 @@ LDLIBS += -lrte_common_cpt
VPATH += $(RTE_SDK)/drivers/crypto/octeontx
-CFLAGS += -O3 -DCPT_MODEL=CRYPTO_OCTEONTX
+CFLAGS += -O3
CFLAGS += -I$(RTE_SDK)/drivers/common/cpt
+CFLAGS += -DALLOW_EXPERIMENTAL_API
# PMD code
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev.c
diff --git a/drivers/crypto/octeontx/meson.build b/drivers/crypto/octeontx/meson.build
index 6511b40..423737e 100644
--- a/drivers/crypto/octeontx/meson.build
+++ b/drivers/crypto/octeontx/meson.build
@@ -8,6 +8,7 @@ deps += ['bus_pci']
deps += ['common_cpt']
name = 'octeontx_crypto'
+allow_experimental_apis = true
sources = files('otx_cryptodev.c',
'otx_cryptodev_capabilities.c',
'otx_cryptodev_hw_access.c',
@@ -15,4 +16,3 @@ sources = files('otx_cryptodev.c',
'otx_cryptodev_ops.c')
includes += include_directories('../../common/cpt')
-cflags += '-DCPT_MODEL=CRYPTO_OCTEONTX'
diff --git a/drivers/crypto/octeontx/otx_cryptodev_hw_access.h b/drivers/crypto/octeontx/otx_cryptodev_hw_access.h
index 82b15ee..dea4cba 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_hw_access.h
+++ b/drivers/crypto/octeontx/otx_cryptodev_hw_access.h
@@ -39,6 +39,8 @@
struct cpt_instance {
uint32_t queue_id;
uintptr_t rsvd;
+ struct rte_mempool *sess_mp;
+ struct rte_mempool *sess_mp_priv;
};
struct command_chunk {
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c
index 6a0cf83..0f9f2a2 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_ops.c
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
@@ -11,7 +11,6 @@
#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
-#include "cpt_request_mgr.h"
#include "otx_cryptodev.h"
#include "otx_cryptodev_capabilities.h"
@@ -222,6 +221,8 @@ otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
}
instance->queue_id = que_pair_id;
+ instance->sess_mp = qp_conf->mp_session;
+ instance->sess_mp_priv = qp_conf->mp_session_private;
dev->data->queue_pairs[que_pair_id] = instance;
return 0;
@@ -340,11 +341,160 @@ otx_cpt_session_clear(struct rte_cryptodev *dev,
}
}
+static __rte_always_inline int32_t __hot
+otx_cpt_request_enqueue(struct cpt_instance *instance,
+ struct pending_queue *pqueue,
+ void *req)
+{
+ struct cpt_request_info *user_req = (struct cpt_request_info *)req;
+
+ if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
+ return -EAGAIN;
+
+ fill_cpt_inst(instance, req);
+
+ CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);
+
+ /* Fill time_out cycles */
+ user_req->time_out = rte_get_timer_cycles() +
+ DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
+ user_req->extra_time = 0;
+
+ /* Default mode of software queue */
+ mark_cpt_inst(instance);
+
+ pqueue->rid_queue[pqueue->enq_tail].rid = (uintptr_t)user_req;
+
+ /* We will use soft queue length here to limit requests */
+ MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
+ pqueue->pending_count += 1;
+
+ CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
+ "op: %p", user_req, user_req->op);
+ return 0;
+}
+
+static __rte_always_inline int __hot
+otx_cpt_enq_single_sym(struct cpt_instance *instance,
+ struct rte_crypto_op *op,
+ struct pending_queue *pqueue)
+{
+ struct cpt_sess_misc *sess;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ void *prep_req, *mdata = NULL;
+ int ret = 0;
+ uint64_t cpt_op;
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+
+ sess = (struct cpt_sess_misc *)
+ get_sym_session_private_data(sym_op->session,
+ otx_cryptodev_driver_id);
+
+ cpt_op = sess->cpt_op;
+
+ if (likely(cpt_op & CPT_OP_CIPHER_MASK))
+ ret = fill_fc_params(op, sess, &cptvf->meta_info, &mdata,
+ &prep_req);
+ else
+ ret = fill_digest_params(op, sess, &cptvf->meta_info,
+ &mdata, &prep_req);
+
+ if (unlikely(ret)) {
+ CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
+ "ret 0x%x", op, (unsigned int)cpt_op, ret);
+ return ret;
+ }
+
+ /* Enqueue prepared instruction to h/w */
+ ret = otx_cpt_request_enqueue(instance, pqueue, prep_req);
+
+ if (unlikely(ret)) {
+ /* Buffer allocated for request preparation need to be freed */
+ free_op_meta(mdata, cptvf->meta_info.cptvf_meta_pool);
+ return ret;
+ }
+
+ return 0;
+}
+
+static __rte_always_inline int __hot
+otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
+ struct rte_crypto_op *op,
+ struct pending_queue *pqueue)
+{
+ struct cpt_sess_misc *sess;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ int ret;
+ void *sess_t = NULL;
+ void *sess_private_data_t = NULL;
+
+ /* Create tmp session */
+
+ if (rte_mempool_get(instance->sess_mp, (void **)&sess_t)) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (rte_mempool_get(instance->sess_mp_priv,
+ (void **)&sess_private_data_t)) {
+ ret = -ENOMEM;
+ goto free_sess;
+ }
+
+ sess = (struct cpt_sess_misc *)sess_private_data_t;
+
+ sess->ctx_dma_addr = rte_mempool_virt2iova(sess) +
+ sizeof(struct cpt_sess_misc);
+
+ ret = instance_session_cfg(sym_op->xform, (void *)sess);
+ if (unlikely(ret)) {
+ ret = -EINVAL;
+ goto free_sess_priv;
+ }
+
+ /* Save tmp session in op */
+
+ sym_op->session = (struct rte_cryptodev_sym_session *)sess_t;
+ set_sym_session_private_data(sym_op->session, otx_cryptodev_driver_id,
+ sess_private_data_t);
+
+ /* Enqueue op with the tmp session set */
+ ret = otx_cpt_enq_single_sym(instance, op, pqueue);
+
+ if (unlikely(ret))
+ goto free_sess_priv;
+
+ return 0;
+
+free_sess_priv:
+ rte_mempool_put(instance->sess_mp_priv, sess_private_data_t);
+free_sess:
+ rte_mempool_put(instance->sess_mp, sess_t);
+exit:
+ return ret;
+}
+
+static __rte_always_inline int __hot
+otx_cpt_enq_single(struct cpt_instance *inst,
+ struct rte_crypto_op *op,
+ struct pending_queue *pqueue)
+{
+ /* Check for the type */
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ return otx_cpt_enq_single_sym(inst, op, pqueue);
+ else if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS))
+ return otx_cpt_enq_single_sym_sessless(inst, op, pqueue);
+
+ /* Should not reach here */
+ return -EINVAL;
+}
+
static uint16_t
otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct cpt_instance *instance = (struct cpt_instance *)qptr;
- uint16_t count = 0;
+ uint16_t count;
int ret;
struct cpt_vf *cptvf = (struct cpt_vf *)instance;
struct pending_queue *pqueue = &cptvf->pqueue;
@@ -355,8 +505,10 @@ otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
count = 0;
while (likely(count < nb_ops)) {
- ret = cpt_pmd_crypto_operation(instance, ops[count], pqueue,
- otx_cryptodev_driver_id);
+
+ /* Enqueue single op */
+ ret = otx_cpt_enq_single(instance, ops[count], pqueue);
+
if (unlikely(ret))
break;
count++;
@@ -365,48 +517,110 @@ otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return count;
}
+static __rte_always_inline void
+otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
+{
+ /* H/w has returned success */
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Perform further post processing */
+
+ if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ /* Check if auth verify need to be completed */
+ if (unlikely(rsp[2]))
+ compl_auth_verify(cop, (uint8_t *)rsp[2], rsp[3]);
+ return;
+ }
+}
+
static uint16_t
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct cpt_instance *instance = (struct cpt_instance *)qptr;
+ struct cpt_request_info *user_req;
struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct rid *rid_e;
+ uint8_t cc[nb_ops];
+ int i, count, pcount;
+ uint8_t ret;
+ int nb_completed;
struct pending_queue *pqueue = &cptvf->pqueue;
- uint16_t nb_completed, i = 0;
- uint8_t compcode[nb_ops];
+ struct rte_crypto_op *cop;
+ void *metabuf;
+ uintptr_t *rsp;
+
+ pcount = pqueue->pending_count;
+ count = (nb_ops > pcount) ? pcount : nb_ops;
+
+ for (i = 0; i < count; i++) {
+ rid_e = &pqueue->rid_queue[pqueue->deq_head];
+ user_req = (struct cpt_request_info *)(rid_e->rid);
+
+ if (likely((i+1) < count))
+ rte_prefetch_non_temporal((void *)rid_e[1].rid);
+
+ ret = check_nb_command_id(user_req, instance);
+
+ if (unlikely(ret == ERR_REQ_PENDING)) {
+ /* Stop checking for completions */
+ break;
+ }
+
+ /* Return completion code and op handle */
+ cc[i] = ret;
+ ops[i] = user_req->op;
+
+ CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
+ user_req, user_req->op, ret);
- nb_completed = cpt_dequeue_burst(instance, nb_ops,
- (void **)ops, compcode, pqueue);
- while (likely(i < nb_completed)) {
- struct rte_crypto_op *cop;
- void *metabuf;
- uintptr_t *rsp;
- uint8_t status;
+ MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
+ pqueue->pending_count -= 1;
+ }
+
+ nb_completed = i;
+
+ for (i = 0; i < nb_completed; i++) {
rsp = (void *)ops[i];
- status = compcode[i];
+
if (likely((i + 1) < nb_completed))
rte_prefetch0(ops[i+1]);
+
metabuf = (void *)rsp[0];
cop = (void *)rsp[1];
ops[i] = cop;
- if (likely(status == 0)) {
- if (likely(!rsp[2]))
- cop->status =
- RTE_CRYPTO_OP_STATUS_SUCCESS;
- else
- compl_auth_verify(cop, (uint8_t *)rsp[2],
- rsp[3]);
- } else if (status == ERR_GC_ICV_MISCOMPARE) {
- /*auth data mismatch */
+ /* Check completion code */
+
+ if (likely(cc[i] == 0)) {
+ /* H/w success pkt. Post process */
+ otx_cpt_dequeue_post_process(cop, rsp);
+ } else if (cc[i] == ERR_GC_ICV_MISCOMPARE) {
+ /* auth data mismatch */
cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
+ /* Error */
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
+
+ if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ void *sess_private_data_t =
+ get_sym_session_private_data(cop->sym->session,
+ otx_cryptodev_driver_id);
+ memset(sess_private_data_t, 0,
+ cpt_get_session_size());
+ memset(cop->sym->session, 0,
+ rte_cryptodev_sym_get_existing_header_session_size(
+ cop->sym->session));
+ rte_mempool_put(instance->sess_mp_priv,
+ sess_private_data_t);
+ rte_mempool_put(instance->sess_mp, cop->sym->session);
+ cop->sym->session = NULL;
+ }
free_op_meta(metabuf, cptvf->meta_info.cptvf_meta_pool);
- i++;
}
+
return nb_completed;
}
--
2.7.4
^ permalink raw reply [flat|nested] 7+ messages in thread