* [dpdk-dev] [PATCH 1/4] common/cpt: prepopulate word7 in sess
2020-11-03 8:37 [dpdk-dev] [PATCH 0/4] code cleanup and improvements Archana Muniganti
@ 2020-11-03 8:37 ` Archana Muniganti
2020-11-03 8:37 ` [dpdk-dev] [PATCH 2/4] common/cpt: remove temporary variable Archana Muniganti
` (3 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Archana Muniganti @ 2020-11-03 8:37 UTC (permalink / raw)
To: akhil.goyal, anoobj, adwivedi; +Cc: Archana Muniganti, dev
CPT inst word7 is an immutable data for a session.
This data can be populated in a session.
Signed-off-by: Archana Muniganti <marchana@marvell.com>
---
drivers/common/cpt/cpt_common.h | 1 -
drivers/common/cpt/cpt_mcode_defines.h | 13 ++-
drivers/common/cpt/cpt_ucode.h | 109 +++++-------------
.../crypto/octeontx/otx_cryptodev_hw_access.h | 4 +-
drivers/crypto/octeontx/otx_cryptodev_ops.c | 29 +++--
drivers/crypto/octeontx2/otx2_cryptodev_ops.c | 46 ++++----
drivers/crypto/octeontx2/otx2_cryptodev_sec.c | 4 +-
drivers/crypto/octeontx2/otx2_cryptodev_sec.h | 2 +-
drivers/crypto/octeontx2/otx2_ipsec_po_ops.h | 2 -
9 files changed, 91 insertions(+), 119 deletions(-)
diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
index 1ce28e90b7..eefe2755c1 100644
--- a/drivers/common/cpt/cpt_common.h
+++ b/drivers/common/cpt/cpt_common.h
@@ -69,7 +69,6 @@ struct cpt_request_info {
uint64_t ei0;
uint64_t ei1;
uint64_t ei2;
- uint64_t ei3;
} ist;
uint8_t *rptr;
const struct otx2_cpt_qp *qp;
diff --git a/drivers/common/cpt/cpt_mcode_defines.h b/drivers/common/cpt/cpt_mcode_defines.h
index 0a05bd5639..846ceb4a02 100644
--- a/drivers/common/cpt/cpt_mcode_defines.h
+++ b/drivers/common/cpt/cpt_mcode_defines.h
@@ -245,8 +245,8 @@ struct cpt_sess_misc {
uint16_t is_null:1;
/** Flag for GMAC */
uint16_t is_gmac:1;
- /** Engine group */
- uint16_t egrp:3;
+ /** Unused field */
+ uint16_t rsvd1:3;
/** AAD length */
uint16_t aad_length;
/** MAC len in bytes */
@@ -255,14 +255,16 @@ struct cpt_sess_misc {
uint8_t iv_length;
/** Auth IV length in bytes */
uint8_t auth_iv_length;
- /** Reserved field */
- uint8_t rsvd1;
+ /** Unused field */
+ uint8_t rsvd2;
/** IV offset in bytes */
uint16_t iv_offset;
/** Auth IV offset in bytes */
uint16_t auth_iv_offset;
/** Salt */
uint32_t salt;
+ /** CPT inst word 7 */
+ uint64_t cpt_inst_w7;
/** Context DMA address */
phys_addr_t ctx_dma_addr;
};
@@ -319,7 +321,7 @@ struct cpt_ctx {
mc_fc_context_t fctx;
mc_zuc_snow3g_ctx_t zs_ctx;
mc_kasumi_ctx_t k_ctx;
- };
+ } mc_ctx;
uint8_t auth_key[1024];
};
@@ -350,6 +352,7 @@ struct cpt_asym_sess_misc {
struct rte_crypto_modex_xform mod_ctx;
struct cpt_asym_ec_ctx ec_ctx;
};
+ uint64_t cpt_inst_w7;
};
/* Buffer pointer */
diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
index 5f28bd7591..7d938d0c91 100644
--- a/drivers/common/cpt/cpt_ucode.h
+++ b/drivers/common/cpt/cpt_ucode.h
@@ -77,11 +77,11 @@ cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
}
static __rte_always_inline void
-cpt_fc_salt_update(void *ctx,
+cpt_fc_salt_update(struct cpt_ctx *cpt_ctx,
uint8_t *salt)
{
- struct cpt_ctx *cpt_ctx = ctx;
- memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
+ mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
+ memcpy(fctx->enc.encr_iv, salt, 4);
}
static __rte_always_inline int
@@ -190,10 +190,12 @@ static __rte_always_inline void
cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
+ mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
uint32_t keyx[4];
+
cpt_ctx->snow3g = 1;
gen_key_snow3g(key, keyx);
- memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
+ memcpy(zs_ctx->ci_key, keyx, key_len);
cpt_ctx->zsk_flags = 0;
}
@@ -201,9 +203,11 @@ static __rte_always_inline void
cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
+ mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
+
cpt_ctx->snow3g = 0;
- memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
- memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
+ memcpy(zs_ctx->ci_key, key, key_len);
+ memcpy(zs_ctx->zuc_const, zuc_d, 32);
cpt_ctx->zsk_flags = 0;
}
@@ -211,8 +215,10 @@ static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
+ mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
+
cpt_ctx->k_ecb = 1;
- memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->zsk_flags = 0;
}
@@ -220,16 +226,17 @@ static __rte_always_inline void
cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
{
- memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
+
+ memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->zsk_flags = 0;
}
static __rte_always_inline int
-cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, const uint8_t *key,
- uint16_t key_len, uint8_t *salt)
+cpt_fc_ciph_set_key(struct cpt_ctx *cpt_ctx, cipher_type_t type,
+ const uint8_t *key, uint16_t key_len, uint8_t *salt)
{
- struct cpt_ctx *cpt_ctx = ctx;
- mc_fc_context_t *fctx = &cpt_ctx->fctx;
+ mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
int ret;
ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
@@ -480,7 +487,6 @@ cpt_digest_gen_prep(uint32_t flags,
uint32_t g_size_bytes, s_size_bytes;
uint64_t dptr_dma, rptr_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr, *m_vaddr;
uint64_t c_dma, m_dma;
opcode_info_t opcode;
@@ -633,9 +639,6 @@ cpt_digest_gen_prep(uint32_t flags,
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@@ -643,7 +646,6 @@ cpt_digest_gen_prep(uint32_t flags,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@@ -671,9 +673,8 @@ cpt_enc_hmac_prep(uint32_t flags,
uint32_t encr_data_len, auth_data_len, aad_len = 0;
uint32_t passthrough_len = 0;
void *m_vaddr, *offset_vaddr;
- uint64_t m_dma, offset_dma, ctx_dma;
+ uint64_t m_dma, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr;
uint64_t c_dma;
opcode_info_t opcode;
@@ -1003,13 +1004,6 @@ cpt_enc_hmac_prep(uint32_t flags,
req->ist.ei2 = rptr_dma;
}
- ctx_dma = fc_params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, fctx);
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = ctx_dma;
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@@ -1017,7 +1011,6 @@ cpt_enc_hmac_prep(uint32_t flags,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@@ -1044,10 +1037,9 @@ cpt_dec_hmac_prep(uint32_t flags,
uint32_t encr_data_len, auth_data_len, aad_len = 0;
uint32_t passthrough_len = 0;
void *m_vaddr, *offset_vaddr;
- uint64_t m_dma, offset_dma, ctx_dma;
+ uint64_t m_dma, offset_dma;
opcode_info_t opcode;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr;
uint64_t c_dma;
@@ -1388,13 +1380,6 @@ cpt_dec_hmac_prep(uint32_t flags,
req->ist.ei2 = rptr_dma;
}
- ctx_dma = fc_params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, fctx);
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = ctx_dma;
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@@ -1402,7 +1387,6 @@ cpt_dec_hmac_prep(uint32_t flags,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@@ -1433,7 +1417,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
uint64_t *offset_vaddr, offset_dma;
uint32_t *iv_s, iv[4];
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
opcode_info_t opcode;
buf_p = &params->meta_buf;
@@ -1710,12 +1693,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
req->ist.ei2 = rptr_dma;
}
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, zs_ctx);
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@@ -1723,7 +1700,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@@ -1753,7 +1729,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
uint64_t *offset_vaddr, offset_dma;
uint32_t *iv_s, iv[4], j;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
opcode_info_t opcode;
buf_p = &params->meta_buf;
@@ -1974,12 +1949,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
req->ist.ei2 = rptr_dma;
}
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, zs_ctx);
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@@ -1987,7 +1956,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@@ -2019,7 +1987,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
uint64_t m_dma, c_dma;
uint64_t *offset_vaddr, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
opcode_info_t opcode;
uint8_t *in_buffer;
uint32_t g_size_bytes, s_size_bytes;
@@ -2221,12 +2188,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, k_ctx);
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@@ -2234,7 +2195,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@@ -2263,7 +2223,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
uint64_t m_dma, c_dma;
uint64_t *offset_vaddr, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
- vq_cmd_word3_t vq_cmd_w3;
opcode_info_t opcode;
uint8_t *in_buffer;
uint32_t g_size_bytes, s_size_bytes;
@@ -2411,12 +2370,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
- /* vq command w3 */
- vq_cmd_w3.u64 = 0;
- vq_cmd_w3.s.grp = 0;
- vq_cmd_w3.s.cptr = params->ctx_buf.dma_addr +
- offsetof(struct cpt_ctx, k_ctx);
-
/* 16 byte aligned cpt res address */
req->completion_addr = (uint64_t *)((uint8_t *)c_vaddr);
*req->completion_addr = COMPLETION_CODE_INIT;
@@ -2424,7 +2377,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
/* Fill microcode part of instruction */
req->ist.ei0 = vq_cmd_w0.u64;
- req->ist.ei3 = vq_cmd_w3.u64;
req->op = op;
@@ -2492,11 +2444,12 @@ cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
}
static __rte_always_inline int
-cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
- uint16_t key_len, uint16_t mac_len)
+cpt_fc_auth_set_key(struct cpt_ctx *cpt_ctx, auth_type_t type,
+ const uint8_t *key, uint16_t key_len, uint16_t mac_len)
{
- struct cpt_ctx *cpt_ctx = ctx;
- mc_fc_context_t *fctx = &cpt_ctx->fctx;
+ mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
+ mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
+ mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
if ((type >= ZUC_EIA3) && (type <= KASUMI_F9_ECB)) {
uint32_t keyx[4];
@@ -2511,26 +2464,26 @@ cpt_fc_auth_set_key(void *ctx, auth_type_t type, const uint8_t *key,
case SNOW3G_UIA2:
cpt_ctx->snow3g = 1;
gen_key_snow3g(key, keyx);
- memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
+ memcpy(zs_ctx->ci_key, keyx, key_len);
cpt_ctx->fc_type = ZUC_SNOW3G;
cpt_ctx->zsk_flags = 0x1;
break;
case ZUC_EIA3:
cpt_ctx->snow3g = 0;
- memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
- memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
+ memcpy(zs_ctx->ci_key, key, key_len);
+ memcpy(zs_ctx->zuc_const, zuc_d, 32);
cpt_ctx->fc_type = ZUC_SNOW3G;
cpt_ctx->zsk_flags = 0x1;
break;
case KASUMI_F9_ECB:
/* Kasumi ECB mode */
cpt_ctx->k_ecb = 1;
- memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->fc_type = KASUMI;
cpt_ctx->zsk_flags = 0x1;
break;
case KASUMI_F9_CBC:
- memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+ memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->fc_type = KASUMI;
cpt_ctx->zsk_flags = 0x1;
break;
diff --git a/drivers/crypto/octeontx/otx_cryptodev_hw_access.h b/drivers/crypto/octeontx/otx_cryptodev_hw_access.h
index 063c6367fc..0ec258157a 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_hw_access.h
+++ b/drivers/crypto/octeontx/otx_cryptodev_hw_access.h
@@ -210,7 +210,7 @@ get_cpt_inst(struct command_queue *cqueue)
}
static __rte_always_inline void
-fill_cpt_inst(struct cpt_instance *instance, void *req)
+fill_cpt_inst(struct cpt_instance *instance, void *req, uint64_t ucmd_w3)
{
struct command_queue *cqueue;
cpt_inst_s_t *cpt_ist_p;
@@ -237,7 +237,7 @@ fill_cpt_inst(struct cpt_instance *instance, void *req)
/* MC EI2 */
cpt_ist_p->s8x.ei2 = user_req->ist.ei2;
/* MC EI3 */
- cpt_ist_p->s8x.ei3 = user_req->ist.ei3;
+ cpt_ist_p->s8x.ei3 = ucmd_w3;
}
static __rte_always_inline void
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c
index 14f22e3011..8fc1fa3695 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_ops.c
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
@@ -241,6 +241,7 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
{
struct rte_crypto_sym_xform *temp_xform = xform;
struct cpt_sess_misc *misc;
+ vq_cmd_word3_t vq_cmd_w3;
void *priv;
int ret;
@@ -254,7 +255,7 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
}
memset(priv, 0, sizeof(struct cpt_sess_misc) +
- offsetof(struct cpt_ctx, fctx));
+ offsetof(struct cpt_ctx, mc_ctx));
misc = priv;
@@ -292,6 +293,13 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
sizeof(struct cpt_sess_misc);
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = 0;
+ vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
+ mc_ctx);
+
+ misc->cpt_inst_w7 = vq_cmd_w3.u64;
+
return 0;
priv_put:
@@ -372,6 +380,8 @@ otx_cpt_asym_session_cfg(struct rte_cryptodev *dev,
return ret;
}
+ priv->cpt_inst_w7 = 0;
+
set_asym_session_private_data(sess, dev->driver_id, priv);
return 0;
}
@@ -401,14 +411,14 @@ otx_cpt_asym_session_clear(struct rte_cryptodev *dev,
static __rte_always_inline int32_t __rte_hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
struct pending_queue *pqueue,
- void *req)
+ void *req, uint64_t cpt_inst_w7)
{
struct cpt_request_info *user_req = (struct cpt_request_info *)req;
if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
return -EAGAIN;
- fill_cpt_inst(instance, req);
+ fill_cpt_inst(instance, req, cpt_inst_w7);
CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);
@@ -496,7 +506,8 @@ otx_cpt_enq_single_asym(struct cpt_instance *instance,
goto req_fail;
}
- ret = otx_cpt_request_enqueue(instance, pqueue, params.req);
+ ret = otx_cpt_request_enqueue(instance, pqueue, params.req,
+ sess->cpt_inst_w7);
if (unlikely(ret)) {
CPT_LOG_DP_ERR("Could not enqueue crypto req");
@@ -518,7 +529,8 @@ otx_cpt_enq_single_sym(struct cpt_instance *instance,
{
struct cpt_sess_misc *sess;
struct rte_crypto_sym_op *sym_op = op->sym;
- void *prep_req, *mdata = NULL;
+ struct cpt_request_info *prep_req;
+ void *mdata = NULL;
int ret = 0;
uint64_t cpt_op;
@@ -530,10 +542,10 @@ otx_cpt_enq_single_sym(struct cpt_instance *instance,
if (likely(cpt_op & CPT_OP_CIPHER_MASK))
ret = fill_fc_params(op, sess, &instance->meta_info, &mdata,
- &prep_req);
+ (void **)&prep_req);
else
ret = fill_digest_params(op, sess, &instance->meta_info,
- &mdata, &prep_req);
+ &mdata, (void **)&prep_req);
if (unlikely(ret)) {
CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
@@ -542,7 +554,8 @@ otx_cpt_enq_single_sym(struct cpt_instance *instance,
}
/* Enqueue prepared instruction to h/w */
- ret = otx_cpt_request_enqueue(instance, pqueue, prep_req);
+ ret = otx_cpt_request_enqueue(instance, pqueue, prep_req,
+ sess->cpt_inst_w7);
if (unlikely(ret)) {
/* Buffer allocated for request preparation need to be freed */
diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
index 00bc5c0ab0..c7feb6f9e2 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
@@ -356,6 +356,7 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
{
struct rte_crypto_sym_xform *temp_xform = xform;
struct cpt_sess_misc *misc;
+ vq_cmd_word3_t vq_cmd_w3;
void *priv;
int ret;
@@ -369,7 +370,7 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
}
memset(priv, 0, sizeof(struct cpt_sess_misc) +
- offsetof(struct cpt_ctx, fctx));
+ offsetof(struct cpt_ctx, mc_ctx));
misc = priv;
@@ -407,15 +408,21 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
sizeof(struct cpt_sess_misc);
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
+ mc_ctx);
+
/*
* IE engines support IPsec operations
* SE engines support IPsec operations, Chacha-Poly and
* Air-Crypto operations
*/
if (misc->zsk_flag || misc->chacha_poly)
- misc->egrp = OTX2_CPT_EGRP_SE;
+ vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE;
else
- misc->egrp = OTX2_CPT_EGRP_SE_IE;
+ vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE_IE;
+
+ misc->cpt_inst_w7 = vq_cmd_w3.u64;
return 0;
@@ -428,7 +435,8 @@ sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
static __rte_always_inline void __rte_hot
otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
struct cpt_request_info *req,
- void *lmtline)
+ void *lmtline,
+ uint64_t cpt_inst_w7)
{
union cpt_inst_s inst;
uint64_t lmt_status;
@@ -441,7 +449,7 @@ otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
inst.s9x.ei0 = req->ist.ei0;
inst.s9x.ei1 = req->ist.ei1;
inst.s9x.ei2 = req->ist.ei2;
- inst.s9x.ei3 = req->ist.ei3;
+ inst.s9x.ei3 = cpt_inst_w7;
inst.s9x.qord = 1;
inst.s9x.grp = qp->ev.queue_id;
@@ -470,14 +478,15 @@ otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
struct pending_queue *pend_q,
- struct cpt_request_info *req)
+ struct cpt_request_info *req,
+ uint64_t cpt_inst_w7)
{
void *lmtline = qp->lmtline;
union cpt_inst_s inst;
uint64_t lmt_status;
if (qp->ca_enable) {
- otx2_ca_enqueue_req(qp, req, lmtline);
+ otx2_ca_enqueue_req(qp, req, lmtline, cpt_inst_w7);
return 0;
}
@@ -492,7 +501,7 @@ otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
inst.s9x.ei0 = req->ist.ei0;
inst.s9x.ei1 = req->ist.ei1;
inst.s9x.ei2 = req->ist.ei2;
- inst.s9x.ei3 = req->ist.ei3;
+ inst.s9x.ei3 = cpt_inst_w7;
req->time_out = rte_get_timer_cycles() +
DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
@@ -529,7 +538,6 @@ otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
struct rte_crypto_asym_op *asym_op = op->asym;
struct asym_op_params params = {0};
struct cpt_asym_sess_misc *sess;
- vq_cmd_word3_t *w3;
uintptr_t *cop;
void *mdata;
int ret;
@@ -584,11 +592,7 @@ otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
goto req_fail;
}
- /* Set engine group of AE */
- w3 = (vq_cmd_word3_t *)&params.req->ist.ei3;
- w3->s.grp = OTX2_CPT_EGRP_AE;
-
- ret = otx2_cpt_enqueue_req(qp, pend_q, params.req);
+ ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, sess->cpt_inst_w7);
if (unlikely(ret)) {
CPT_LOG_DP_ERR("Could not enqueue crypto req");
@@ -610,7 +614,6 @@ otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
struct rte_crypto_sym_op *sym_op = op->sym;
struct cpt_request_info *req;
struct cpt_sess_misc *sess;
- vq_cmd_word3_t *w3;
uint64_t cpt_op;
void *mdata;
int ret;
@@ -633,10 +636,7 @@ otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
return ret;
}
- w3 = ((vq_cmd_word3_t *)(&req->ist.ei3));
- w3->s.grp = sess->egrp;
-
- ret = otx2_cpt_enqueue_req(qp, pend_q, req);
+ ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
if (unlikely(ret)) {
/* Free buffer allocated by fill params routines */
@@ -671,7 +671,7 @@ otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
return ret;
}
- ret = otx2_cpt_enqueue_req(qp, pend_q, req);
+ ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
return ret;
}
@@ -1266,6 +1266,7 @@ otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
struct rte_mempool *pool)
{
struct cpt_asym_sess_misc *priv;
+ vq_cmd_word3_t vq_cmd_w3;
int ret;
CPT_PMD_INIT_FUNC_TRACE();
@@ -1286,7 +1287,12 @@ otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
return ret;
}
+ vq_cmd_w3.u64 = 0;
+ vq_cmd_w3.s.grp = OTX2_CPT_EGRP_AE;
+ priv->cpt_inst_w7 = vq_cmd_w3.u64;
+
set_asym_session_private_data(sess, dev->driver_id, priv);
+
return 0;
}
diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_sec.c b/drivers/crypto/octeontx2/otx2_cryptodev_sec.c
index 5d8766682a..b0664578be 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_sec.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_sec.c
@@ -323,7 +323,7 @@ crypto_sec_ipsec_outb_session_create(struct rte_cryptodev *crypto_dev,
inst.egrp = OTX2_CPT_EGRP_SE;
inst.cptr = rte_mempool_virt2iova(sa);
- lp->ucmd_w3 = inst.u64[7];
+ lp->cpt_inst_w7 = inst.u64[7];
lp->ucmd_opcode = (lp->ctx_len << 8) |
(OTX2_IPSEC_PO_PROCESS_IPSEC_OUTB);
@@ -407,7 +407,7 @@ crypto_sec_ipsec_inb_session_create(struct rte_cryptodev *crypto_dev,
inst.egrp = OTX2_CPT_EGRP_SE;
inst.cptr = rte_mempool_virt2iova(sa);
- lp->ucmd_w3 = inst.u64[7];
+ lp->cpt_inst_w7 = inst.u64[7];
lp->ucmd_opcode = (lp->ctx_len << 8) |
(OTX2_IPSEC_PO_PROCESS_IPSEC_INB);
diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_sec.h b/drivers/crypto/octeontx2/otx2_cryptodev_sec.h
index b4a39d2fe4..2849c1ab75 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_sec.h
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_sec.h
@@ -18,7 +18,7 @@ struct otx2_sec_session_ipsec_lp {
struct otx2_ipsec_po_out_sa out_sa;
};
- uint64_t ucmd_w3;
+ uint64_t cpt_inst_w7;
union {
uint64_t ucmd_w0;
struct {
diff --git a/drivers/crypto/octeontx2/otx2_ipsec_po_ops.h b/drivers/crypto/octeontx2/otx2_ipsec_po_ops.h
index 5dd0b391a9..bc702d5c79 100644
--- a/drivers/crypto/octeontx2/otx2_ipsec_po_ops.h
+++ b/drivers/crypto/octeontx2/otx2_ipsec_po_ops.h
@@ -123,7 +123,6 @@ process_outb_sa(struct rte_crypto_op *cop,
req->ist.ei0 = word0.u64;
req->ist.ei1 = rte_pktmbuf_iova(m_src);
req->ist.ei2 = req->ist.ei1;
- req->ist.ei3 = sess->ucmd_w3;
hdr->seq = rte_cpu_to_be_32(sess->seq_lo);
hdr->ip_id = rte_cpu_to_be_32(sess->ip_id);
@@ -170,7 +169,6 @@ process_inb_sa(struct rte_crypto_op *cop,
req->ist.ei0 = word0.u64;
req->ist.ei1 = rte_pktmbuf_iova(m_src);
req->ist.ei2 = req->ist.ei1;
- req->ist.ei3 = sess->ucmd_w3;
exit:
*prep_req = req;
--
2.22.0
^ permalink raw reply [flat|nested] 7+ messages in thread
* [dpdk-dev] [PATCH 2/4] common/cpt: remove temporary variable
2020-11-03 8:37 [dpdk-dev] [PATCH 0/4] code cleanup and improvements Archana Muniganti
2020-11-03 8:37 ` [dpdk-dev] [PATCH 1/4] common/cpt: prepopulate word7 in sess Archana Muniganti
@ 2020-11-03 8:37 ` Archana Muniganti
2020-11-03 8:37 ` [dpdk-dev] [PATCH 3/4] common/cpt: use predefined macros Archana Muniganti
` (2 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Archana Muniganti @ 2020-11-03 8:37 UTC (permalink / raw)
To: akhil.goyal, anoobj, adwivedi; +Cc: Archana Muniganti, dev
Remove temporary variable used in datapath.
Signed-off-by: Archana Muniganti <marchana@marvell.com>
---
drivers/common/cpt/cpt_hw_types.h | 10 ++-
drivers/common/cpt/cpt_mcode_defines.h | 8 --
drivers/common/cpt/cpt_ucode.h | 79 ++++++-------------
drivers/common/cpt/cpt_ucode_asym.h | 46 ++++-------
drivers/crypto/octeontx2/otx2_cryptodev_ops.c | 2 +-
5 files changed, 48 insertions(+), 97 deletions(-)
diff --git a/drivers/common/cpt/cpt_hw_types.h b/drivers/common/cpt/cpt_hw_types.h
index e2b127de41..a1f969eb14 100644
--- a/drivers/common/cpt/cpt_hw_types.h
+++ b/drivers/common/cpt/cpt_hw_types.h
@@ -31,7 +31,10 @@ typedef union {
uint64_t u64;
struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
- uint16_t opcode;
+ struct {
+ uint8_t minor;
+ uint8_t major;
+ } opcode;
uint16_t param1;
uint16_t param2;
uint16_t dlen;
@@ -39,7 +42,10 @@ typedef union {
uint16_t dlen;
uint16_t param2;
uint16_t param1;
- uint16_t opcode;
+ struct {
+ uint8_t major;
+ uint8_t minor;
+ } opcode;
#endif
} s;
} vq_cmd_word0_t;
diff --git a/drivers/common/cpt/cpt_mcode_defines.h b/drivers/common/cpt/cpt_mcode_defines.h
index 846ceb4a02..56a745f419 100644
--- a/drivers/common/cpt/cpt_mcode_defines.h
+++ b/drivers/common/cpt/cpt_mcode_defines.h
@@ -369,14 +369,6 @@ typedef struct{
buf_ptr_t bufs[0];
} iov_ptr_t;
-typedef union opcode_info {
- uint16_t flags;
- struct {
- uint8_t major;
- uint8_t minor;
- } s;
-} opcode_info_t;
-
typedef struct fc_params {
/* 0th cache line */
union {
diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
index 7d938d0c91..664a04e1a0 100644
--- a/drivers/common/cpt/cpt_ucode.h
+++ b/drivers/common/cpt/cpt_ucode.h
@@ -489,7 +489,6 @@ cpt_digest_gen_prep(uint32_t flags,
vq_cmd_word0_t vq_cmd_w0;
void *c_vaddr, *m_vaddr;
uint64_t c_dma, m_dma;
- opcode_info_t opcode;
ctx = params->ctx_buf.vaddr;
meta_p = &params->meta_buf;
@@ -524,31 +523,27 @@ cpt_digest_gen_prep(uint32_t flags,
data_len = AUTH_DLEN(d_lens);
/*GP op header */
- vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.opcode.minor = 0;
vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
if (ctx->hmac) {
- opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
vq_cmd_w0.s.param1 = key_len;
vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
} else {
- opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
vq_cmd_w0.s.param1 = 0;
vq_cmd_w0.s.dlen = data_len;
}
- opcode.s.minor = 0;
-
/* Null auth only case enters the if */
if (unlikely(!hash_type && !ctx->enc_cipher)) {
- opcode.s.major = CPT_MAJOR_OP_MISC;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_MISC;
/* Minor op is passthrough */
- opcode.s.minor = 0x03;
+ vq_cmd_w0.s.opcode.minor = 0x03;
/* Send out completion code only */
vq_cmd_w0.s.param2 = 0x1;
}
- vq_cmd_w0.s.opcode = opcode.flags;
-
/* DPTR has SG list */
in_buffer = m_vaddr;
dptr_dma = m_dma;
@@ -677,7 +672,6 @@ cpt_enc_hmac_prep(uint32_t flags,
vq_cmd_word0_t vq_cmd_w0;
void *c_vaddr;
uint64_t c_dma;
- opcode_info_t opcode;
meta_p = &fc_params->meta_buf;
m_vaddr = meta_p->vaddr;
@@ -756,8 +750,8 @@ cpt_enc_hmac_prep(uint32_t flags,
}
/* Encryption */
- opcode.s.major = CPT_MAJOR_OP_FC;
- opcode.s.minor = 0;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
+ vq_cmd_w0.s.opcode.minor = 0;
if (hash_type == GMAC_TYPE) {
encr_offset = 0;
@@ -783,7 +777,6 @@ cpt_enc_hmac_prep(uint32_t flags,
}
/* GP op header */
- vq_cmd_w0.u64 = 0;
vq_cmd_w0.s.param1 = encr_data_len;
vq_cmd_w0.s.param2 = auth_data_len;
/*
@@ -814,8 +807,6 @@ cpt_enc_hmac_prep(uint32_t flags,
vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
- vq_cmd_w0.s.opcode = opcode.flags;
-
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
+ OFF_CTRL_LEN);
@@ -844,9 +835,7 @@ cpt_enc_hmac_prep(uint32_t flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- opcode.s.major |= CPT_DMA_MODE;
-
- vq_cmd_w0.s.opcode = opcode.flags;
+ vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
@@ -1038,7 +1027,6 @@ cpt_dec_hmac_prep(uint32_t flags,
uint32_t passthrough_len = 0;
void *m_vaddr, *offset_vaddr;
uint64_t m_dma, offset_dma;
- opcode_info_t opcode;
vq_cmd_word0_t vq_cmd_w0;
void *c_vaddr;
uint64_t c_dma;
@@ -1120,8 +1108,8 @@ cpt_dec_hmac_prep(uint32_t flags,
m_dma += size;
/* Decryption */
- opcode.s.major = CPT_MAJOR_OP_FC;
- opcode.s.minor = 1;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
+ vq_cmd_w0.s.opcode.minor = 1;
if (hash_type == GMAC_TYPE) {
encr_offset = 0;
@@ -1139,7 +1127,6 @@ cpt_dec_hmac_prep(uint32_t flags,
outputlen = enc_dlen;
}
- vq_cmd_w0.u64 = 0;
vq_cmd_w0.s.param1 = encr_data_len;
vq_cmd_w0.s.param2 = auth_data_len;
@@ -1176,8 +1163,6 @@ cpt_dec_hmac_prep(uint32_t flags,
vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
- vq_cmd_w0.s.opcode = opcode.flags;
-
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
OFF_CTRL_LEN);
@@ -1207,9 +1192,7 @@ cpt_dec_hmac_prep(uint32_t flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- opcode.s.major |= CPT_DMA_MODE;
-
- vq_cmd_w0.s.opcode = opcode.flags;
+ vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
@@ -1417,7 +1400,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
uint64_t *offset_vaddr, offset_dma;
uint32_t *iv_s, iv[4];
vq_cmd_word0_t vq_cmd_w0;
- opcode_info_t opcode;
buf_p = &params->meta_buf;
m_vaddr = buf_p->vaddr;
@@ -1451,11 +1433,11 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;
/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
- opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
+ vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
(0 << 3) | (flags & 0x7));
if (flags == 0x1) {
@@ -1518,7 +1500,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
/*
* GP op header, lengths are expected in bits.
*/
- vq_cmd_w0.u64 = 0;
vq_cmd_w0.s.param1 = encr_data_len;
vq_cmd_w0.s.param2 = auth_data_len;
@@ -1551,8 +1532,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
- vq_cmd_w0.s.opcode = opcode.flags;
-
if (likely(iv_len)) {
uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
+ OFF_CTRL_LEN);
@@ -1575,9 +1554,7 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
m_dma += OFF_CTRL_LEN + iv_len;
- opcode.s.major |= CPT_DMA_MODE;
-
- vq_cmd_w0.s.opcode = opcode.flags;
+ vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
/* DPTR has SG list */
in_buffer = m_vaddr;
@@ -1729,7 +1706,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
uint64_t *offset_vaddr, offset_dma;
uint32_t *iv_s, iv[4], j;
vq_cmd_word0_t vq_cmd_w0;
- opcode_info_t opcode;
buf_p = &params->meta_buf;
m_vaddr = buf_p->vaddr;
@@ -1768,11 +1744,12 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ZUC_SNOW3G;
/* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
- opcode.s.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
+ vq_cmd_w0.s.opcode.minor = ((1 << 7) | (snow3g << 5) | (0 << 4) |
(0 << 3) | (flags & 0x7));
/* consider iv len */
@@ -1801,7 +1778,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
/*
* GP op header, lengths are expected in bits.
*/
- vq_cmd_w0.u64 = 0;
vq_cmd_w0.s.param1 = encr_data_len;
/*
@@ -1833,8 +1809,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
- vq_cmd_w0.s.opcode = opcode.flags;
-
if (likely(iv_len)) {
uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
+ OFF_CTRL_LEN);
@@ -1858,9 +1832,7 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
m_dma += OFF_CTRL_LEN + iv_len;
- opcode.s.major |= CPT_DMA_MODE;
-
- vq_cmd_w0.s.opcode = opcode.flags;
+ vq_cmd_w0.s.opcode.major |= CPT_DMA_MODE;
/* DPTR has SG list */
in_buffer = m_vaddr;
@@ -1987,7 +1959,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
uint64_t m_dma, c_dma;
uint64_t *offset_vaddr, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
- opcode_info_t opcode;
uint8_t *in_buffer;
uint32_t g_size_bytes, s_size_bytes;
uint64_t dptr_dma, rptr_dma;
@@ -2037,19 +2008,17 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
- opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
+ vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
(dir << 4) | (0 << 3) | (flags & 0x7));
/*
* GP op header, lengths are expected in bits.
*/
- vq_cmd_w0.u64 = 0;
vq_cmd_w0.s.param1 = encr_data_len;
vq_cmd_w0.s.param2 = auth_data_len;
- vq_cmd_w0.s.opcode = opcode.flags;
/* consider iv len */
if (flags == 0x0) {
@@ -2223,7 +2192,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
uint64_t m_dma, c_dma;
uint64_t *offset_vaddr, offset_dma;
vq_cmd_word0_t vq_cmd_w0;
- opcode_info_t opcode;
uint8_t *in_buffer;
uint32_t g_size_bytes, s_size_bytes;
uint64_t dptr_dma, rptr_dma;
@@ -2262,18 +2230,17 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
+ vq_cmd_w0.u64 = 0;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
/* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
- opcode.s.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
+ vq_cmd_w0.s.opcode.minor = ((1 << 6) | (cpt_ctx->k_ecb << 5) |
(dir << 4) | (0 << 3) | (flags & 0x7));
/*
* GP op header, lengths are expected in bits.
*/
- vq_cmd_w0.u64 = 0;
vq_cmd_w0.s.param1 = encr_data_len;
- vq_cmd_w0.s.opcode = opcode.flags;
/* consider iv len */
encr_offset += iv_len;
diff --git a/drivers/common/cpt/cpt_ucode_asym.h b/drivers/common/cpt/cpt_ucode_asym.h
index 5d1c7b5f02..286f155849 100644
--- a/drivers/common/cpt/cpt_ucode_asym.h
+++ b/drivers/common/cpt/cpt_ucode_asym.h
@@ -234,7 +234,6 @@ cpt_modex_prep(struct asym_op_params *modex_params,
struct rte_crypto_op **op;
vq_cmd_word0_t vq_cmd_w0;
uint64_t total_key_len;
- opcode_info_t opcode;
uint32_t dlen, rlen;
uint32_t base_len;
buf_ptr_t caddr;
@@ -265,9 +264,8 @@ cpt_modex_prep(struct asym_op_params *modex_params,
rlen = mod_len;
/* Setup opcodes */
- opcode.s.major = CPT_MAJOR_OP_MODEX;
- opcode.s.minor = CPT_MINOR_OP_MODEX;
- vq_cmd_w0.s.opcode = opcode.flags;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_MODEX;
+ vq_cmd_w0.s.opcode.minor = CPT_MINOR_OP_MODEX;
/* GP op header */
vq_cmd_w0.s.param1 = mod_len;
@@ -307,7 +305,6 @@ cpt_rsa_prep(struct asym_op_params *rsa_params,
struct rte_crypto_op **op;
vq_cmd_word0_t vq_cmd_w0;
uint64_t total_key_len;
- opcode_info_t opcode;
uint32_t dlen, rlen;
uint32_t in_size;
buf_ptr_t caddr;
@@ -334,16 +331,16 @@ cpt_rsa_prep(struct asym_op_params *rsa_params,
if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
/* Use mod_exp operation for no_padding type */
- opcode.s.minor = CPT_MINOR_OP_MODEX;
+ vq_cmd_w0.s.opcode.minor = CPT_MINOR_OP_MODEX;
vq_cmd_w0.s.param2 = exp_len;
} else {
if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
- opcode.s.minor = CPT_MINOR_OP_PKCS_ENC;
+ vq_cmd_w0.s.opcode.minor = CPT_MINOR_OP_PKCS_ENC;
/* Public key encrypt, use BT2*/
vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE2 |
((uint16_t)(exp_len) << 1);
} else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
- opcode.s.minor = CPT_MINOR_OP_PKCS_DEC;
+ vq_cmd_w0.s.opcode.minor = CPT_MINOR_OP_PKCS_DEC;
/* Public key decrypt, use BT1 */
vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE1;
/* + 2 for decrypted len */
@@ -351,9 +348,7 @@ cpt_rsa_prep(struct asym_op_params *rsa_params,
}
}
- /* Setup opcodes */
- opcode.s.major = CPT_MAJOR_OP_MODEX;
- vq_cmd_w0.s.opcode = opcode.flags;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_MODEX;
/* GP op header */
vq_cmd_w0.s.param1 = mod_len;
@@ -395,7 +390,6 @@ cpt_rsa_crt_prep(struct asym_op_params *rsa_params,
struct rte_crypto_op **op;
vq_cmd_word0_t vq_cmd_w0;
uint64_t total_key_len;
- opcode_info_t opcode;
uint32_t dlen, rlen;
uint32_t in_size;
buf_ptr_t caddr;
@@ -422,14 +416,14 @@ cpt_rsa_crt_prep(struct asym_op_params *rsa_params,
if (rsa_op.pad == RTE_CRYPTO_RSA_PADDING_NONE) {
/*Use mod_exp operation for no_padding type */
- opcode.s.minor = CPT_MINOR_OP_MODEX_CRT;
+ vq_cmd_w0.s.opcode.minor = CPT_MINOR_OP_MODEX_CRT;
} else {
if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
- opcode.s.minor = CPT_MINOR_OP_PKCS_ENC_CRT;
+ vq_cmd_w0.s.opcode.minor = CPT_MINOR_OP_PKCS_ENC_CRT;
/* Private encrypt, use BT1 */
vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE1;
} else if (rsa_op.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
- opcode.s.minor = CPT_MINOR_OP_PKCS_DEC_CRT;
+ vq_cmd_w0.s.opcode.minor = CPT_MINOR_OP_PKCS_DEC_CRT;
/* Private decrypt, use BT2 */
vq_cmd_w0.s.param2 = CPT_BLOCK_TYPE2;
/* + 2 for decrypted len */
@@ -437,9 +431,7 @@ cpt_rsa_crt_prep(struct asym_op_params *rsa_params,
}
}
- /* Setup opcodes */
- opcode.s.major = CPT_MAJOR_OP_MODEX;
- vq_cmd_w0.s.opcode = opcode.flags;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_MODEX;
/* GP op header */
vq_cmd_w0.s.param1 = mod_len;
@@ -621,7 +613,6 @@ cpt_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
uint16_t order_len, prime_len;
uint16_t o_offset, pk_offset;
vq_cmd_word0_t vq_cmd_w0;
- opcode_info_t opcode;
uint16_t rlen, dlen;
buf_ptr_t caddr;
uint8_t *dptr;
@@ -676,9 +667,8 @@ cpt_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
rlen = 2 * p_align;
/* Setup opcodes */
- opcode.s.major = CPT_MAJOR_OP_ECDSA;
- opcode.s.minor = CPT_MINOR_OP_ECDSA_SIGN;
- vq_cmd_w0.s.opcode = opcode.flags;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ECDSA;
+ vq_cmd_w0.s.opcode.minor = CPT_MINOR_OP_ECDSA_SIGN;
/* GP op header */
vq_cmd_w0.s.param1 = curveid | (message_len << 8);
@@ -722,7 +712,6 @@ cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
uint16_t qx_offset, qy_offset;
uint16_t p_align, m_align;
vq_cmd_word0_t vq_cmd_w0;
- opcode_info_t opcode;
buf_ptr_t caddr;
uint16_t dlen;
uint8_t *dptr;
@@ -783,9 +772,8 @@ cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
dptr += p_align;
/* Setup opcodes */
- opcode.s.major = CPT_MAJOR_OP_ECDSA;
- opcode.s.minor = CPT_MINOR_OP_ECDSA_VERIFY;
- vq_cmd_w0.s.opcode = opcode.flags;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ECDSA;
+ vq_cmd_w0.s.opcode.minor = CPT_MINOR_OP_ECDSA_VERIFY;
/* GP op header */
vq_cmd_w0.s.param1 = curveid | (message_len << 8);
@@ -845,7 +833,6 @@ cpt_ecpm_prep(struct rte_crypto_ecpm_op_param *ecpm,
uint16_t dlen, rlen, prime_len;
uint16_t x1_offset, y1_offset;
vq_cmd_word0_t vq_cmd_w0;
- opcode_info_t opcode;
buf_ptr_t caddr;
uint8_t *dptr;
@@ -880,11 +867,10 @@ cpt_ecpm_prep(struct rte_crypto_ecpm_op_param *ecpm,
dptr += p_align;
/* Setup opcodes */
- opcode.s.major = CPT_MAJOR_OP_ECC;
- opcode.s.minor = CPT_MINOR_OP_ECC_UMP;
+ vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_ECC;
+ vq_cmd_w0.s.opcode.minor = CPT_MINOR_OP_ECC_UMP;
/* GP op header */
- vq_cmd_w0.s.opcode = opcode.flags;
vq_cmd_w0.s.param1 = curveid;
vq_cmd_w0.s.param2 = ecpm->scalar.length;
vq_cmd_w0.s.dlen = dlen;
diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
index c7feb6f9e2..46daf73725 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
@@ -889,7 +889,7 @@ otx2_cpt_sec_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
mdata_len = (int)rsp[3];
rte_pktmbuf_trim(m, mdata_len);
- if ((word0->s.opcode & 0xff) == OTX2_IPSEC_PO_PROCESS_IPSEC_INB) {
+ if (word0->s.opcode.major == OTX2_IPSEC_PO_PROCESS_IPSEC_INB) {
data = rte_pktmbuf_mtod(m, char *);
if (rsp[4] == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
--
2.22.0
^ permalink raw reply [flat|nested] 7+ messages in thread
* [dpdk-dev] [PATCH 3/4] common/cpt: use predefined macros
2020-11-03 8:37 [dpdk-dev] [PATCH 0/4] code cleanup and improvements Archana Muniganti
2020-11-03 8:37 ` [dpdk-dev] [PATCH 1/4] common/cpt: prepopulate word7 in sess Archana Muniganti
2020-11-03 8:37 ` [dpdk-dev] [PATCH 2/4] common/cpt: remove temporary variable Archana Muniganti
@ 2020-11-03 8:37 ` Archana Muniganti
2020-11-03 8:37 ` [dpdk-dev] [PATCH 4/4] common/cpt: remove redundant structure Archana Muniganti
2020-11-03 8:51 ` [dpdk-dev] [PATCH 0/4] code cleanup and improvements Anoob Joseph
4 siblings, 0 replies; 7+ messages in thread
From: Archana Muniganti @ 2020-11-03 8:37 UTC (permalink / raw)
To: akhil.goyal, anoobj, adwivedi; +Cc: Archana Muniganti, dev
Replace redundant macro ROUNDUP* with predefined macros.
Signed-off-by: Archana Muniganti <marchana@marvell.com>
---
drivers/common/cpt/cpt_common.h | 12 ------------
drivers/common/cpt/cpt_pmd_ops_helper.c | 5 +++--
drivers/common/cpt/cpt_ucode.h | 11 +++++++----
drivers/common/cpt/cpt_ucode_asym.h | 14 +++++++-------
drivers/crypto/octeontx/otx_cryptodev_ops.c | 6 ++++--
drivers/crypto/octeontx2/otx2_cryptodev_ops.c | 6 ++++--
6 files changed, 25 insertions(+), 29 deletions(-)
diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
index eefe2755c1..f61495e458 100644
--- a/drivers/common/cpt/cpt_common.h
+++ b/drivers/common/cpt/cpt_common.h
@@ -19,18 +19,6 @@
#define CPT_COUNT_THOLD 32
#define CPT_TIMER_THOLD 0x3F
-#ifndef ROUNDUP4
-#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
-#endif
-
-#ifndef ROUNDUP8
-#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
-#endif
-
-#ifndef ROUNDUP16
-#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
-#endif
-
#define MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)
struct cpt_qp_meta_info {
diff --git a/drivers/common/cpt/cpt_pmd_ops_helper.c b/drivers/common/cpt/cpt_pmd_ops_helper.c
index 09b762f81e..2cddddf4ce 100644
--- a/drivers/common/cpt/cpt_pmd_ops_helper.c
+++ b/drivers/common/cpt/cpt_pmd_ops_helper.c
@@ -35,8 +35,9 @@ cpt_pmd_ops_helper_get_mlen_sg_mode(void)
len += sizeof(struct cpt_request_info);
len += CPT_OFFSET_CONTROL_BYTES + CPT_MAX_IV_LEN;
- len += ROUNDUP8(SG_LIST_HDR_SIZE +
- (ROUNDUP4(CPT_MAX_SG_IN_OUT_CNT) >> 2) * SG_ENTRY_SIZE);
+ len += RTE_ALIGN_CEIL((SG_LIST_HDR_SIZE +
+ (RTE_ALIGN_CEIL(CPT_MAX_SG_IN_OUT_CNT, 4) >> 2) *
+ SG_ENTRY_SIZE), 8);
len += 2 * COMPLETION_CODE_SIZE;
len += 2 * sizeof(cpt_res_s_t);
return len;
diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
index 664a04e1a0..557379ed01 100644
--- a/drivers/common/cpt/cpt_ucode.h
+++ b/drivers/common/cpt/cpt_ucode.h
@@ -528,7 +528,7 @@ cpt_digest_gen_prep(uint32_t flags,
if (ctx->hmac) {
vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
vq_cmd_w0.s.param1 = key_len;
- vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
+ vq_cmd_w0.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
} else {
vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
vq_cmd_w0.s.param1 = 0;
@@ -564,7 +564,8 @@ cpt_digest_gen_prep(uint32_t flags,
uint64_t k_dma = params->ctx_buf.dma_addr +
offsetof(struct cpt_ctx, auth_key);
/* Key */
- i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
+ i = fill_sg_comp(gather_comp, i, k_dma,
+ RTE_ALIGN_CEIL(key_len, 8));
}
/* input data */
@@ -762,10 +763,12 @@ cpt_enc_hmac_prep(uint32_t flags,
enc_dlen = encr_data_len + encr_offset;
if (unlikely(encr_data_len & 0xf)) {
if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
- enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
+ enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 8) +
+ encr_offset;
else if (likely((cipher_type == AES_CBC) ||
(cipher_type == AES_ECB)))
- enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
+ enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 16) +
+ encr_offset;
}
if (unlikely(auth_dlen > enc_dlen)) {
diff --git a/drivers/common/cpt/cpt_ucode_asym.h b/drivers/common/cpt/cpt_ucode_asym.h
index 286f155849..50c6f58d3a 100644
--- a/drivers/common/cpt/cpt_ucode_asym.h
+++ b/drivers/common/cpt/cpt_ucode_asym.h
@@ -623,10 +623,10 @@ cpt_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
/* Truncate input length to curve prime length */
if (message_len > prime_len)
message_len = prime_len;
- m_align = ROUNDUP8(message_len);
+ m_align = RTE_ALIGN_CEIL(message_len, 8);
- p_align = ROUNDUP8(prime_len);
- k_align = ROUNDUP8(k_len);
+ p_align = RTE_ALIGN_CEIL(prime_len, 8);
+ k_align = RTE_ALIGN_CEIL(k_len, 8);
/* Set write offset for order and private key */
o_offset = prime_len - order_len;
@@ -723,8 +723,8 @@ cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param *ecdsa,
if (message_len > prime_len)
message_len = prime_len;
- m_align = ROUNDUP8(message_len);
- p_align = ROUNDUP8(prime_len);
+ m_align = RTE_ALIGN_CEIL(message_len, 8);
+ p_align = RTE_ALIGN_CEIL(prime_len, 8);
/* Set write offset for sign, order and public key coordinates */
o_offset = prime_len - order_len;
@@ -841,8 +841,8 @@ cpt_ecpm_prep(struct rte_crypto_ecpm_op_param *ecpm,
/* Input buffer */
dptr = RTE_PTR_ADD(req, sizeof(struct cpt_request_info));
- p_align = ROUNDUP8(prime_len);
- scalar_align = ROUNDUP8(ecpm->scalar.length);
+ p_align = RTE_ALIGN_CEIL(prime_len, 8);
+ scalar_align = RTE_ALIGN_CEIL(ecpm->scalar.length, 8);
/*
* Set dlen = sum(ROUNDUP8(input point(x and y coordinates), prime,
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c
index 8fc1fa3695..0a0c50a363 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_ops.c
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
@@ -738,7 +738,8 @@ otx_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
/* Separate out sign r and s components */
memcpy(ecdsa->r.data, req->rptr, prime_len);
- memcpy(ecdsa->s.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+ memcpy(ecdsa->s.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
+ prime_len);
ecdsa->r.length = prime_len;
ecdsa->s.length = prime_len;
}
@@ -751,7 +752,8 @@ otx_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
int prime_len = ec_grp[ec->curveid].prime.length;
memcpy(ecpm->r.x.data, req->rptr, prime_len);
- memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+ memcpy(ecpm->r.y.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
+ prime_len);
ecpm->r.x.length = prime_len;
ecpm->r.y.length = prime_len;
}
diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
index 46daf73725..fe76fe38c2 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
@@ -823,7 +823,8 @@ otx2_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
/* Separate out sign r and s components */
memcpy(ecdsa->r.data, req->rptr, prime_len);
- memcpy(ecdsa->s.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+ memcpy(ecdsa->s.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
+ prime_len);
ecdsa->r.length = prime_len;
ecdsa->s.length = prime_len;
}
@@ -836,7 +837,8 @@ otx2_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
int prime_len = ec_grp[ec->curveid].prime.length;
memcpy(ecpm->r.x.data, req->rptr, prime_len);
- memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+ memcpy(ecpm->r.y.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
+ prime_len);
ecpm->r.x.length = prime_len;
ecpm->r.y.length = prime_len;
}
--
2.22.0
^ permalink raw reply [flat|nested] 7+ messages in thread