* [dpdk-dev] [PATCH 1/2] common/cpt: remove redundant bit swaps
@ 2019-07-06 13:23 Anoob Joseph
2019-07-06 13:23 ` [dpdk-dev] [PATCH 2/2] common/cpt: remove redundant code in datapath Anoob Joseph
` (2 more replies)
0 siblings, 3 replies; 5+ messages in thread
From: Anoob Joseph @ 2019-07-06 13:23 UTC (permalink / raw)
To: Akhil Goyal, Pablo de Lara
Cc: Anoob Joseph, Jerin Jacob, Narayana Prasad, dev
The bit swaps can be removed by re-arranging the structure.
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
drivers/common/cpt/cpt_hw_types.h | 7 +++
drivers/common/cpt/cpt_ucode.h | 116 ++++++++++++--------------------------
2 files changed, 44 insertions(+), 79 deletions(-)
diff --git a/drivers/common/cpt/cpt_hw_types.h b/drivers/common/cpt/cpt_hw_types.h
index 7be1d12..e2b127d 100644
--- a/drivers/common/cpt/cpt_hw_types.h
+++ b/drivers/common/cpt/cpt_hw_types.h
@@ -30,10 +30,17 @@
typedef union {
uint64_t u64;
struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
uint16_t opcode;
uint16_t param1;
uint16_t param2;
uint16_t dlen;
+#else
+ uint16_t dlen;
+ uint16_t param2;
+ uint16_t param1;
+ uint16_t opcode;
+#endif
} s;
} vq_cmd_word0_t;
diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
index e02b34a..c589b58 100644
--- a/drivers/common/cpt/cpt_ucode.h
+++ b/drivers/common/cpt/cpt_ucode.h
@@ -520,16 +520,15 @@ cpt_digest_gen_prep(uint32_t flags,
/*GP op header */
vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param2 = rte_cpu_to_be_16(((uint16_t)hash_type << 8));
+ vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
if (ctx->hmac) {
opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(key_len);
- vq_cmd_w0.s.dlen =
- rte_cpu_to_be_16((data_len + ROUNDUP8(key_len)));
+ vq_cmd_w0.s.param1 = key_len;
+ vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
} else {
opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
vq_cmd_w0.s.param1 = 0;
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(data_len);
+ vq_cmd_w0.s.dlen = data_len;
}
opcode.s.minor = 0;
@@ -540,10 +539,10 @@ cpt_digest_gen_prep(uint32_t flags,
/* Minor op is passthrough */
opcode.s.minor = 0x03;
/* Send out completion code only */
- vq_cmd_w0.s.param2 = rte_cpu_to_be_16(0x1);
+ vq_cmd_w0.s.param2 = 0x1;
}
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode = opcode.flags;
/* DPTR has SG list */
in_buffer = m_vaddr;
@@ -622,7 +621,7 @@ cpt_digest_gen_prep(uint32_t flags,
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is DPTR len incase of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
@@ -635,11 +634,6 @@ cpt_digest_gen_prep(uint32_t flags,
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
/* vq command w3 */
vq_cmd_w3.u64 = 0;
@@ -798,8 +792,8 @@ cpt_enc_hmac_prep(uint32_t flags,
/* GP op header */
vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
- vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.param2 = auth_data_len;
/*
* In 83XX since we have a limitation of
* IV & Offset control word not part of instruction
@@ -826,9 +820,9 @@ cpt_enc_hmac_prep(uint32_t flags,
req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
+ outputlen - iv_len);
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
+ vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode = opcode.flags;
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
@@ -861,7 +855,7 @@ cpt_enc_hmac_prep(uint32_t flags,
opcode.s.major |= CPT_DMA_MODE;
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode = opcode.flags;
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
@@ -1005,7 +999,7 @@ cpt_enc_hmac_prep(uint32_t flags,
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is DPTR len incase of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
@@ -1020,12 +1014,6 @@ cpt_enc_hmac_prep(uint32_t flags,
req->ist.ei2 = rptr_dma;
}
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
ctx_dma = fc_params->ctx_buf.dma_addr +
offsetof(struct cpt_ctx, fctx);
/* vq command w3 */
@@ -1175,8 +1163,8 @@ cpt_dec_hmac_prep(uint32_t flags,
encr_offset = inputlen;
vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
- vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.param2 = auth_data_len;
/*
* In 83XX since we have a limitation of
@@ -1209,9 +1197,9 @@ cpt_dec_hmac_prep(uint32_t flags,
* hmac.
*/
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
+ vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode = opcode.flags;
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
@@ -1245,7 +1233,7 @@ cpt_dec_hmac_prep(uint32_t flags,
opcode.s.major |= CPT_DMA_MODE;
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode = opcode.flags;
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
@@ -1401,7 +1389,7 @@ cpt_dec_hmac_prep(uint32_t flags,
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is DPTR len incase of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
@@ -1417,12 +1405,6 @@ cpt_dec_hmac_prep(uint32_t flags,
req->ist.ei2 = rptr_dma;
}
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
ctx_dma = fc_params->ctx_buf.dma_addr +
offsetof(struct cpt_ctx, fctx);
/* vq command w3 */
@@ -1579,8 +1561,8 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
* GP op header, lengths are expected in bits.
*/
vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
- vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.param2 = auth_data_len;
/*
* In 83XX since we have a limitation of
@@ -1609,9 +1591,9 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
+ outputlen - iv_len);
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
+ vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode = opcode.flags;
if (likely(iv_len)) {
uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
@@ -1638,7 +1620,7 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
opcode.s.major |= CPT_DMA_MODE;
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode = opcode.flags;
/* DPTR has SG list */
in_buffer = m_vaddr;
@@ -1740,7 +1722,7 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is DPTR len incase of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
@@ -1755,12 +1737,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
req->ist.ei2 = rptr_dma;
}
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
/* vq command w3 */
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.grp = 0;
@@ -1886,7 +1862,7 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
* GP op header, lengths are expected in bits.
*/
vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
+ vq_cmd_w0.s.param1 = encr_data_len;
/*
* In 83XX since we have a limitation of
@@ -1915,9 +1891,9 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
+ outputlen - iv_len);
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
+ vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode = opcode.flags;
if (likely(iv_len)) {
uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
@@ -1945,7 +1921,7 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
opcode.s.major |= CPT_DMA_MODE;
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.opcode = opcode.flags;
/* DPTR has SG list */
in_buffer = m_vaddr;
@@ -2020,7 +1996,7 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is DPTR len incase of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
@@ -2035,12 +2011,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
req->ist.ei2 = rptr_dma;
}
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
/* vq command w3 */
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.grp = 0;
@@ -2150,9 +2120,9 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
* GP op header, lengths are expected in bits.
*/
vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
- vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.param2 = auth_data_len;
+ vq_cmd_w0.s.opcode = opcode.flags;
/* consider iv len */
if (flags == 0x0) {
@@ -2279,7 +2249,7 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is DPTR len incase of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
@@ -2293,12 +2263,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
/* vq command w3 */
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.grp = 0;
@@ -2394,8 +2358,8 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
* GP op header, lengths are expected in bits.
*/
vq_cmd_w0.u64 = 0;
- vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
- vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+ vq_cmd_w0.s.param1 = encr_data_len;
+ vq_cmd_w0.s.opcode = opcode.flags;
/* consider iv len */
encr_offset += iv_len;
@@ -2480,7 +2444,7 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
/* This is DPTR len incase of SG mode */
- vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+ vq_cmd_w0.s.dlen = size;
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
@@ -2494,12 +2458,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
req->ist.ei1 = dptr_dma;
req->ist.ei2 = rptr_dma;
- /* First 16-bit swap then 64-bit swap */
- /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
- * to eliminate all the swapping
- */
- vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
/* vq command w3 */
vq_cmd_w3.u64 = 0;
vq_cmd_w3.s.grp = 0;
--
2.7.4
^ permalink raw reply [flat|nested] 5+ messages in thread
* [dpdk-dev] [PATCH 2/2] common/cpt: remove redundant code in datapath
2019-07-06 13:23 [dpdk-dev] [PATCH 1/2] common/cpt: remove redundant bit swaps Anoob Joseph
@ 2019-07-06 13:23 ` Anoob Joseph
2019-07-07 14:11 ` Anoob Joseph
2019-07-07 14:10 ` [dpdk-dev] [PATCH 1/2] common/cpt: remove redundant bit swaps Anoob Joseph
2019-07-14 13:55 ` Thomas Monjalon
2 siblings, 1 reply; 5+ messages in thread
From: Anoob Joseph @ 2019-07-06 13:23 UTC (permalink / raw)
To: Akhil Goyal, Pablo de Lara
Cc: Anoob Joseph, Jerin Jacob, Narayana Prasad, dev
Removing redundant checks and unused local variables from datapath.
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
drivers/common/cpt/cpt_ucode.h | 133 ++++++++++-------------------------------
1 file changed, 33 insertions(+), 100 deletions(-)
diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
index c589b58..e197e4e 100644
--- a/drivers/common/cpt/cpt_ucode.h
+++ b/drivers/common/cpt/cpt_ucode.h
@@ -89,8 +89,7 @@ cpt_fc_ciph_validate_key_aes(uint16_t key_len)
}
static __rte_always_inline int
-cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
- uint16_t key_len)
+cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t key_len)
{
int fc_type = 0;
switch (type) {
@@ -125,7 +124,7 @@ cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
if (unlikely(key_len != 16))
return -1;
/* No support for AEAD yet */
- if (unlikely(cpt_ctx->hash_type))
+ if (unlikely(ctx->hash_type))
return -1;
fc_type = ZUC_SNOW3G;
break;
@@ -134,14 +133,16 @@ cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
if (unlikely(key_len != 16))
return -1;
/* No support for AEAD yet */
- if (unlikely(cpt_ctx->hash_type))
+ if (unlikely(ctx->hash_type))
return -1;
fc_type = KASUMI;
break;
default:
return -1;
}
- return fc_type;
+
+ ctx->fc_type = fc_type;
+ return 0;
}
static __rte_always_inline void
@@ -181,7 +182,6 @@ cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, uint8_t *key,
cpt_ctx->snow3g = 1;
gen_key_snow3g(key, keyx);
memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
- cpt_ctx->fc_type = ZUC_SNOW3G;
cpt_ctx->zsk_flags = 0;
}
@@ -192,7 +192,6 @@ cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, uint8_t *key,
cpt_ctx->snow3g = 0;
memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
- cpt_ctx->fc_type = ZUC_SNOW3G;
cpt_ctx->zsk_flags = 0;
}
@@ -203,7 +202,6 @@ cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, uint8_t *key,
cpt_ctx->k_ecb = 1;
memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
cpt_ctx->zsk_flags = 0;
- cpt_ctx->fc_type = KASUMI;
}
static __rte_always_inline void
@@ -212,7 +210,6 @@ cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, uint8_t *key,
{
memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
cpt_ctx->zsk_flags = 0;
- cpt_ctx->fc_type = KASUMI;
}
static __rte_always_inline int
@@ -222,15 +219,13 @@ cpt_fc_ciph_set_key(void *ctx, cipher_type_t type, uint8_t *key,
struct cpt_ctx *cpt_ctx = ctx;
mc_fc_context_t *fctx = &cpt_ctx->fctx;
uint64_t *ctrl_flags = NULL;
- int fc_type;
+ int ret;
- /* Validate key before proceeding */
- fc_type = cpt_fc_ciph_validate_key(type, cpt_ctx, key_len);
- if (unlikely(fc_type == -1))
+ ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
+ if (unlikely(ret))
return -1;
- if (fc_type == FC_GEN) {
- cpt_ctx->fc_type = FC_GEN;
+ if (cpt_ctx->fc_type == FC_GEN) {
ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
*ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
/*
@@ -467,7 +462,6 @@ cpt_digest_gen_prep(uint32_t flags,
{
struct cpt_request_info *req;
uint32_t size, i;
- int32_t m_size;
uint16_t data_len, mac_len, key_len;
auth_type_t hash_type;
buf_ptr_t *meta_p;
@@ -488,7 +482,6 @@ cpt_digest_gen_prep(uint32_t flags,
m_vaddr = meta_p->vaddr;
m_dma = meta_p->dma_addr;
- m_size = meta_p->size;
/*
* Save initial space that followed app data for completion code &
@@ -504,14 +497,12 @@ cpt_digest_gen_prep(uint32_t flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
req = m_vaddr;
size = sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
hash_type = ctx->hash_type;
mac_len = ctx->mac_len;
@@ -625,7 +616,6 @@ cpt_digest_gen_prep(uint32_t flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
@@ -678,13 +668,11 @@ cpt_enc_hmac_prep(uint32_t flags,
vq_cmd_word3_t vq_cmd_w3;
void *c_vaddr;
uint64_t c_dma;
- int32_t m_size;
opcode_info_t opcode;
meta_p = &fc_params->meta_buf;
m_vaddr = meta_p->vaddr;
m_dma = meta_p->dma_addr;
- m_size = meta_p->size;
encr_offset = ENCR_OFFSET(d_offs);
auth_offset = AUTH_OFFSET(d_offs);
@@ -720,7 +708,6 @@ cpt_enc_hmac_prep(uint32_t flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* start cpt request info struct at 8 byte boundary */
size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
@@ -731,7 +718,6 @@ cpt_enc_hmac_prep(uint32_t flags,
size += sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
if (hash_type == GMAC_TYPE)
encr_data_len = 0;
@@ -851,7 +837,6 @@ cpt_enc_hmac_prep(uint32_t flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
opcode.s.major |= CPT_DMA_MODE;
@@ -1003,7 +988,6 @@ cpt_enc_hmac_prep(uint32_t flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
@@ -1047,7 +1031,7 @@ cpt_dec_hmac_prep(uint32_t flags,
uint32_t iv_offset = 0, size;
int32_t inputlen, outputlen, enc_dlen, auth_dlen;
struct cpt_ctx *cpt_ctx;
- int32_t hash_type, mac_len, m_size;
+ int32_t hash_type, mac_len;
uint8_t iv_len = 16;
struct cpt_request_info *req;
buf_ptr_t *meta_p, *aad_buf = NULL;
@@ -1065,7 +1049,6 @@ cpt_dec_hmac_prep(uint32_t flags,
meta_p = &fc_params->meta_buf;
m_vaddr = meta_p->vaddr;
m_dma = meta_p->dma_addr;
- m_size = meta_p->size;
encr_offset = ENCR_OFFSET(d_offs);
auth_offset = AUTH_OFFSET(d_offs);
@@ -1131,7 +1114,6 @@ cpt_dec_hmac_prep(uint32_t flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* start cpt request info structure at 8 byte alignment */
size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) -
@@ -1142,7 +1124,6 @@ cpt_dec_hmac_prep(uint32_t flags,
size += sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* Decryption */
opcode.s.major = CPT_MAJOR_OP_FC;
@@ -1229,7 +1210,6 @@ cpt_dec_hmac_prep(uint32_t flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
opcode.s.major |= CPT_DMA_MODE;
@@ -1393,7 +1373,6 @@ cpt_dec_hmac_prep(uint32_t flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
@@ -1444,7 +1423,7 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
buf_ptr_t *buf_p;
uint32_t encr_offset = 0, auth_offset = 0;
uint32_t encr_data_len = 0, auth_data_len = 0;
- int flags, iv_len = 16, m_size;
+ int flags, iv_len = 16;
void *m_vaddr, *c_vaddr;
uint64_t m_dma, c_dma, offset_ctrl;
uint64_t *offset_vaddr, offset_dma;
@@ -1456,7 +1435,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
buf_p = &params->meta_buf;
m_vaddr = buf_p->vaddr;
m_dma = buf_p->dma_addr;
- m_size = buf_p->size;
cpt_ctx = params->ctx_buf.vaddr;
flags = cpt_ctx->zsk_flags;
@@ -1478,7 +1456,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* Reserve memory for cpt request info */
req = m_vaddr;
@@ -1486,7 +1463,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
size = sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
@@ -1616,7 +1592,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
m_dma += OFF_CTRL_LEN + iv_len;
- m_size -= OFF_CTRL_LEN + iv_len;
opcode.s.major |= CPT_DMA_MODE;
@@ -1726,7 +1701,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
@@ -1774,7 +1748,7 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
buf_ptr_t *buf_p;
uint32_t encr_offset;
uint32_t encr_data_len;
- int flags, m_size;
+ int flags;
void *m_vaddr, *c_vaddr;
uint64_t m_dma, c_dma;
uint64_t *offset_vaddr, offset_dma;
@@ -1786,7 +1760,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
buf_p = &params->meta_buf;
m_vaddr = buf_p->vaddr;
m_dma = buf_p->dma_addr;
- m_size = buf_p->size;
/*
* Microcode expects offsets in bytes
@@ -1813,7 +1786,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* Reserve memory for cpt request info */
req = m_vaddr;
@@ -1821,7 +1793,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
size = sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
@@ -1917,7 +1888,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
m_dma += OFF_CTRL_LEN + iv_len;
- m_size -= OFF_CTRL_LEN + iv_len;
opcode.s.major |= CPT_DMA_MODE;
@@ -2000,7 +1970,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
@@ -2049,7 +2018,7 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
buf_ptr_t *buf_p;
uint32_t encr_offset, auth_offset;
uint32_t encr_data_len, auth_data_len;
- int flags, m_size;
+ int flags;
uint8_t *iv_s, *iv_d, iv_len = 8;
uint8_t dir = 0;
void *m_vaddr, *c_vaddr;
@@ -2067,7 +2036,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
buf_p = &params->meta_buf;
m_vaddr = buf_p->vaddr;
m_dma = buf_p->dma_addr;
- m_size = buf_p->size;
encr_offset = ENCR_OFFSET(d_offs) / 8;
auth_offset = AUTH_OFFSET(d_offs) / 8;
@@ -2100,7 +2068,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* Reserve memory for cpt request info */
req = m_vaddr;
@@ -2108,7 +2075,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
size = sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
@@ -2136,7 +2102,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
m_dma += OFF_CTRL_LEN + iv_len;
- m_size -= OFF_CTRL_LEN + iv_len;
/* DPTR has SG list */
in_buffer = m_vaddr;
@@ -2253,7 +2218,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
@@ -2299,7 +2263,7 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
buf_ptr_t *buf_p;
uint32_t encr_offset;
uint32_t encr_data_len;
- int flags, m_size;
+ int flags;
uint8_t dir = 0;
void *m_vaddr, *c_vaddr;
uint64_t m_dma, c_dma;
@@ -2316,7 +2280,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
buf_p = &params->meta_buf;
m_vaddr = buf_p->vaddr;
m_dma = buf_p->dma_addr;
- m_size = buf_p->size;
encr_offset = ENCR_OFFSET(d_offs) / 8;
encr_data_len = ENCR_DLEN(d_lens);
@@ -2338,7 +2301,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* Reserve memory for cpt request info */
req = m_vaddr;
@@ -2346,7 +2308,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
size = sizeof(struct cpt_request_info);
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
@@ -2373,7 +2334,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
m_dma += OFF_CTRL_LEN + iv_len;
- m_size -= OFF_CTRL_LEN + iv_len;
/* DPTR has SG list */
in_buffer = m_vaddr;
@@ -2448,7 +2408,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += size;
- m_size -= size;
/* cpt alternate completion address saved earlier */
req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8);
@@ -2627,9 +2586,9 @@ fill_sess_aead(struct rte_crypto_sym_xform *xform,
cipher_type_t enc_type = 0; /* NULL Cipher type */
auth_type_t auth_type = 0; /* NULL Auth type */
uint32_t cipher_key_len = 0;
- uint8_t zsk_flag = 0, aes_gcm = 0;
+ uint8_t aes_gcm = 0;
aead_form = &xform->aead;
- void *ctx;
+ void *ctx = SESS_PRIV(sess);
if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) {
@@ -2663,13 +2622,12 @@ fill_sess_aead(struct rte_crypto_sym_xform *xform,
(unsigned int long)aead_form->key.length);
return -1;
}
- sess->zsk_flag = zsk_flag;
+ sess->zsk_flag = 0;
sess->aes_gcm = aes_gcm;
sess->mac_len = aead_form->digest_length;
sess->iv_offset = aead_form->iv.offset;
sess->iv_length = aead_form->iv.length;
sess->aad_length = aead_form->aad_length;
- ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc)),
cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
aead_form->key.length, NULL);
@@ -2686,10 +2644,7 @@ fill_sess_cipher(struct rte_crypto_sym_xform *xform,
struct rte_crypto_cipher_xform *c_form;
cipher_type_t enc_type = 0; /* NULL Cipher type */
uint32_t cipher_key_len = 0;
- uint8_t zsk_flag = 0, aes_gcm = 0, aes_ctr = 0, is_null = 0;
-
- if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
- return -1;
+ uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
c_form = &xform->cipher;
@@ -2771,7 +2726,7 @@ fill_sess_cipher(struct rte_crypto_sym_xform *xform,
}
sess->zsk_flag = zsk_flag;
- sess->aes_gcm = aes_gcm;
+ sess->aes_gcm = 0;
sess->aes_ctr = aes_ctr;
sess->iv_offset = c_form->iv.offset;
sess->iv_length = c_form->iv.length;
@@ -2791,9 +2746,6 @@ fill_sess_auth(struct rte_crypto_sym_xform *xform,
auth_type_t auth_type = 0; /* NULL Auth type */
uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
- if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
- goto error_out;
-
a_form = &xform->auth;
if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
@@ -2865,11 +2817,11 @@ fill_sess_auth(struct rte_crypto_sym_xform *xform,
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
a_form->algo);
- goto error_out;
+ return -1;
default:
CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
a_form->algo);
- goto error_out;
+ return -1;
}
sess->zsk_flag = zsk_flag;
@@ -2884,9 +2836,6 @@ fill_sess_auth(struct rte_crypto_sym_xform *xform,
a_form->key.length, a_form->digest_length);
return 0;
-
-error_out:
- return -1;
}
static __rte_always_inline int
@@ -2896,11 +2845,7 @@ fill_sess_gmac(struct rte_crypto_sym_xform *xform,
struct rte_crypto_auth_xform *a_form;
cipher_type_t enc_type = 0; /* NULL Cipher type */
auth_type_t auth_type = 0; /* NULL Auth type */
- uint8_t zsk_flag = 0, aes_gcm = 0;
- void *ctx;
-
- if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
- return -1;
+ void *ctx = SESS_PRIV(sess);
a_form = &xform->auth;
@@ -2924,13 +2869,12 @@ fill_sess_gmac(struct rte_crypto_sym_xform *xform,
return -1;
}
- sess->zsk_flag = zsk_flag;
- sess->aes_gcm = aes_gcm;
+ sess->zsk_flag = 0;
+ sess->aes_gcm = 0;
sess->is_gmac = 1;
sess->iv_offset = a_form->iv.offset;
sess->iv_length = a_form->iv.length;
sess->mac_len = a_form->digest_length;
- ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc)),
cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
a_form->key.length, NULL);
@@ -3130,9 +3074,6 @@ fill_fc_params(struct rte_crypto_op *cop,
uint64_t d_offs, d_lens;
struct rte_mbuf *m_src, *m_dst;
uint8_t cpt_op = sess_misc->cpt_op;
- uint8_t zsk_flag = sess_misc->zsk_flag;
- uint8_t aes_gcm = sess_misc->aes_gcm;
- uint16_t mac_len = sess_misc->mac_len;
#ifdef CPT_ALWAYS_USE_SG_MODE
uint8_t inplace = 0;
#else
@@ -3158,21 +3099,17 @@ fill_fc_params(struct rte_crypto_op *cop,
}
}
- if (zsk_flag) {
+ if (sess_misc->zsk_flag) {
fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
uint8_t *,
sess_misc->auth_iv_offset);
- if (zsk_flag == K_F9) {
- CPT_LOG_DP_ERR("Should not reach here for "
- "kasumi F9\n");
- }
- if (zsk_flag != ZS_EA)
+ if (sess_misc->zsk_flag != ZS_EA)
inplace = 0;
}
m_src = sym_op->m_src;
m_dst = sym_op->m_dst;
- if (aes_gcm) {
+ if (sess_misc->aes_gcm) {
uint8_t *salt;
uint8_t *aad_data;
uint16_t aad_len;
@@ -3206,7 +3143,7 @@ fill_fc_params(struct rte_crypto_op *cop,
sess_misc->salt = *(uint32_t *)salt;
}
fc_params.iv_buf = salt + 4;
- if (likely(mac_len)) {
+ if (likely(sess_misc->mac_len)) {
struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ? m_dst :
m_src;
@@ -3249,7 +3186,7 @@ fill_fc_params(struct rte_crypto_op *cop,
}
fc_params.iv_buf = salt + 4;
}
- if (likely(mac_len)) {
+ if (likely(sess_misc->mac_len)) {
struct rte_mbuf *m;
m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src;
@@ -3308,7 +3245,6 @@ fill_fc_params(struct rte_crypto_op *cop,
uint32_t pkt_len;
/* Try to make room as much as src has */
- m_dst = sym_op->m_dst;
pkt_len = rte_pktmbuf_pkt_len(m_dst);
if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
@@ -3501,7 +3437,6 @@ fill_digest_params(struct rte_crypto_op *cop,
uint64_t d_offs = 0, d_lens;
struct rte_mbuf *m_src, *m_dst;
uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
- uint8_t zsk_flag = sess->zsk_flag;
uint16_t mac_len = sess->mac_len;
fc_params_t params;
char src[SRC_IOV_SIZE];
@@ -3532,7 +3467,7 @@ fill_digest_params(struct rte_crypto_op *cop,
flags = VALID_MAC_BUF;
params.src_iov = (void *)src;
- if (unlikely(zsk_flag)) {
+ if (unlikely(sess->zsk_flag)) {
/*
* Since for Zuc, Kasumi, Snow3g offsets are in bits
* we will send pass through even for auth only case,
@@ -3542,10 +3477,9 @@ fill_digest_params(struct rte_crypto_op *cop,
auth_range_off = 0;
params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
uint8_t *, sess->auth_iv_offset);
- if (zsk_flag == K_F9) {
+ if (sess->zsk_flag == K_F9) {
uint32_t length_in_bits, num_bytes;
uint8_t *src, direction = 0;
- uint32_t counter_num_bytes;
memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
uint8_t *), 8);
@@ -3555,10 +3489,9 @@ fill_digest_params(struct rte_crypto_op *cop,
*/
length_in_bits = cop->sym->auth.data.length;
num_bytes = (length_in_bits >> 3);
- counter_num_bytes = num_bytes;
src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
find_kasumif9_direction_and_length(src,
- counter_num_bytes,
+ num_bytes,
&length_in_bits,
&direction);
length_in_bits -= 64;
--
2.7.4
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [dpdk-dev] [PATCH 1/2] common/cpt: remove redundant bit swaps
2019-07-06 13:23 [dpdk-dev] [PATCH 1/2] common/cpt: remove redundant bit swaps Anoob Joseph
2019-07-06 13:23 ` [dpdk-dev] [PATCH 2/2] common/cpt: remove redundant code in datapath Anoob Joseph
@ 2019-07-07 14:10 ` Anoob Joseph
2019-07-14 13:55 ` Thomas Monjalon
2 siblings, 0 replies; 5+ messages in thread
From: Anoob Joseph @ 2019-07-07 14:10 UTC (permalink / raw)
To: Akhil Goyal, Pablo de Lara
Cc: Jerin Jacob Kollanukkaran, Narayana Prasad Raju Athreya, dev
Hi Akhil, Pablo
This patch is good to go if you don't have any comments.
Thanks,
Anoob
> -----Original Message-----
> From: Anoob Joseph <anoobj@marvell.com>
> Sent: Saturday, July 6, 2019 6:54 PM
> To: Akhil Goyal <akhil.goyal@nxp.com>; Pablo de Lara
> <pablo.de.lara.guarch@intel.com>
> Cc: Anoob Joseph <anoobj@marvell.com>; Jerin Jacob Kollanukkaran
> <jerinj@marvell.com>; Narayana Prasad Raju Athreya
> <pathreya@marvell.com>; dev@dpdk.org
> Subject: [PATCH 1/2] common/cpt: remove redundant bit swaps
>
> The bit swaps can be removed by re-arranging the structure.
>
> Signed-off-by: Anoob Joseph <anoobj@marvell.com>
> ---
> drivers/common/cpt/cpt_hw_types.h | 7 +++
> drivers/common/cpt/cpt_ucode.h | 116 ++++++++++++--------------------------
> 2 files changed, 44 insertions(+), 79 deletions(-)
>
> diff --git a/drivers/common/cpt/cpt_hw_types.h
> b/drivers/common/cpt/cpt_hw_types.h
> index 7be1d12..e2b127d 100644
> --- a/drivers/common/cpt/cpt_hw_types.h
> +++ b/drivers/common/cpt/cpt_hw_types.h
> @@ -30,10 +30,17 @@
> typedef union {
> uint64_t u64;
> struct {
> +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
> uint16_t opcode;
> uint16_t param1;
> uint16_t param2;
> uint16_t dlen;
> +#else
> + uint16_t dlen;
> + uint16_t param2;
> + uint16_t param1;
> + uint16_t opcode;
> +#endif
> } s;
> } vq_cmd_word0_t;
>
> diff --git a/drivers/common/cpt/cpt_ucode.h
> b/drivers/common/cpt/cpt_ucode.h index e02b34a..c589b58 100644
> --- a/drivers/common/cpt/cpt_ucode.h
> +++ b/drivers/common/cpt/cpt_ucode.h
> @@ -520,16 +520,15 @@ cpt_digest_gen_prep(uint32_t flags,
>
> /*GP op header */
> vq_cmd_w0.u64 = 0;
> - vq_cmd_w0.s.param2 = rte_cpu_to_be_16(((uint16_t)hash_type << 8));
> + vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
> if (ctx->hmac) {
> opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
> - vq_cmd_w0.s.param1 = rte_cpu_to_be_16(key_len);
> - vq_cmd_w0.s.dlen =
> - rte_cpu_to_be_16((data_len + ROUNDUP8(key_len)));
> + vq_cmd_w0.s.param1 = key_len;
> + vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
> } else {
> opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
> vq_cmd_w0.s.param1 = 0;
> - vq_cmd_w0.s.dlen = rte_cpu_to_be_16(data_len);
> + vq_cmd_w0.s.dlen = data_len;
> }
>
> opcode.s.minor = 0;
> @@ -540,10 +539,10 @@ cpt_digest_gen_prep(uint32_t flags,
> /* Minor op is passthrough */
> opcode.s.minor = 0x03;
> /* Send out completion code only */
> - vq_cmd_w0.s.param2 = rte_cpu_to_be_16(0x1);
> + vq_cmd_w0.s.param2 = 0x1;
> }
>
> - vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
> + vq_cmd_w0.s.opcode = opcode.flags;
>
> /* DPTR has SG list */
> in_buffer = m_vaddr;
> @@ -622,7 +621,7 @@ cpt_digest_gen_prep(uint32_t flags,
> size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
>
> /* This is DPTR len incase of SG mode */
> - vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
> + vq_cmd_w0.s.dlen = size;
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> @@ -635,11 +634,6 @@ cpt_digest_gen_prep(uint32_t flags,
>
> req->ist.ei1 = dptr_dma;
> req->ist.ei2 = rptr_dma;
> - /* First 16-bit swap then 64-bit swap */
> - /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
> - * to eliminate all the swapping
> - */
> - vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
>
> /* vq command w3 */
> vq_cmd_w3.u64 = 0;
> @@ -798,8 +792,8 @@ cpt_enc_hmac_prep(uint32_t flags,
>
> /* GP op header */
> vq_cmd_w0.u64 = 0;
> - vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
> - vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
> + vq_cmd_w0.s.param1 = encr_data_len;
> + vq_cmd_w0.s.param2 = auth_data_len;
> /*
> * In 83XX since we have a limitation of
> * IV & Offset control word not part of instruction @@ -826,9 +820,9
> @@ cpt_enc_hmac_prep(uint32_t flags,
> req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
> + outputlen - iv_len);
>
> - vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen +
> OFF_CTRL_LEN);
> + vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
>
> - vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
> + vq_cmd_w0.s.opcode = opcode.flags;
>
> if (likely(iv_len)) {
> uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
> @@ -861,7 +855,7 @@ cpt_enc_hmac_prep(uint32_t flags,
>
> opcode.s.major |= CPT_DMA_MODE;
>
> - vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
> + vq_cmd_w0.s.opcode = opcode.flags;
>
> if (likely(iv_len)) {
> uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
> @@ -1005,7 +999,7 @@ cpt_enc_hmac_prep(uint32_t flags,
> size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
>
> /* This is DPTR len incase of SG mode */
> - vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
> + vq_cmd_w0.s.dlen = size;
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> @@ -1020,12 +1014,6 @@ cpt_enc_hmac_prep(uint32_t flags,
> req->ist.ei2 = rptr_dma;
> }
>
> - /* First 16-bit swap then 64-bit swap */
> - /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
> - * to eliminate all the swapping
> - */
> - vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
> -
> ctx_dma = fc_params->ctx_buf.dma_addr +
> offsetof(struct cpt_ctx, fctx);
> /* vq command w3 */
> @@ -1175,8 +1163,8 @@ cpt_dec_hmac_prep(uint32_t flags,
> encr_offset = inputlen;
>
> vq_cmd_w0.u64 = 0;
> - vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
> - vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
> + vq_cmd_w0.s.param1 = encr_data_len;
> + vq_cmd_w0.s.param2 = auth_data_len;
>
> /*
> * In 83XX since we have a limitation of @@ -1209,9 +1197,9 @@
> cpt_dec_hmac_prep(uint32_t flags,
> * hmac.
> */
>
> - vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen +
> OFF_CTRL_LEN);
> + vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
>
> - vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
> + vq_cmd_w0.s.opcode = opcode.flags;
>
> if (likely(iv_len)) {
> uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
> @@ -1245,7 +1233,7 @@ cpt_dec_hmac_prep(uint32_t flags,
>
> opcode.s.major |= CPT_DMA_MODE;
>
> - vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
> + vq_cmd_w0.s.opcode = opcode.flags;
>
> if (likely(iv_len)) {
> uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
> @@ -1401,7 +1389,7 @@ cpt_dec_hmac_prep(uint32_t flags,
> size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
>
> /* This is DPTR len incase of SG mode */
> - vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
> + vq_cmd_w0.s.dlen = size;
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> @@ -1417,12 +1405,6 @@ cpt_dec_hmac_prep(uint32_t flags,
> req->ist.ei2 = rptr_dma;
> }
>
> - /* First 16-bit swap then 64-bit swap */
> - /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
> - * to eliminate all the swapping
> - */
> - vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
> -
> ctx_dma = fc_params->ctx_buf.dma_addr +
> offsetof(struct cpt_ctx, fctx);
> /* vq command w3 */
> @@ -1579,8 +1561,8 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
> * GP op header, lengths are expected in bits.
> */
> vq_cmd_w0.u64 = 0;
> - vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
> - vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
> + vq_cmd_w0.s.param1 = encr_data_len;
> + vq_cmd_w0.s.param2 = auth_data_len;
>
> /*
> * In 83XX since we have a limitation of @@ -1609,9 +1591,9 @@
> cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
> req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
> + outputlen - iv_len);
>
> - vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen +
> OFF_CTRL_LEN);
> + vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
>
> - vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
> + vq_cmd_w0.s.opcode = opcode.flags;
>
> if (likely(iv_len)) {
> uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
> @@ -1638,7 +1620,7 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
>
> opcode.s.major |= CPT_DMA_MODE;
>
> - vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
> + vq_cmd_w0.s.opcode = opcode.flags;
>
> /* DPTR has SG list */
> in_buffer = m_vaddr;
> @@ -1740,7 +1722,7 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
> size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
>
> /* This is DPTR len incase of SG mode */
> - vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
> + vq_cmd_w0.s.dlen = size;
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> @@ -1755,12 +1737,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
> req->ist.ei2 = rptr_dma;
> }
>
> - /* First 16-bit swap then 64-bit swap */
> - /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
> - * to eliminate all the swapping
> - */
> - vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
> -
> /* vq command w3 */
> vq_cmd_w3.u64 = 0;
> vq_cmd_w3.s.grp = 0;
> @@ -1886,7 +1862,7 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
> * GP op header, lengths are expected in bits.
> */
> vq_cmd_w0.u64 = 0;
> - vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
> + vq_cmd_w0.s.param1 = encr_data_len;
>
> /*
> * In 83XX since we have a limitation of @@ -1915,9 +1891,9 @@
> cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
> req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
> + outputlen - iv_len);
>
> - vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen +
> OFF_CTRL_LEN);
> + vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
>
> - vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
> + vq_cmd_w0.s.opcode = opcode.flags;
>
> if (likely(iv_len)) {
> uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
> @@ -1945,7 +1921,7 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
>
> opcode.s.major |= CPT_DMA_MODE;
>
> - vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
> + vq_cmd_w0.s.opcode = opcode.flags;
>
> /* DPTR has SG list */
> in_buffer = m_vaddr;
> @@ -2020,7 +1996,7 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
> size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
>
> /* This is DPTR len incase of SG mode */
> - vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
> + vq_cmd_w0.s.dlen = size;
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> @@ -2035,12 +2011,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
> req->ist.ei2 = rptr_dma;
> }
>
> - /* First 16-bit swap then 64-bit swap */
> - /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
> - * to eliminate all the swapping
> - */
> - vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
> -
> /* vq command w3 */
> vq_cmd_w3.u64 = 0;
> vq_cmd_w3.s.grp = 0;
> @@ -2150,9 +2120,9 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
> * GP op header, lengths are expected in bits.
> */
> vq_cmd_w0.u64 = 0;
> - vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
> - vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
> - vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
> + vq_cmd_w0.s.param1 = encr_data_len;
> + vq_cmd_w0.s.param2 = auth_data_len;
> + vq_cmd_w0.s.opcode = opcode.flags;
>
> /* consider iv len */
> if (flags == 0x0) {
> @@ -2279,7 +2249,7 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
> size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
>
> /* This is DPTR len incase of SG mode */
> - vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
> + vq_cmd_w0.s.dlen = size;
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> @@ -2293,12 +2263,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
> req->ist.ei1 = dptr_dma;
> req->ist.ei2 = rptr_dma;
>
> - /* First 16-bit swap then 64-bit swap */
> - /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
> - * to eliminate all the swapping
> - */
> - vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
> -
> /* vq command w3 */
> vq_cmd_w3.u64 = 0;
> vq_cmd_w3.s.grp = 0;
> @@ -2394,8 +2358,8 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
> * GP op header, lengths are expected in bits.
> */
> vq_cmd_w0.u64 = 0;
> - vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
> - vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
> + vq_cmd_w0.s.param1 = encr_data_len;
> + vq_cmd_w0.s.opcode = opcode.flags;
>
> /* consider iv len */
> encr_offset += iv_len;
> @@ -2480,7 +2444,7 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
> size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
>
> /* This is DPTR len incase of SG mode */
> - vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
> + vq_cmd_w0.s.dlen = size;
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> @@ -2494,12 +2458,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
> req->ist.ei1 = dptr_dma;
> req->ist.ei2 = rptr_dma;
>
> - /* First 16-bit swap then 64-bit swap */
> - /* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
> - * to eliminate all the swapping
> - */
> - vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
> -
> /* vq command w3 */
> vq_cmd_w3.u64 = 0;
> vq_cmd_w3.s.grp = 0;
> --
> 2.7.4
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [dpdk-dev] [PATCH 2/2] common/cpt: remove redundant code in datapath
2019-07-06 13:23 ` [dpdk-dev] [PATCH 2/2] common/cpt: remove redundant code in datapath Anoob Joseph
@ 2019-07-07 14:11 ` Anoob Joseph
0 siblings, 0 replies; 5+ messages in thread
From: Anoob Joseph @ 2019-07-07 14:11 UTC (permalink / raw)
To: Akhil Goyal, Pablo de Lara
Cc: Jerin Jacob Kollanukkaran, Narayana Prasad Raju Athreya, dev
Hi Akhil, Pablo
This patch is good to go if you don't have any comments.
Thanks,
Anoob
> -----Original Message-----
> From: Anoob Joseph <anoobj@marvell.com>
> Sent: Saturday, July 6, 2019 6:54 PM
> To: Akhil Goyal <akhil.goyal@nxp.com>; Pablo de Lara
> <pablo.de.lara.guarch@intel.com>
> Cc: Anoob Joseph <anoobj@marvell.com>; Jerin Jacob Kollanukkaran
> <jerinj@marvell.com>; Narayana Prasad Raju Athreya
> <pathreya@marvell.com>; dev@dpdk.org
> Subject: [PATCH 2/2] common/cpt: remove redundant code in datapath
>
> Removing redundant checks and unused local variables from datapath.
>
> Signed-off-by: Anoob Joseph <anoobj@marvell.com>
> ---
> drivers/common/cpt/cpt_ucode.h | 133 ++++++++++-------------------------------
> 1 file changed, 33 insertions(+), 100 deletions(-)
>
> diff --git a/drivers/common/cpt/cpt_ucode.h
> b/drivers/common/cpt/cpt_ucode.h index c589b58..e197e4e 100644
> --- a/drivers/common/cpt/cpt_ucode.h
> +++ b/drivers/common/cpt/cpt_ucode.h
> @@ -89,8 +89,7 @@ cpt_fc_ciph_validate_key_aes(uint16_t key_len) }
>
> static __rte_always_inline int
> -cpt_fc_ciph_validate_key(cipher_type_t type, struct cpt_ctx *cpt_ctx,
> - uint16_t key_len)
> +cpt_fc_ciph_set_type(cipher_type_t type, struct cpt_ctx *ctx, uint16_t
> +key_len)
> {
> int fc_type = 0;
> switch (type) {
> @@ -125,7 +124,7 @@ cpt_fc_ciph_validate_key(cipher_type_t type, struct
> cpt_ctx *cpt_ctx,
> if (unlikely(key_len != 16))
> return -1;
> /* No support for AEAD yet */
> - if (unlikely(cpt_ctx->hash_type))
> + if (unlikely(ctx->hash_type))
> return -1;
> fc_type = ZUC_SNOW3G;
> break;
> @@ -134,14 +133,16 @@ cpt_fc_ciph_validate_key(cipher_type_t type, struct
> cpt_ctx *cpt_ctx,
> if (unlikely(key_len != 16))
> return -1;
> /* No support for AEAD yet */
> - if (unlikely(cpt_ctx->hash_type))
> + if (unlikely(ctx->hash_type))
> return -1;
> fc_type = KASUMI;
> break;
> default:
> return -1;
> }
> - return fc_type;
> +
> + ctx->fc_type = fc_type;
> + return 0;
> }
>
> static __rte_always_inline void
> @@ -181,7 +182,6 @@ cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx
> *cpt_ctx, uint8_t *key,
> cpt_ctx->snow3g = 1;
> gen_key_snow3g(key, keyx);
> memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
> - cpt_ctx->fc_type = ZUC_SNOW3G;
> cpt_ctx->zsk_flags = 0;
> }
>
> @@ -192,7 +192,6 @@ cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx,
> uint8_t *key,
> cpt_ctx->snow3g = 0;
> memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
> memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
> - cpt_ctx->fc_type = ZUC_SNOW3G;
> cpt_ctx->zsk_flags = 0;
> }
>
> @@ -203,7 +202,6 @@ cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx
> *cpt_ctx, uint8_t *key,
> cpt_ctx->k_ecb = 1;
> memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
> cpt_ctx->zsk_flags = 0;
> - cpt_ctx->fc_type = KASUMI;
> }
>
> static __rte_always_inline void
> @@ -212,7 +210,6 @@ cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx
> *cpt_ctx, uint8_t *key, {
> memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
> cpt_ctx->zsk_flags = 0;
> - cpt_ctx->fc_type = KASUMI;
> }
>
> static __rte_always_inline int
> @@ -222,15 +219,13 @@ cpt_fc_ciph_set_key(void *ctx, cipher_type_t type,
> uint8_t *key,
> struct cpt_ctx *cpt_ctx = ctx;
> mc_fc_context_t *fctx = &cpt_ctx->fctx;
> uint64_t *ctrl_flags = NULL;
> - int fc_type;
> + int ret;
>
> - /* Validate key before proceeding */
> - fc_type = cpt_fc_ciph_validate_key(type, cpt_ctx, key_len);
> - if (unlikely(fc_type == -1))
> + ret = cpt_fc_ciph_set_type(type, cpt_ctx, key_len);
> + if (unlikely(ret))
> return -1;
>
> - if (fc_type == FC_GEN) {
> - cpt_ctx->fc_type = FC_GEN;
> + if (cpt_ctx->fc_type == FC_GEN) {
> ctrl_flags = (uint64_t *)&(fctx->enc.enc_ctrl.flags);
> *ctrl_flags = rte_be_to_cpu_64(*ctrl_flags);
> /*
> @@ -467,7 +462,6 @@ cpt_digest_gen_prep(uint32_t flags, {
> struct cpt_request_info *req;
> uint32_t size, i;
> - int32_t m_size;
> uint16_t data_len, mac_len, key_len;
> auth_type_t hash_type;
> buf_ptr_t *meta_p;
> @@ -488,7 +482,6 @@ cpt_digest_gen_prep(uint32_t flags,
>
> m_vaddr = meta_p->vaddr;
> m_dma = meta_p->dma_addr;
> - m_size = meta_p->size;
>
> /*
> * Save initial space that followed app data for completion code & @@ -
> 504,14 +497,12 @@ cpt_digest_gen_prep(uint32_t flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> req = m_vaddr;
>
> size = sizeof(struct cpt_request_info);
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> hash_type = ctx->hash_type;
> mac_len = ctx->mac_len;
> @@ -625,7 +616,6 @@ cpt_digest_gen_prep(uint32_t flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* cpt alternate completion address saved earlier */
> req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); @@ -678,13
> +668,11 @@ cpt_enc_hmac_prep(uint32_t flags,
> vq_cmd_word3_t vq_cmd_w3;
> void *c_vaddr;
> uint64_t c_dma;
> - int32_t m_size;
> opcode_info_t opcode;
>
> meta_p = &fc_params->meta_buf;
> m_vaddr = meta_p->vaddr;
> m_dma = meta_p->dma_addr;
> - m_size = meta_p->size;
>
> encr_offset = ENCR_OFFSET(d_offs);
> auth_offset = AUTH_OFFSET(d_offs);
> @@ -720,7 +708,6 @@ cpt_enc_hmac_prep(uint32_t flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* start cpt request info struct at 8 byte boundary */
> size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) - @@ -731,7 +718,6 @@
> cpt_enc_hmac_prep(uint32_t flags,
> size += sizeof(struct cpt_request_info);
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> if (hash_type == GMAC_TYPE)
> encr_data_len = 0;
> @@ -851,7 +837,6 @@ cpt_enc_hmac_prep(uint32_t flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> opcode.s.major |= CPT_DMA_MODE;
>
> @@ -1003,7 +988,6 @@ cpt_enc_hmac_prep(uint32_t flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* cpt alternate completion address saved earlier */
> req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); @@
> -1047,7 +1031,7 @@ cpt_dec_hmac_prep(uint32_t flags,
> uint32_t iv_offset = 0, size;
> int32_t inputlen, outputlen, enc_dlen, auth_dlen;
> struct cpt_ctx *cpt_ctx;
> - int32_t hash_type, mac_len, m_size;
> + int32_t hash_type, mac_len;
> uint8_t iv_len = 16;
> struct cpt_request_info *req;
> buf_ptr_t *meta_p, *aad_buf = NULL;
> @@ -1065,7 +1049,6 @@ cpt_dec_hmac_prep(uint32_t flags,
> meta_p = &fc_params->meta_buf;
> m_vaddr = meta_p->vaddr;
> m_dma = meta_p->dma_addr;
> - m_size = meta_p->size;
>
> encr_offset = ENCR_OFFSET(d_offs);
> auth_offset = AUTH_OFFSET(d_offs);
> @@ -1131,7 +1114,6 @@ cpt_dec_hmac_prep(uint32_t flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* start cpt request info structure at 8 byte alignment */
> size = (uint8_t *)RTE_PTR_ALIGN(m_vaddr, 8) - @@ -1142,7 +1124,6
> @@ cpt_dec_hmac_prep(uint32_t flags,
> size += sizeof(struct cpt_request_info);
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* Decryption */
> opcode.s.major = CPT_MAJOR_OP_FC;
> @@ -1229,7 +1210,6 @@ cpt_dec_hmac_prep(uint32_t flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> opcode.s.major |= CPT_DMA_MODE;
>
> @@ -1393,7 +1373,6 @@ cpt_dec_hmac_prep(uint32_t flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* cpt alternate completion address saved earlier */
> req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); @@
> -1444,7 +1423,7 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
> buf_ptr_t *buf_p;
> uint32_t encr_offset = 0, auth_offset = 0;
> uint32_t encr_data_len = 0, auth_data_len = 0;
> - int flags, iv_len = 16, m_size;
> + int flags, iv_len = 16;
> void *m_vaddr, *c_vaddr;
> uint64_t m_dma, c_dma, offset_ctrl;
> uint64_t *offset_vaddr, offset_dma;
> @@ -1456,7 +1435,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
> buf_p = &params->meta_buf;
> m_vaddr = buf_p->vaddr;
> m_dma = buf_p->dma_addr;
> - m_size = buf_p->size;
>
> cpt_ctx = params->ctx_buf.vaddr;
> flags = cpt_ctx->zsk_flags;
> @@ -1478,7 +1456,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* Reserve memory for cpt request info */
> req = m_vaddr;
> @@ -1486,7 +1463,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
> size = sizeof(struct cpt_request_info);
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
>
> @@ -1616,7 +1592,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
>
> m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
> m_dma += OFF_CTRL_LEN + iv_len;
> - m_size -= OFF_CTRL_LEN + iv_len;
>
> opcode.s.major |= CPT_DMA_MODE;
>
> @@ -1726,7 +1701,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* cpt alternate completion address saved earlier */
> req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); @@
> -1774,7 +1748,7 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
> buf_ptr_t *buf_p;
> uint32_t encr_offset;
> uint32_t encr_data_len;
> - int flags, m_size;
> + int flags;
> void *m_vaddr, *c_vaddr;
> uint64_t m_dma, c_dma;
> uint64_t *offset_vaddr, offset_dma;
> @@ -1786,7 +1760,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
> buf_p = &params->meta_buf;
> m_vaddr = buf_p->vaddr;
> m_dma = buf_p->dma_addr;
> - m_size = buf_p->size;
>
> /*
> * Microcode expects offsets in bytes
> @@ -1813,7 +1786,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* Reserve memory for cpt request info */
> req = m_vaddr;
> @@ -1821,7 +1793,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
> size = sizeof(struct cpt_request_info);
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> opcode.s.major = CPT_MAJOR_OP_ZUC_SNOW3G;
>
> @@ -1917,7 +1888,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
>
> m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
> m_dma += OFF_CTRL_LEN + iv_len;
> - m_size -= OFF_CTRL_LEN + iv_len;
>
> opcode.s.major |= CPT_DMA_MODE;
>
> @@ -2000,7 +1970,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* cpt alternate completion address saved earlier */
> req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); @@
> -2049,7 +2018,7 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
> buf_ptr_t *buf_p;
> uint32_t encr_offset, auth_offset;
> uint32_t encr_data_len, auth_data_len;
> - int flags, m_size;
> + int flags;
> uint8_t *iv_s, *iv_d, iv_len = 8;
> uint8_t dir = 0;
> void *m_vaddr, *c_vaddr;
> @@ -2067,7 +2036,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
> buf_p = &params->meta_buf;
> m_vaddr = buf_p->vaddr;
> m_dma = buf_p->dma_addr;
> - m_size = buf_p->size;
>
> encr_offset = ENCR_OFFSET(d_offs) / 8;
> auth_offset = AUTH_OFFSET(d_offs) / 8; @@ -2100,7 +2068,6 @@
> cpt_kasumi_enc_prep(uint32_t req_flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* Reserve memory for cpt request info */
> req = m_vaddr;
> @@ -2108,7 +2075,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
> size = sizeof(struct cpt_request_info);
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
>
> @@ -2136,7 +2102,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
>
> m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
> m_dma += OFF_CTRL_LEN + iv_len;
> - m_size -= OFF_CTRL_LEN + iv_len;
>
> /* DPTR has SG list */
> in_buffer = m_vaddr;
> @@ -2253,7 +2218,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* cpt alternate completion address saved earlier */
> req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); @@ -2299,7
> +2263,7 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
> buf_ptr_t *buf_p;
> uint32_t encr_offset;
> uint32_t encr_data_len;
> - int flags, m_size;
> + int flags;
> uint8_t dir = 0;
> void *m_vaddr, *c_vaddr;
> uint64_t m_dma, c_dma;
> @@ -2316,7 +2280,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
> buf_p = &params->meta_buf;
> m_vaddr = buf_p->vaddr;
> m_dma = buf_p->dma_addr;
> - m_size = buf_p->size;
>
> encr_offset = ENCR_OFFSET(d_offs) / 8;
> encr_data_len = ENCR_DLEN(d_lens);
> @@ -2338,7 +2301,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* Reserve memory for cpt request info */
> req = m_vaddr;
> @@ -2346,7 +2308,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
> size = sizeof(struct cpt_request_info);
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> opcode.s.major = CPT_MAJOR_OP_KASUMI | CPT_DMA_MODE;
>
> @@ -2373,7 +2334,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
>
> m_vaddr = (uint8_t *)m_vaddr + OFF_CTRL_LEN + iv_len;
> m_dma += OFF_CTRL_LEN + iv_len;
> - m_size -= OFF_CTRL_LEN + iv_len;
>
> /* DPTR has SG list */
> in_buffer = m_vaddr;
> @@ -2448,7 +2408,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
>
> m_vaddr = (uint8_t *)m_vaddr + size;
> m_dma += size;
> - m_size -= size;
>
> /* cpt alternate completion address saved earlier */
> req->alternate_caddr = (uint64_t *)((uint8_t *)c_vaddr - 8); @@ -2627,9
> +2586,9 @@ fill_sess_aead(struct rte_crypto_sym_xform *xform,
> cipher_type_t enc_type = 0; /* NULL Cipher type */
> auth_type_t auth_type = 0; /* NULL Auth type */
> uint32_t cipher_key_len = 0;
> - uint8_t zsk_flag = 0, aes_gcm = 0;
> + uint8_t aes_gcm = 0;
> aead_form = &xform->aead;
> - void *ctx;
> + void *ctx = SESS_PRIV(sess);
>
> if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
> aead_form->algo == RTE_CRYPTO_AEAD_AES_GCM) { @@ -2663,13
> +2622,12 @@ fill_sess_aead(struct rte_crypto_sym_xform *xform,
> (unsigned int long)aead_form->key.length);
> return -1;
> }
> - sess->zsk_flag = zsk_flag;
> + sess->zsk_flag = 0;
> sess->aes_gcm = aes_gcm;
> sess->mac_len = aead_form->digest_length;
> sess->iv_offset = aead_form->iv.offset;
> sess->iv_length = aead_form->iv.length;
> sess->aad_length = aead_form->aad_length;
> - ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc)),
>
> cpt_fc_ciph_set_key(ctx, enc_type, aead_form->key.data,
> aead_form->key.length, NULL);
> @@ -2686,10 +2644,7 @@ fill_sess_cipher(struct rte_crypto_sym_xform
> *xform,
> struct rte_crypto_cipher_xform *c_form;
> cipher_type_t enc_type = 0; /* NULL Cipher type */
> uint32_t cipher_key_len = 0;
> - uint8_t zsk_flag = 0, aes_gcm = 0, aes_ctr = 0, is_null = 0;
> -
> - if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
> - return -1;
> + uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
>
> c_form = &xform->cipher;
>
> @@ -2771,7 +2726,7 @@ fill_sess_cipher(struct rte_crypto_sym_xform *xform,
> }
>
> sess->zsk_flag = zsk_flag;
> - sess->aes_gcm = aes_gcm;
> + sess->aes_gcm = 0;
> sess->aes_ctr = aes_ctr;
> sess->iv_offset = c_form->iv.offset;
> sess->iv_length = c_form->iv.length;
> @@ -2791,9 +2746,6 @@ fill_sess_auth(struct rte_crypto_sym_xform *xform,
> auth_type_t auth_type = 0; /* NULL Auth type */
> uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
>
> - if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
> - goto error_out;
> -
> a_form = &xform->auth;
>
> if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY) @@ -2865,11
> +2817,11 @@ fill_sess_auth(struct rte_crypto_sym_xform *xform,
> case RTE_CRYPTO_AUTH_AES_CBC_MAC:
> CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
> a_form->algo);
> - goto error_out;
> + return -1;
> default:
> CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
> a_form->algo);
> - goto error_out;
> + return -1;
> }
>
> sess->zsk_flag = zsk_flag;
> @@ -2884,9 +2836,6 @@ fill_sess_auth(struct rte_crypto_sym_xform *xform,
> a_form->key.length, a_form->digest_length);
>
> return 0;
> -
> -error_out:
> - return -1;
> }
>
> static __rte_always_inline int
> @@ -2896,11 +2845,7 @@ fill_sess_gmac(struct rte_crypto_sym_xform
> *xform,
> struct rte_crypto_auth_xform *a_form;
> cipher_type_t enc_type = 0; /* NULL Cipher type */
> auth_type_t auth_type = 0; /* NULL Auth type */
> - uint8_t zsk_flag = 0, aes_gcm = 0;
> - void *ctx;
> -
> - if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH)
> - return -1;
> + void *ctx = SESS_PRIV(sess);
>
> a_form = &xform->auth;
>
> @@ -2924,13 +2869,12 @@ fill_sess_gmac(struct rte_crypto_sym_xform
> *xform,
> return -1;
> }
>
> - sess->zsk_flag = zsk_flag;
> - sess->aes_gcm = aes_gcm;
> + sess->zsk_flag = 0;
> + sess->aes_gcm = 0;
> sess->is_gmac = 1;
> sess->iv_offset = a_form->iv.offset;
> sess->iv_length = a_form->iv.length;
> sess->mac_len = a_form->digest_length;
> - ctx = (void *)((uint8_t *)sess + sizeof(struct cpt_sess_misc)),
>
> cpt_fc_ciph_set_key(ctx, enc_type, a_form->key.data,
> a_form->key.length, NULL);
> @@ -3130,9 +3074,6 @@ fill_fc_params(struct rte_crypto_op *cop,
> uint64_t d_offs, d_lens;
> struct rte_mbuf *m_src, *m_dst;
> uint8_t cpt_op = sess_misc->cpt_op;
> - uint8_t zsk_flag = sess_misc->zsk_flag;
> - uint8_t aes_gcm = sess_misc->aes_gcm;
> - uint16_t mac_len = sess_misc->mac_len;
> #ifdef CPT_ALWAYS_USE_SG_MODE
> uint8_t inplace = 0;
> #else
> @@ -3158,21 +3099,17 @@ fill_fc_params(struct rte_crypto_op *cop,
> }
> }
>
> - if (zsk_flag) {
> + if (sess_misc->zsk_flag) {
> fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
> uint8_t *,
> sess_misc->auth_iv_offset);
> - if (zsk_flag == K_F9) {
> - CPT_LOG_DP_ERR("Should not reach here for "
> - "kasumi F9\n");
> - }
> - if (zsk_flag != ZS_EA)
> + if (sess_misc->zsk_flag != ZS_EA)
> inplace = 0;
> }
> m_src = sym_op->m_src;
> m_dst = sym_op->m_dst;
>
> - if (aes_gcm) {
> + if (sess_misc->aes_gcm) {
> uint8_t *salt;
> uint8_t *aad_data;
> uint16_t aad_len;
> @@ -3206,7 +3143,7 @@ fill_fc_params(struct rte_crypto_op *cop,
> sess_misc->salt = *(uint32_t *)salt;
> }
> fc_params.iv_buf = salt + 4;
> - if (likely(mac_len)) {
> + if (likely(sess_misc->mac_len)) {
> struct rte_mbuf *m = (cpt_op & CPT_OP_ENCODE) ?
> m_dst :
> m_src;
>
> @@ -3249,7 +3186,7 @@ fill_fc_params(struct rte_crypto_op *cop,
> }
> fc_params.iv_buf = salt + 4;
> }
> - if (likely(mac_len)) {
> + if (likely(sess_misc->mac_len)) {
> struct rte_mbuf *m;
>
> m = (cpt_op & CPT_OP_ENCODE) ? m_dst : m_src; @@
> -3308,7 +3245,6 @@ fill_fc_params(struct rte_crypto_op *cop,
> uint32_t pkt_len;
>
> /* Try to make room as much as src has */
> - m_dst = sym_op->m_dst;
> pkt_len = rte_pktmbuf_pkt_len(m_dst);
>
> if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
> @@ -3501,7 +3437,6 @@ fill_digest_params(struct rte_crypto_op *cop,
> uint64_t d_offs = 0, d_lens;
> struct rte_mbuf *m_src, *m_dst;
> uint16_t auth_op = sess->cpt_op & CPT_OP_AUTH_MASK;
> - uint8_t zsk_flag = sess->zsk_flag;
> uint16_t mac_len = sess->mac_len;
> fc_params_t params;
> char src[SRC_IOV_SIZE];
> @@ -3532,7 +3467,7 @@ fill_digest_params(struct rte_crypto_op *cop,
>
> flags = VALID_MAC_BUF;
> params.src_iov = (void *)src;
> - if (unlikely(zsk_flag)) {
> + if (unlikely(sess->zsk_flag)) {
> /*
> * Since for Zuc, Kasumi, Snow3g offsets are in bits
> * we will send pass through even for auth only case, @@ -
> 3542,10 +3477,9 @@ fill_digest_params(struct rte_crypto_op *cop,
> auth_range_off = 0;
> params.auth_iv_buf = rte_crypto_op_ctod_offset(cop,
> uint8_t *, sess->auth_iv_offset);
> - if (zsk_flag == K_F9) {
> + if (sess->zsk_flag == K_F9) {
> uint32_t length_in_bits, num_bytes;
> uint8_t *src, direction = 0;
> - uint32_t counter_num_bytes;
>
> memcpy(iv_buf, rte_pktmbuf_mtod(cop->sym->m_src,
> uint8_t *), 8);
> @@ -3555,10 +3489,9 @@ fill_digest_params(struct rte_crypto_op *cop,
> */
> length_in_bits = cop->sym->auth.data.length;
> num_bytes = (length_in_bits >> 3);
> - counter_num_bytes = num_bytes;
> src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
> find_kasumif9_direction_and_length(src,
> - counter_num_bytes,
> + num_bytes,
> &length_in_bits,
> &direction);
> length_in_bits -= 64;
> --
> 2.7.4
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [dpdk-dev] [PATCH 1/2] common/cpt: remove redundant bit swaps
2019-07-06 13:23 [dpdk-dev] [PATCH 1/2] common/cpt: remove redundant bit swaps Anoob Joseph
2019-07-06 13:23 ` [dpdk-dev] [PATCH 2/2] common/cpt: remove redundant code in datapath Anoob Joseph
2019-07-07 14:10 ` [dpdk-dev] [PATCH 1/2] common/cpt: remove redundant bit swaps Anoob Joseph
@ 2019-07-14 13:55 ` Thomas Monjalon
2 siblings, 0 replies; 5+ messages in thread
From: Thomas Monjalon @ 2019-07-14 13:55 UTC (permalink / raw)
To: Anoob Joseph
Cc: dev, Akhil Goyal, Pablo de Lara, Jerin Jacob, Narayana Prasad
06/07/2019 15:23, Anoob Joseph:
> The bit swaps can be removed by re-arranging the structure.
>
> Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Series applied, thanks
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2019-07-14 13:55 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-07-06 13:23 [dpdk-dev] [PATCH 1/2] common/cpt: remove redundant bit swaps Anoob Joseph
2019-07-06 13:23 ` [dpdk-dev] [PATCH 2/2] common/cpt: remove redundant code in datapath Anoob Joseph
2019-07-07 14:11 ` Anoob Joseph
2019-07-07 14:10 ` [dpdk-dev] [PATCH 1/2] common/cpt: remove redundant bit swaps Anoob Joseph
2019-07-14 13:55 ` Thomas Monjalon
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).