* [dpdk-dev] [PATCH 1/2] crypto/dpaa_sec: fix performance issue
@ 2020-01-21 10:45 Gagandeep Singh
2020-01-21 10:45 ` [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: reorganize structure members Gagandeep Singh
2020-01-27 9:07 ` [dpdk-dev] [PATCH 1/2] crypto/dpaa_sec: fix performance issue Gagandeep Singh
0 siblings, 2 replies; 5+ messages in thread
From: Gagandeep Singh @ 2020-01-21 10:45 UTC (permalink / raw)
To: akhil.goyal, dev; +Cc: Gagandeep Singh
The DPAA sec driver performs virtual-to-physical address
translation in its data path, and it calls the
dpaax_iova_table_update() API on every translation, which is
very costly.
This patch moves the dpaax_iova_table_update() call into
rte_dpaa_mem_ptov(), where it is made only when the translation
is not found in the DPAAX table.
Fixes: 033968bdc28 ("drivers/crypto/dpaa_sec: update DPAA iova table in dpaa_mem_vtop")
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/bus/dpaa/rte_dpaa_bus.h | 18 ++++-
drivers/crypto/dpaa_sec/dpaa_sec.c | 117 ++++++++++++-----------------
2 files changed, 63 insertions(+), 72 deletions(-)
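
Below is a minimal sketch of the resulting lookup flow in
rte_dpaa_mem_ptov(), for reference only. It assumes the existing
fast-path lookup through dpaax_iova_table_get_va() already sits at
the top of the function (as in the removed dpaa_mem_ptov() copy
further down), so it illustrates the intent rather than the exact
header contents:

static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
{
	void *va;

	/* Fast path: translation already cached in the DPAAX table */
	va = (void *)dpaax_iova_table_get_va(paddr);
	if (likely(va != NULL))
		return va;

	/* Slow path: search the full memseg list once, then cache the
	 * translation so later lookups hit the table.
	 */
	va = rte_mem_iova2virt(paddr);
	dpaax_iova_table_update(paddr, va, RTE_CACHE_LINE_SIZE);

	return va;
}
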
diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
index 9bf2cd9d6..373aca978 100644
--- a/drivers/bus/dpaa/rte_dpaa_bus.h
+++ b/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -132,7 +132,23 @@ static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
}
/* If not, Fallback to full memseg list searching */
- return rte_mem_iova2virt(paddr);
+ va = rte_mem_iova2virt(paddr);
+
+ dpaax_iova_table_update(paddr, va, RTE_CACHE_LINE_SIZE);
+
+ return va;
+}
+
+static inline rte_iova_t
+rte_dpaa_mem_vtop(void *vaddr)
+{
+ const struct rte_memseg *ms;
+
+ ms = rte_mem_virt2memseg(vaddr, NULL);
+ if (ms)
+ return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+
+ return (size_t)NULL;
}
/**
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index e0b307cec..df684d265 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -94,31 +94,6 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
return ctx;
}
-static inline rte_iova_t
-dpaa_mem_vtop(void *vaddr)
-{
- const struct rte_memseg *ms;
-
- ms = rte_mem_virt2memseg(vaddr, NULL);
- if (ms) {
- dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
- return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
- }
- return (size_t)NULL;
-}
-
-static inline void *
-dpaa_mem_ptov(rte_iova_t paddr)
-{
- void *va;
-
- va = (void *)dpaax_iova_table_get_va(paddr);
- if (likely(va))
- return va;
-
- return rte_mem_iova2virt(paddr);
-}
-
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
struct qman_fq *fq,
@@ -183,7 +158,7 @@ dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
* sg[0] is for output
* sg[1] for input
*/
- job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
@@ -304,12 +279,12 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
cipherdata.key =
- (size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
+ (size_t)rte_dpaa_mem_vtop((void *)(size_t)cipherdata.key);
cipherdata.key_type = RTA_DATA_PTR;
}
if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
authdata.key =
- (size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
+ (size_t)rte_dpaa_mem_vtop((void *)(size_t)authdata.key);
authdata.key_type = RTA_DATA_PTR;
}
@@ -405,14 +380,14 @@ dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
if (cdb->sh_desc[2] & 1)
cipherdata.key_type = RTA_DATA_IMM;
else {
- cipherdata.key = (size_t)dpaa_mem_vtop(
+ cipherdata.key = (size_t)rte_dpaa_mem_vtop(
(void *)(size_t)cipherdata.key);
cipherdata.key_type = RTA_DATA_PTR;
}
if (cdb->sh_desc[2] & (1<<1))
authdata.key_type = RTA_DATA_IMM;
else {
- authdata.key = (size_t)dpaa_mem_vtop(
+ authdata.key = (size_t)rte_dpaa_mem_vtop(
(void *)(size_t)authdata.key);
authdata.key_type = RTA_DATA_PTR;
}
@@ -591,14 +566,14 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
if (cdb->sh_desc[2] & 1)
alginfo_c.key_type = RTA_DATA_IMM;
else {
- alginfo_c.key = (size_t)dpaa_mem_vtop(
+ alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
(void *)(size_t)alginfo_c.key);
alginfo_c.key_type = RTA_DATA_PTR;
}
if (cdb->sh_desc[2] & (1<<1))
alginfo_a.key_type = RTA_DATA_IMM;
else {
- alginfo_a.key = (size_t)dpaa_mem_vtop(
+ alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
(void *)(size_t)alginfo_a.key);
alginfo_a.key_type = RTA_DATA_PTR;
}
@@ -674,7 +649,7 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
* sg[0] is for output
* sg[1] for input
*/
- job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
@@ -768,7 +743,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_sg->extension = 1;
in_sg->final = 1;
in_sg->length = data_len;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
/* 1st seg */
sg = in_sg + 1;
@@ -788,7 +763,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
} else {
sg->length = ses->iv.length;
}
- qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
in_sg->length += sg->length;
cpu_to_hw_sg(sg);
sg++;
@@ -821,7 +796,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
rte_memcpy(old_digest, sym->auth.digest.data,
ses->digest_length);
- start_addr = dpaa_mem_vtop(old_digest);
+ start_addr = rte_dpaa_mem_vtop(old_digest);
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
in_sg->length += ses->digest_length;
@@ -888,7 +863,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_sg->extension = 1;
in_sg->final = 1;
in_sg->length = data_len;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
sg = &cf->sg[2];
if (ses->iv.length) {
@@ -906,7 +881,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
} else {
sg->length = ses->iv.length;
}
- qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
in_sg->length += sg->length;
cpu_to_hw_sg(sg);
sg++;
@@ -923,7 +898,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
rte_memcpy(old_digest, sym->auth.digest.data,
ses->digest_length);
/* let's check digest by hw */
- start_addr = dpaa_mem_vtop(old_digest);
+ start_addr = rte_dpaa_mem_vtop(old_digest);
sg++;
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
@@ -987,7 +962,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
out_sg = &cf->sg[0];
out_sg->extension = 1;
out_sg->length = data_len;
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -1016,11 +991,11 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_sg->length = data_len + ses->iv.length;
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* IV */
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
cpu_to_hw_sg(sg);
@@ -1098,11 +1073,11 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg->extension = 1;
sg->final = 1;
sg->length = data_len + ses->iv.length;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
cpu_to_hw_sg(sg);
@@ -1163,7 +1138,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output sg entries */
sg = &cf->sg[2];
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -1206,18 +1181,18 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input sg entries */
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* 1st seg IV */
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
cpu_to_hw_sg(sg);
/* 2nd seg auth only */
if (ses->auth_only_len) {
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
sg->length = ses->auth_only_len;
cpu_to_hw_sg(sg);
}
@@ -1243,7 +1218,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
memcpy(ctx->digest, sym->aead.digest.data,
ses->digest_length);
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
}
sg->final = 1;
@@ -1281,9 +1256,9 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
if (is_encode(ses)) {
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
cpu_to_hw_sg(sg);
@@ -1291,7 +1266,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
if (ses->auth_only_len) {
qm_sg_entry_set64(sg,
- dpaa_mem_vtop(sym->aead.aad.data));
+ rte_dpaa_mem_vtop(sym->aead.aad.data));
sg->length = ses->auth_only_len;
length += sg->length;
cpu_to_hw_sg(sg);
@@ -1303,7 +1278,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg->final = 1;
cpu_to_hw_sg(sg);
} else {
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
cpu_to_hw_sg(sg);
@@ -1311,7 +1286,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
if (ses->auth_only_len) {
qm_sg_entry_set64(sg,
- dpaa_mem_vtop(sym->aead.aad.data));
+ rte_dpaa_mem_vtop(sym->aead.aad.data));
sg->length = ses->auth_only_len;
length += sg->length;
cpu_to_hw_sg(sg);
@@ -1326,7 +1301,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
@@ -1340,7 +1315,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg,
dst_start_addr + sym->aead.data.offset);
sg->length = sym->aead.data.length;
@@ -1409,7 +1384,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output sg entries */
sg = &cf->sg[2];
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -1451,11 +1426,11 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input sg entries */
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* 1st seg IV */
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
cpu_to_hw_sg(sg);
@@ -1481,7 +1456,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
memcpy(ctx->digest, sym->auth.digest.data,
ses->digest_length);
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
}
sg->final = 1;
@@ -1518,9 +1493,9 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
if (is_encode(ses)) {
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
cpu_to_hw_sg(sg);
@@ -1532,7 +1507,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg->final = 1;
cpu_to_hw_sg(sg);
} else {
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
cpu_to_hw_sg(sg);
@@ -1548,7 +1523,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
@@ -1562,7 +1537,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length;
length = sg->length;
@@ -1656,7 +1631,7 @@ build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
out_sg = &cf->sg[0];
out_sg->extension = 1;
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
/* 1st seg */
sg = &cf->sg[2];
@@ -1689,7 +1664,7 @@ build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_len = mbuf->data_len;
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
/* 1st seg */
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
@@ -1884,7 +1859,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
fd->opaque_addr = 0;
fd->cmd = 0;
- qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
+ qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
fd->_format1 = qm_fd_compound;
fd->length29 = 2 * sizeof(struct qm_sg_entry);
@@ -2349,7 +2324,7 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
}
}
ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
- dpaa_mem_vtop(&sess->cdb),
+ rte_dpaa_mem_vtop(&sess->cdb),
qman_fq_fqid(&qp->outq));
if (ret)
DPAA_SEC_ERR("Unable to init sec queue");
@@ -3149,7 +3124,7 @@ dpaa_sec_process_parallel_event(void *event,
* sg[0] is for output
* sg[1] for input
*/
- job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
@@ -3204,7 +3179,7 @@ dpaa_sec_process_atomic_event(void *event,
* sg[0] is for output
* sg[1] for input
*/
- job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
--
2.17.1
* [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: reorganize structure members
2020-01-21 10:45 [dpdk-dev] [PATCH 1/2] crypto/dpaa_sec: fix performance issue Gagandeep Singh
@ 2020-01-21 10:45 ` Gagandeep Singh
2020-01-27 9:07 ` [dpdk-dev] [PATCH 1/2] crypto/dpaa_sec: fix performance issue Gagandeep Singh
1 sibling, 0 replies; 5+ messages in thread
From: Gagandeep Singh @ 2020-01-21 10:45 UTC (permalink / raw)
To: akhil.goyal, dev; +Cc: Gagandeep Singh
This patch reorganizes the members of the session structure
used by the driver in its data path, moving the hot members to
the front to improve performance.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/crypto/dpaa_sec/dpaa_sec.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
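
For reference, a small standalone sketch of the idea; the structure
and field sizes below are hypothetical stand-ins, not the real
dpaa_sec_session layout. Keeping the members touched on every
enqueue/dequeue at the start of the session keeps them in the first
cache lines fetched, which offsetof() can confirm:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the session; sizes are illustrative only */
struct session_layout {
	unsigned char cdb[64];   /* cmd block, read on every enqueue */
	void *qp[4];             /* per-lcore queue pair pointers */
	void *inq[4];            /* per-lcore input frame queues */
	unsigned char dir;       /* colder configuration fields follow */
	unsigned char ctxt;
	unsigned char key[128];
};

int main(void)
{
	/* Hot members now start at offset 0, within the first cache lines */
	printf("cdb=%zu qp=%zu inq=%zu dir=%zu\n",
	       offsetof(struct session_layout, cdb),
	       offsetof(struct session_layout, qp),
	       offsetof(struct session_layout, inq),
	       offsetof(struct session_layout, dir));
	return 0;
}
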
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 3ecc7eae5..2cd50cc8d 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016 NXP
+ * Copyright 2016-2020 NXP
*
*/
@@ -117,6 +117,9 @@ struct sec_pdcp_ctxt {
};
#endif
typedef struct dpaa_sec_session_entry {
+ struct sec_cdb cdb; /**< cmd block associated with qp */
+ struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
+ struct qman_fq *inq[MAX_DPAA_CORES];
uint8_t dir; /*!< Operation Direction */
uint8_t ctxt; /*!< Session Context Type */
enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
@@ -169,9 +172,6 @@ typedef struct dpaa_sec_session_entry {
struct sec_pdcp_ctxt pdcp;
#endif
};
- struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
- struct qman_fq *inq[MAX_DPAA_CORES];
- struct sec_cdb cdb; /**< cmd block associated with qp */
} dpaa_sec_session;
struct dpaa_sec_qp {
--
2.17.1
* [dpdk-dev] [PATCH 1/2] crypto/dpaa_sec: fix performance issue
2020-01-21 10:45 [dpdk-dev] [PATCH 1/2] crypto/dpaa_sec: fix performance issue Gagandeep Singh
2020-01-21 10:45 ` [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: reorganize structure members Gagandeep Singh
@ 2020-01-27 9:07 ` Gagandeep Singh
2020-01-27 9:07 ` [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: reorganize structure members Gagandeep Singh
2020-01-28 6:16 ` [dpdk-dev] [PATCH 1/2] crypto/dpaa_sec: fix performance issue Akhil Goyal
1 sibling, 2 replies; 5+ messages in thread
From: Gagandeep Singh @ 2020-01-27 9:07 UTC (permalink / raw)
To: dev, akhil.goyal; +Cc: Gagandeep Singh
The DPAA sec driver performs virtual-to-physical address
translation in its data path, and it calls the
dpaax_iova_table_update() API on every translation, which is
very costly.
This patch moves the dpaax_iova_table_update() call into
rte_dpaa_mem_ptov(), where it is made only when the translation
is not found in the DPAAX table.
Fixes: 033968bdc28 ("drivers/crypto/dpaa_sec: update DPAA iova table in dpaa_mem_vtop")
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/bus/dpaa/rte_dpaa_bus.h | 18 ++++-
drivers/crypto/dpaa_sec/dpaa_sec.c | 117 +++++++++++-----------------
drivers/crypto/dpaa_sec/meson.build | 2 +-
3 files changed, 64 insertions(+), 73 deletions(-)
diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
index 9bf2cd9d6..373aca978 100644
--- a/drivers/bus/dpaa/rte_dpaa_bus.h
+++ b/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -132,7 +132,23 @@ static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
}
/* If not, Fallback to full memseg list searching */
- return rte_mem_iova2virt(paddr);
+ va = rte_mem_iova2virt(paddr);
+
+ dpaax_iova_table_update(paddr, va, RTE_CACHE_LINE_SIZE);
+
+ return va;
+}
+
+static inline rte_iova_t
+rte_dpaa_mem_vtop(void *vaddr)
+{
+ const struct rte_memseg *ms;
+
+ ms = rte_mem_virt2memseg(vaddr, NULL);
+ if (ms)
+ return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+
+ return (size_t)NULL;
}
/**
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index e0b307cec..df684d265 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -94,31 +94,6 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
return ctx;
}
-static inline rte_iova_t
-dpaa_mem_vtop(void *vaddr)
-{
- const struct rte_memseg *ms;
-
- ms = rte_mem_virt2memseg(vaddr, NULL);
- if (ms) {
- dpaax_iova_table_update(ms->iova, ms->addr, ms->len);
- return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
- }
- return (size_t)NULL;
-}
-
-static inline void *
-dpaa_mem_ptov(rte_iova_t paddr)
-{
- void *va;
-
- va = (void *)dpaax_iova_table_get_va(paddr);
- if (likely(va))
- return va;
-
- return rte_mem_iova2virt(paddr);
-}
-
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
struct qman_fq *fq,
@@ -183,7 +158,7 @@ dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
* sg[0] is for output
* sg[1] for input
*/
- job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
@@ -304,12 +279,12 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
cipherdata.key =
- (size_t)dpaa_mem_vtop((void *)(size_t)cipherdata.key);
+ (size_t)rte_dpaa_mem_vtop((void *)(size_t)cipherdata.key);
cipherdata.key_type = RTA_DATA_PTR;
}
if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
authdata.key =
- (size_t)dpaa_mem_vtop((void *)(size_t)authdata.key);
+ (size_t)rte_dpaa_mem_vtop((void *)(size_t)authdata.key);
authdata.key_type = RTA_DATA_PTR;
}
@@ -405,14 +380,14 @@ dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
if (cdb->sh_desc[2] & 1)
cipherdata.key_type = RTA_DATA_IMM;
else {
- cipherdata.key = (size_t)dpaa_mem_vtop(
+ cipherdata.key = (size_t)rte_dpaa_mem_vtop(
(void *)(size_t)cipherdata.key);
cipherdata.key_type = RTA_DATA_PTR;
}
if (cdb->sh_desc[2] & (1<<1))
authdata.key_type = RTA_DATA_IMM;
else {
- authdata.key = (size_t)dpaa_mem_vtop(
+ authdata.key = (size_t)rte_dpaa_mem_vtop(
(void *)(size_t)authdata.key);
authdata.key_type = RTA_DATA_PTR;
}
@@ -591,14 +566,14 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
if (cdb->sh_desc[2] & 1)
alginfo_c.key_type = RTA_DATA_IMM;
else {
- alginfo_c.key = (size_t)dpaa_mem_vtop(
+ alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
(void *)(size_t)alginfo_c.key);
alginfo_c.key_type = RTA_DATA_PTR;
}
if (cdb->sh_desc[2] & (1<<1))
alginfo_a.key_type = RTA_DATA_IMM;
else {
- alginfo_a.key = (size_t)dpaa_mem_vtop(
+ alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
(void *)(size_t)alginfo_a.key);
alginfo_a.key_type = RTA_DATA_PTR;
}
@@ -674,7 +649,7 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
* sg[0] is for output
* sg[1] for input
*/
- job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
@@ -768,7 +743,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_sg->extension = 1;
in_sg->final = 1;
in_sg->length = data_len;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
/* 1st seg */
sg = in_sg + 1;
@@ -788,7 +763,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
} else {
sg->length = ses->iv.length;
}
- qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
in_sg->length += sg->length;
cpu_to_hw_sg(sg);
sg++;
@@ -821,7 +796,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
rte_memcpy(old_digest, sym->auth.digest.data,
ses->digest_length);
- start_addr = dpaa_mem_vtop(old_digest);
+ start_addr = rte_dpaa_mem_vtop(old_digest);
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
in_sg->length += ses->digest_length;
@@ -888,7 +863,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_sg->extension = 1;
in_sg->final = 1;
in_sg->length = data_len;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
sg = &cf->sg[2];
if (ses->iv.length) {
@@ -906,7 +881,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
} else {
sg->length = ses->iv.length;
}
- qm_sg_entry_set64(sg, dpaa_mem_vtop(iv_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
in_sg->length += sg->length;
cpu_to_hw_sg(sg);
sg++;
@@ -923,7 +898,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
rte_memcpy(old_digest, sym->auth.digest.data,
ses->digest_length);
/* let's check digest by hw */
- start_addr = dpaa_mem_vtop(old_digest);
+ start_addr = rte_dpaa_mem_vtop(old_digest);
sg++;
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
@@ -987,7 +962,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
out_sg = &cf->sg[0];
out_sg->extension = 1;
out_sg->length = data_len;
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -1016,11 +991,11 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_sg->length = data_len + ses->iv.length;
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* IV */
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
cpu_to_hw_sg(sg);
@@ -1098,11 +1073,11 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg->extension = 1;
sg->final = 1;
sg->length = data_len + ses->iv.length;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
cpu_to_hw_sg(sg);
@@ -1163,7 +1138,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output sg entries */
sg = &cf->sg[2];
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -1206,18 +1181,18 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input sg entries */
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* 1st seg IV */
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
cpu_to_hw_sg(sg);
/* 2nd seg auth only */
if (ses->auth_only_len) {
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
sg->length = ses->auth_only_len;
cpu_to_hw_sg(sg);
}
@@ -1243,7 +1218,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
memcpy(ctx->digest, sym->aead.digest.data,
ses->digest_length);
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
}
sg->final = 1;
@@ -1281,9 +1256,9 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
if (is_encode(ses)) {
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
cpu_to_hw_sg(sg);
@@ -1291,7 +1266,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
if (ses->auth_only_len) {
qm_sg_entry_set64(sg,
- dpaa_mem_vtop(sym->aead.aad.data));
+ rte_dpaa_mem_vtop(sym->aead.aad.data));
sg->length = ses->auth_only_len;
length += sg->length;
cpu_to_hw_sg(sg);
@@ -1303,7 +1278,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg->final = 1;
cpu_to_hw_sg(sg);
} else {
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
cpu_to_hw_sg(sg);
@@ -1311,7 +1286,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
if (ses->auth_only_len) {
qm_sg_entry_set64(sg,
- dpaa_mem_vtop(sym->aead.aad.data));
+ rte_dpaa_mem_vtop(sym->aead.aad.data));
sg->length = ses->auth_only_len;
length += sg->length;
cpu_to_hw_sg(sg);
@@ -1326,7 +1301,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
@@ -1340,7 +1315,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg,
dst_start_addr + sym->aead.data.offset);
sg->length = sym->aead.data.length;
@@ -1409,7 +1384,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output sg entries */
sg = &cf->sg[2];
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -1451,11 +1426,11 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input sg entries */
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* 1st seg IV */
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
cpu_to_hw_sg(sg);
@@ -1481,7 +1456,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
memcpy(ctx->digest, sym->auth.digest.data,
ses->digest_length);
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
}
sg->final = 1;
@@ -1518,9 +1493,9 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
if (is_encode(ses)) {
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
cpu_to_hw_sg(sg);
@@ -1532,7 +1507,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg->final = 1;
cpu_to_hw_sg(sg);
} else {
- qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
length += sg->length;
cpu_to_hw_sg(sg);
@@ -1548,7 +1523,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
@@ -1562,7 +1537,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length;
length = sg->length;
@@ -1656,7 +1631,7 @@ build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
out_sg = &cf->sg[0];
out_sg->extension = 1;
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
+ qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
/* 1st seg */
sg = &cf->sg[2];
@@ -1689,7 +1664,7 @@ build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_len = mbuf->data_len;
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
/* 1st seg */
qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
@@ -1884,7 +1859,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
fd->opaque_addr = 0;
fd->cmd = 0;
- qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
+ qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
fd->_format1 = qm_fd_compound;
fd->length29 = 2 * sizeof(struct qm_sg_entry);
@@ -2349,7 +2324,7 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
}
}
ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
- dpaa_mem_vtop(&sess->cdb),
+ rte_dpaa_mem_vtop(&sess->cdb),
qman_fq_fqid(&qp->outq));
if (ret)
DPAA_SEC_ERR("Unable to init sec queue");
@@ -3149,7 +3124,7 @@ dpaa_sec_process_parallel_event(void *event,
* sg[0] is for output
* sg[1] for input
*/
- job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
@@ -3204,7 +3179,7 @@ dpaa_sec_process_atomic_event(void *event,
* sg[0] is for output
* sg[1] for input
*/
- job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
ctx = container_of(job, struct dpaa_sec_op_ctx, job);
ctx->fd_status = fd->status;
diff --git a/drivers/crypto/dpaa_sec/meson.build b/drivers/crypto/dpaa_sec/meson.build
index 9f17d3a43..71de81940 100644
--- a/drivers/crypto/dpaa_sec/meson.build
+++ b/drivers/crypto/dpaa_sec/meson.build
@@ -6,7 +6,7 @@ if not is_linux
reason = 'only supported on linux'
endif
-deps += ['bus_dpaa', 'security']
+deps += ['bus_dpaa', 'mempool_dpaa', 'security']
sources = files('dpaa_sec.c')
allow_experimental_apis = true
--
2.17.1
* [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: reorganize structure members
2020-01-27 9:07 ` [dpdk-dev] [PATCH 1/2] crypto/dpaa_sec: fix performance issue Gagandeep Singh
@ 2020-01-27 9:07 ` Gagandeep Singh
2020-01-28 6:16 ` [dpdk-dev] [PATCH 1/2] crypto/dpaa_sec: fix performance issue Akhil Goyal
1 sibling, 0 replies; 5+ messages in thread
From: Gagandeep Singh @ 2020-01-27 9:07 UTC (permalink / raw)
To: dev, akhil.goyal; +Cc: Gagandeep Singh
This patch reorganizes the members of the session structure
used by the driver in its data path, moving the hot members to
the front to improve performance.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/crypto/dpaa_sec/dpaa_sec.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 3ecc7eae5..2cd50cc8d 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016 NXP
+ * Copyright 2016-2020 NXP
*
*/
@@ -117,6 +117,9 @@ struct sec_pdcp_ctxt {
};
#endif
typedef struct dpaa_sec_session_entry {
+ struct sec_cdb cdb; /**< cmd block associated with qp */
+ struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
+ struct qman_fq *inq[MAX_DPAA_CORES];
uint8_t dir; /*!< Operation Direction */
uint8_t ctxt; /*!< Session Context Type */
enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
@@ -169,9 +172,6 @@ typedef struct dpaa_sec_session_entry {
struct sec_pdcp_ctxt pdcp;
#endif
};
- struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
- struct qman_fq *inq[MAX_DPAA_CORES];
- struct sec_cdb cdb; /**< cmd block associated with qp */
} dpaa_sec_session;
struct dpaa_sec_qp {
--
2.17.1
* Re: [dpdk-dev] [PATCH 1/2] crypto/dpaa_sec: fix performance issue
2020-01-27 9:07 ` [dpdk-dev] [PATCH 1/2] crypto/dpaa_sec: fix performance issue Gagandeep Singh
2020-01-27 9:07 ` [dpdk-dev] [PATCH 2/2] crypto/dpaa_sec: reorganize structure members Gagandeep Singh
@ 2020-01-28 6:16 ` Akhil Goyal
1 sibling, 0 replies; 5+ messages in thread
From: Akhil Goyal @ 2020-01-28 6:16 UTC (permalink / raw)
To: Gagandeep Singh, dev; +Cc: Gagandeep Singh
>
> The DPAA sec driver performs virtual-to-physical address
> translation in its data path, and it calls the
> dpaax_iova_table_update() API on every translation, which is
> very costly.
> This patch moves the dpaax_iova_table_update() call into
> rte_dpaa_mem_ptov(), where it is made only when the translation
> is not found in the DPAAX table.
>
> Fixes: 033968bdc28 ("drivers/crypto/dpaa_sec: update DPAA iova table in
> dpaa_mem_vtop")
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> ---
Series
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
From next time, please update the version of the patch in the title
and specify the change log after '---'.
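For example, a hypothetical v2 of this series would look like:

    Subject: [PATCH v2 1/2] crypto/dpaa_sec: fix performance issue
    ...
    Signed-off-by: ...
    ---
    v2:
    - <list of changes since v1>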
Series applied to dpdk-next-crypto
Thanks.