From: Anoob Joseph <anoobj@marvell.com>
To: Akhil Goyal <gakhil@marvell.com>, Jerin Jacob <jerinj@marvell.com>
Cc: Archana Muniganti <marchana@marvell.com>,
Tejasree Kondoj <ktejasree@marvell.com>, <dev@dpdk.org>
Subject: [PATCH v2 06/18] crypto/cnxk: add separate datapath for pdcp cipher operation
Date: Tue, 9 Aug 2022 16:23:44 +0530 [thread overview]
Message-ID: <20220809105356.561-7-anoobj@marvell.com> (raw)
In-Reply-To: <20220809105356.561-1-anoobj@marvell.com>
Add a separate datapath for the PDCP opcode when performing a cipher-only operation.
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 19 ---
drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 27 +---
drivers/crypto/cnxk/cnxk_se.h | 177 +++++++++++++++++++---
3 files changed, 158 insertions(+), 65 deletions(-)
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index bfa6374005..1b70d02e2a 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -77,25 +77,6 @@ cpt_sec_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
return ret;
}
-static __rte_always_inline int __rte_hot
-cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
- struct cnxk_se_sess *sess, struct cpt_inflight_req *infl_req,
- struct cpt_inst_s *inst)
-{
- uint64_t cpt_op;
- int ret;
-
- cpt_op = sess->cpt_op;
-
- if (cpt_op & ROC_SE_OP_CIPHER_MASK)
- ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
- else
- ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
- inst);
-
- return ret;
-}
-
static inline int
cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
struct cpt_inst_s inst[], struct cpt_inflight_req *infl_req)
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index 2182c1bd2f..3d69723809 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -17,27 +17,6 @@
#include "cnxk_cryptodev_ops.h"
#include "cnxk_se.h"
-static __rte_always_inline int __rte_hot
-cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
- struct cnxk_se_sess *sess,
- struct cpt_inflight_req *infl_req,
- struct cpt_inst_s *inst)
-{
- uint64_t cpt_op;
- int ret;
-
- cpt_op = sess->cpt_op;
-
- if (sess->roc_se_ctx.fc_type == ROC_SE_PDCP_CHAIN)
- ret = fill_pdcp_chain_params(op, sess, &qp->meta_info, infl_req, inst);
- else if (cpt_op & ROC_SE_OP_CIPHER_MASK)
- ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
- else
- ret = fill_digest_params(op, sess, &qp->meta_info, infl_req, inst);
-
- return ret;
-}
-
static __rte_always_inline int __rte_hot
cn9k_cpt_sec_inst_fill(struct rte_crypto_op *op,
struct cpt_inflight_req *infl_req,
@@ -118,8 +97,7 @@ cn9k_cpt_inst_prep(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
sym_op = op->sym;
sess = get_sym_session_private_data(
sym_op->session, cn9k_cryptodev_driver_id);
- ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
- inst);
+ ret = cpt_sym_inst_fill(qp, op, sess, infl_req, inst);
inst->w7.u64 = sess->cpt_inst_w7;
} else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
ret = cn9k_cpt_sec_inst_fill(op, infl_req, inst);
@@ -130,8 +108,7 @@ cn9k_cpt_inst_prep(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
return -1;
}
- ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
- inst);
+ ret = cpt_sym_inst_fill(qp, op, sess, infl_req, inst);
if (unlikely(ret)) {
sym_session_clear(cn9k_cryptodev_driver_id,
op->sym->session);
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index 2b477284c0..35d074ea34 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -1865,8 +1865,6 @@ cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
if (likely(fc_type == ROC_SE_FC_GEN)) {
ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
- } else if (fc_type == ROC_SE_PDCP) {
- ret = cpt_pdcp_alg_prep(flags, d_offs, d_lens, fc_params, inst);
} else if (fc_type == ROC_SE_KASUMI) {
ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
}
@@ -2400,8 +2398,8 @@ prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
static __rte_always_inline int
fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
- struct cpt_qp_meta_info *m_info,
- struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
+ struct cpt_qp_meta_info *m_info, struct cpt_inflight_req *infl_req,
+ struct cpt_inst_s *inst, const bool is_kasumi)
{
struct roc_se_ctx *ctx = &sess->roc_se_ctx;
uint8_t op_minor = ctx->template_w4.s.opcode_minor;
@@ -2424,7 +2422,9 @@ fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
int ret;
fc_params.cipher_iv_len = sess->iv_length;
- fc_params.auth_iv_len = sess->auth_iv_length;
+ fc_params.auth_iv_len = 0;
+ fc_params.auth_iv_buf = NULL;
+ fc_params.iv_buf = NULL;
if (likely(sess->iv_length)) {
flags |= ROC_SE_VALID_IV_BUF;
@@ -2440,13 +2440,15 @@ fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
}
}
- if (sess->zsk_flag) {
+ /* Kasumi would need auth IV */
+ if (is_kasumi && sess->zsk_flag) {
+ fc_params.auth_iv_len = sess->auth_iv_length;
if (sess->auth_iv_length)
fc_params.auth_iv_buf =
rte_crypto_op_ctod_offset(cop, uint8_t *, sess->auth_iv_offset);
- if (sess->zsk_flag != ROC_SE_ZS_EA)
- inplace = 0;
+ inplace = 0;
}
+
m_src = sym_op->m_src;
m_dst = sym_op->m_dst;
@@ -2508,14 +2510,6 @@ fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
d_lens = ci_data_length;
d_lens = (d_lens << 32) | a_data_length;
- if (sess->auth_first)
- mc_hash_off = a_data_offset + a_data_length;
- else
- mc_hash_off = ci_data_offset + ci_data_length;
-
- if (mc_hash_off < (a_data_offset + a_data_length)) {
- mc_hash_off = (a_data_offset + a_data_length);
- }
/* for gmac, salt should be updated like in gcm */
if (unlikely(sess->is_gmac)) {
uint8_t *salt;
@@ -2529,6 +2523,14 @@ fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
if (likely(sess->mac_len)) {
struct rte_mbuf *m = cpt_m_dst_get(cpt_op, m_src, m_dst);
+ if (sess->auth_first)
+ mc_hash_off = a_data_offset + a_data_length;
+ else
+ mc_hash_off = ci_data_offset + ci_data_length;
+
+ if (mc_hash_off < (a_data_offset + a_data_length))
+ mc_hash_off = (a_data_offset + a_data_length);
+
/* hmac immediately following data is best case */
if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
(unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
@@ -2599,11 +2601,8 @@ fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
}
if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
- (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
- ((ctx->fc_type != ROC_SE_KASUMI) &&
- (ctx->fc_type != ROC_SE_HASH_HMAC))))) {
- mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
- m_info->pool, infl_req);
+ (flags & ROC_SE_SINGLE_BUF_HEADROOM) && !is_kasumi))) {
+ mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen, m_info->pool, infl_req);
if (mdata == NULL) {
plt_dp_err("Error allocating meta buffer for request");
return -ENOMEM;
@@ -2632,6 +2631,112 @@ fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
return ret;
}
+static __rte_always_inline int
+fill_pdcp_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
+ struct cpt_qp_meta_info *m_info, struct cpt_inflight_req *infl_req,
+ struct cpt_inst_s *inst)
+{
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct roc_se_fc_params fc_params;
+ uint32_t c_data_len, c_data_off;
+ struct rte_mbuf *m_src, *m_dst;
+ uint64_t d_offs, d_lens;
+ char src[SRC_IOV_SIZE];
+ char dst[SRC_IOV_SIZE];
+ void *mdata = NULL;
+ uint32_t flags = 0;
+ int ret;
+
+ /* Cipher only */
+
+ fc_params.cipher_iv_len = sess->iv_length;
+ fc_params.auth_iv_len = 0;
+ fc_params.iv_buf = NULL;
+ fc_params.auth_iv_buf = NULL;
+
+ if (likely(sess->iv_length))
+ fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *, sess->iv_offset);
+
+ m_src = sym_op->m_src;
+ m_dst = sym_op->m_dst;
+
+ c_data_len = sym_op->cipher.data.length;
+ c_data_off = sym_op->cipher.data.offset;
+
+ d_offs = (uint64_t)c_data_off << 16;
+ d_lens = (uint64_t)c_data_len << 32;
+
+ fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;
+
+ if (likely(m_dst == NULL || m_src == m_dst)) {
+ fc_params.dst_iov = fc_params.src_iov = (void *)src;
+ prepare_iov_from_pkt_inplace(m_src, &fc_params, &flags);
+ } else {
+ /* Out of place processing */
+ fc_params.src_iov = (void *)src;
+ fc_params.dst_iov = (void *)dst;
+
+ /* Store SG I/O in the api for reuse */
+ if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
+ plt_dp_err("Prepare src iov failed");
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ if (unlikely(m_dst != NULL)) {
+ uint32_t pkt_len;
+
+ /* Try to make room as much as src has */
+ pkt_len = rte_pktmbuf_pkt_len(m_dst);
+
+ if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
+ pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
+ if (!rte_pktmbuf_append(m_dst, pkt_len)) {
+ plt_dp_err("Not enough space in "
+ "m_dst %p, need %u"
+ " more",
+ m_dst, pkt_len);
+ ret = -EINVAL;
+ goto err_exit;
+ }
+ }
+
+ if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
+ plt_dp_err("Prepare dst iov failed for "
+ "m_dst %p",
+ m_dst);
+ ret = -EINVAL;
+ goto err_exit;
+ }
+ } else {
+ fc_params.dst_iov = (void *)src;
+ }
+ }
+
+ if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
+ (flags & ROC_SE_SINGLE_BUF_HEADROOM)))) {
+ mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen, m_info->pool, infl_req);
+ if (mdata == NULL) {
+ plt_dp_err("Could not allocate meta buffer");
+ return -ENOMEM;
+ }
+ }
+
+ ret = cpt_pdcp_alg_prep(flags, d_offs, d_lens, &fc_params, inst);
+ if (unlikely(ret)) {
+ plt_dp_err("Could not prepare instruction");
+ goto free_mdata_and_exit;
+ }
+
+ return 0;
+
+free_mdata_and_exit:
+ if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
+ rte_mempool_put(m_info->pool, infl_req->mdata);
+err_exit:
+ return ret;
+}
+
static __rte_always_inline int
fill_pdcp_chain_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
struct cpt_qp_meta_info *m_info, struct cpt_inflight_req *infl_req,
@@ -2974,4 +3079,34 @@ fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
err_exit:
return ret;
}
+
+static __rte_always_inline int __rte_hot
+cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op, struct cnxk_se_sess *sess,
+ struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
+{
+ uint64_t cpt_op = sess->cpt_op;
+ int ret;
+
+ if (cpt_op & ROC_SE_OP_CIPHER_MASK) {
+ switch (sess->roc_se_ctx.fc_type) {
+ case ROC_SE_PDCP_CHAIN:
+ ret = fill_pdcp_chain_params(op, sess, &qp->meta_info, infl_req, inst);
+ break;
+ case ROC_SE_PDCP:
+ ret = fill_pdcp_params(op, sess, &qp->meta_info, infl_req, inst);
+ break;
+ case ROC_SE_KASUMI:
+ ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst, true);
+ break;
+ default:
+ ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst, false);
+ break;
+ }
+ } else {
+ ret = fill_digest_params(op, sess, &qp->meta_info, infl_req, inst);
+ }
+
+ return ret;
+}
+
#endif /*_CNXK_SE_H_ */
--
2.25.1
next prev parent reply other threads:[~2022-08-09 10:54 UTC|newest]
Thread overview: 42+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-08-08 8:05 [PATCH 00/18] Fixes and improvements in cnxk crypto PMDs Anoob Joseph
2022-08-08 8:05 ` [PATCH 01/18] crypto/cnxk: add AES-CCM support Anoob Joseph
2022-08-08 8:05 ` [PATCH 02/18] crypto/cnxk: add burst enqueue for event crypto Anoob Joseph
2022-08-08 8:05 ` [PATCH 03/18] crypto/cnxk: remove zero IV Anoob Joseph
2022-08-08 8:05 ` [PATCH 04/18] crypto/cnxk: limit the meta buf cache to 128 Anoob Joseph
2022-08-08 8:05 ` [PATCH 05/18] crypto/cnxk: add separate path for pdcp chain opcode Anoob Joseph
2022-08-08 8:05 ` [PATCH 06/18] crypto/cnxk: add separate datapath for pdcp cipher operation Anoob Joseph
2022-08-08 8:05 ` [PATCH 07/18] crypto/cnxk: remove MAC len check for AEAD Anoob Joseph
2022-08-08 8:05 ` [PATCH 08/18] crypto/cnxk: fix endianness in anti-replay Anoob Joseph
2022-08-08 8:05 ` [PATCH 09/18] crypto/cnxk: remove extra indirection for FC and Kasumi Anoob Joseph
2022-08-08 8:05 ` [PATCH 10/18] crypto/cnxk: remove extra digest len check Anoob Joseph
2022-08-08 8:05 ` [PATCH 11/18] crypto/cnxk: avoid accessing se ctx in aes gcm path Anoob Joseph
2022-08-08 8:06 ` [PATCH 12/18] crypto/cnxk: remove auth iv from kasumi cipher Anoob Joseph
2022-08-08 8:06 ` [PATCH 13/18] crypto/cnxk: enable IE engine for Chacha-Poly Anoob Joseph
2022-08-08 8:06 ` [PATCH 14/18] crypto/cnxk: use dedicated dp threads depending on operation Anoob Joseph
2022-08-08 8:06 ` [PATCH 15/18] crypto/cnxk: remove unused ctx buf len Anoob Joseph
2022-08-08 8:06 ` [PATCH 16/18] drivers: change crypto adapter datapath error print to debug Anoob Joseph
2022-08-08 8:06 ` [PATCH 17/18] crypto/cnxk: update flow label copy capability Anoob Joseph
2022-08-08 8:06 ` [PATCH 18/18] crypto/cnxk: add support for DOCSIS algorithm Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 00/18] Fixes and improvements in cnxk crypto PMDs Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 01/18] crypto/cnxk: add AES-CCM support Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 02/18] crypto/cnxk: add burst enqueue for event crypto Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 03/18] crypto/cnxk: remove zero IV Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 04/18] crypto/cnxk: limit the meta buf cache to 128 Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 05/18] crypto/cnxk: add separate path for pdcp chain opcode Anoob Joseph
2022-08-09 10:53 ` Anoob Joseph [this message]
2022-08-09 10:53 ` [PATCH v2 07/18] crypto/cnxk: remove MAC len check for AEAD Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 08/18] crypto/cnxk: fix endianness in anti-replay Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 09/18] crypto/cnxk: remove extra indirection for FC and Kasumi Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 10/18] crypto/cnxk: remove extra digest len check Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 11/18] crypto/cnxk: avoid accessing se ctx in aes gcm path Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 12/18] crypto/cnxk: remove auth iv from kasumi cipher Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 13/18] crypto/cnxk: enable IE engine for Chacha-Poly Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 14/18] crypto/cnxk: use dedicated dp threads depending on operation Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 15/18] crypto/cnxk: remove unused ctx buf len Anoob Joseph
2022-09-28 10:14 ` Thomas Monjalon
2022-09-28 10:17 ` [EXT] " Anoob Joseph
2022-09-28 11:00 ` Thomas Monjalon
2022-08-09 10:53 ` [PATCH v2 16/18] drivers: change crypto adapter datapath error print to debug Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 17/18] crypto/cnxk: update flow label copy capability Anoob Joseph
2022-08-09 10:53 ` [PATCH v2 18/18] crypto/cnxk: add support for DOCSIS algorithm Anoob Joseph
2022-08-28 13:25 ` [PATCH v2 00/18] Fixes and improvements in cnxk crypto PMDs Akhil Goyal
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220809105356.561-7-anoobj@marvell.com \
--to=anoobj@marvell.com \
--cc=dev@dpdk.org \
--cc=gakhil@marvell.com \
--cc=jerinj@marvell.com \
--cc=ktejasree@marvell.com \
--cc=marchana@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).