From: Gagandeep Singh <g.singh@nxp.com>
To: gakhil@marvell.com, dev@dpdk.org
Cc: Gagandeep Singh <g.singh@nxp.com>
Subject: [PATCH v2 09/14] crypto/dpaa2_sec: per queue pair fle pool
Date: Thu, 28 Apr 2022 17:17:28 +0530
Message-ID: <20220428114733.1419957-10-g.singh@nxp.com>
In-Reply-To: <20220428114733.1419957-1-g.singh@nxp.com>
The driver currently creates a single FLE pool, with a fixed
number of buffers, shared by all queue pairs of a DPSECI object.
These FLE buffers correspond to the number of descriptors.
This patch creates an FLE pool per queue pair, so that the user
can control the number of descriptors of each queue pair through
the rte_cryptodev_queue_pair_setup() API.
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
drivers/bus/fslmc/portal/dpaa2_hw_dpio.c | 4 +-
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 3 +-
drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 190 ++++++++++----------
drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 5 +-
drivers/net/dpaa2/dpaa2_ethdev.h | 4 +-
drivers/net/dpaa2/dpaa2_rxtx.c | 3 +-
6 files changed, 101 insertions(+), 108 deletions(-)
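
Note (not part of the patch): a minimal application-side sketch of the
usage this change enables. With the FLE pool now created per queue pair,
the descriptor count passed at queue pair setup directly sizes that queue
pair's FLE pool. dev_id, qp_id, sess_mp and sess_priv_mp below are
hypothetical placeholders, and the qp_conf field names assume the
rte_cryptodev_qp_conf layout at the time of this series:

  #include <stdio.h>
  #include <rte_cryptodev.h>

  struct rte_cryptodev_qp_conf qp_conf = {
          /* sizes this queue pair's FLE pool in dpaa2_sec */
          .nb_descriptors = 2048,
          .mp_session = sess_mp,                /* placeholder mempool */
          .mp_session_private = sess_priv_mp,   /* placeholder mempool */
  };

  /* each queue pair of the device may now use a different depth;
   * dpaa2_sec ignores the socket id argument
   */
  int ret = rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf, 0);
  if (ret < 0)
          printf("queue pair %u setup failed: %d\n", qp_id, ret);
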
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 943fadee48..22c51c1a82 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2022 NXP
*
*/
#include <unistd.h>
@@ -614,7 +614,7 @@ dpaa2_free_eq_descriptors(void)
if (qbman_result_eqresp_rc(eqresp)) {
txq = eqresp_meta->dpaa2_q;
- txq->cb_eqresp_free(dpio_dev->eqresp_ci);
+ txq->cb_eqresp_free(dpio_dev->eqresp_ci, txq);
}
qbman_result_eqresp_set_rspid(eqresp, 0);
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 36d68ea0aa..024fbf9935 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -147,7 +147,8 @@ typedef void (dpaa2_queue_cb_dqrr_t)(struct qbman_swp *swp,
struct dpaa2_queue *rxq,
struct rte_event *ev);
-typedef void (dpaa2_queue_cb_eqresp_free_t)(uint16_t eqresp_ci);
+typedef void (dpaa2_queue_cb_eqresp_free_t)(uint16_t eqresp_ci,
+ struct dpaa2_queue *dpaa2_q);
struct dpaa2_queue {
struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 2374d67978..86c8df241b 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -65,11 +65,10 @@ uint8_t cryptodev_driver_id;
uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;
static inline void
-free_fle(const struct qbman_fd *fd)
+free_fle(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
{
struct qbman_fle *fle;
struct rte_crypto_op *op;
- struct ctxt_priv *priv;
#ifdef RTE_LIB_SECURITY
if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
@@ -78,10 +77,9 @@ free_fle(const struct qbman_fd *fd)
fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
/* free the fle memory */
- if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src))) {
- priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
- rte_mempool_put(priv->fle_pool, (void *)(fle-1));
- } else
+ if (likely(rte_pktmbuf_is_contiguous(op->sym->m_src)))
+ rte_mempool_put(qp->fle_pool, (void *)(fle-1));
+ else
rte_free((void *)(fle-1));
}
@@ -206,7 +204,7 @@ build_proto_compound_sg_fd(dpaa2_sec_session *sess,
static inline int
build_proto_compound_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
- struct qbman_fd *fd, uint16_t bpid)
+ struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
struct rte_crypto_sym_op *sym_op = op->sym;
struct ctxt_priv *priv = sess->ctxt;
@@ -223,9 +221,9 @@ build_proto_compound_fd(dpaa2_sec_session *sess,
flc = &priv->flc_desc[0].flc;
/* we are using the first FLE entry to store Mbuf */
- retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
if (retval) {
- DPAA2_SEC_DP_ERR("Memory alloc failed");
+ DPAA2_SEC_DP_DEBUG("Proto: Memory alloc failed");
return -ENOMEM;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -282,11 +280,11 @@ build_proto_compound_fd(dpaa2_sec_session *sess,
static inline int
build_proto_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
- struct qbman_fd *fd, uint16_t bpid)
+ struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
struct rte_crypto_sym_op *sym_op = op->sym;
if (sym_op->m_dst)
- return build_proto_compound_fd(sess, op, fd, bpid);
+ return build_proto_compound_fd(sess, op, fd, bpid, qp);
struct ctxt_priv *priv = sess->ctxt;
struct sec_flow_context *flc;
@@ -461,7 +459,8 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
- struct qbman_fd *fd, uint16_t bpid)
+ struct qbman_fd *fd, uint16_t bpid,
+ struct dpaa2_sec_qp *qp)
{
struct rte_crypto_sym_op *sym_op = op->sym;
struct ctxt_priv *priv = sess->ctxt;
@@ -485,9 +484,9 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
if (retval) {
- DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
+ DPAA2_SEC_DP_DEBUG("GCM: no buffer available in fle pool");
return -ENOMEM;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -748,7 +747,7 @@ build_authenc_sg_fd(dpaa2_sec_session *sess,
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
- struct qbman_fd *fd, uint16_t bpid)
+ struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
struct rte_crypto_sym_op *sym_op = op->sym;
struct ctxt_priv *priv = sess->ctxt;
@@ -777,9 +776,9 @@ build_authenc_fd(dpaa2_sec_session *sess,
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
if (retval) {
- DPAA2_SEC_ERR("Memory alloc failed for SGE");
+ DPAA2_SEC_DP_DEBUG("AUTHENC: no buffer available in fle pool");
return -ENOMEM;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -1010,7 +1009,7 @@ static inline int build_auth_sg_fd(
static inline int
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
- struct qbman_fd *fd, uint16_t bpid)
+ struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
struct rte_crypto_sym_op *sym_op = op->sym;
struct qbman_fle *fle, *sge;
@@ -1034,9 +1033,9 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
data_offset = data_offset >> 3;
}
- retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
if (retval) {
- DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
+ DPAA2_SEC_DP_DEBUG("AUTH: no buffer available in fle pool");
return -ENOMEM;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -1257,7 +1256,7 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
static int
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
- struct qbman_fd *fd, uint16_t bpid)
+ struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
struct rte_crypto_sym_op *sym_op = op->sym;
struct qbman_fle *fle, *sge;
@@ -1287,9 +1286,9 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
else
dst = sym_op->m_src;
- retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ retval = rte_mempool_get(qp->fle_pool, (void **)(&fle));
if (retval) {
- DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
+ DPAA2_SEC_DP_DEBUG("CIPHER: no buffer available in fle pool");
return -ENOMEM;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -1374,7 +1373,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
static inline int
build_sec_fd(struct rte_crypto_op *op,
- struct qbman_fd *fd, uint16_t bpid)
+ struct qbman_fd *fd, uint16_t bpid, struct dpaa2_sec_qp *qp)
{
int ret = -1;
dpaa2_sec_session *sess;
@@ -1387,11 +1386,15 @@ build_sec_fd(struct rte_crypto_op *op,
sess = (dpaa2_sec_session *)get_sec_session_private_data(
op->sym->sec_session);
#endif
- else
+ else {
+ DPAA2_SEC_DP_ERR("Session type invalid\n");
return -ENOTSUP;
+ }
- if (!sess)
+ if (!sess) {
+ DPAA2_SEC_DP_ERR("Session not available\n");
return -EINVAL;
+ }
/* Any of the buffer is segmented*/
if (!rte_pktmbuf_is_contiguous(op->sym->m_src) ||
@@ -1423,23 +1426,23 @@ build_sec_fd(struct rte_crypto_op *op,
} else {
switch (sess->ctxt_type) {
case DPAA2_SEC_CIPHER:
- ret = build_cipher_fd(sess, op, fd, bpid);
+ ret = build_cipher_fd(sess, op, fd, bpid, qp);
break;
case DPAA2_SEC_AUTH:
- ret = build_auth_fd(sess, op, fd, bpid);
+ ret = build_auth_fd(sess, op, fd, bpid, qp);
break;
case DPAA2_SEC_AEAD:
- ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+ ret = build_authenc_gcm_fd(sess, op, fd, bpid, qp);
break;
case DPAA2_SEC_CIPHER_HASH:
- ret = build_authenc_fd(sess, op, fd, bpid);
+ ret = build_authenc_fd(sess, op, fd, bpid, qp);
break;
#ifdef RTE_LIB_SECURITY
case DPAA2_SEC_IPSEC:
- ret = build_proto_fd(sess, op, fd, bpid);
+ ret = build_proto_fd(sess, op, fd, bpid, qp);
break;
case DPAA2_SEC_PDCP:
- ret = build_proto_compound_fd(sess, op, fd, bpid);
+ ret = build_proto_compound_fd(sess, op, fd, bpid, qp);
break;
#endif
case DPAA2_SEC_HASH_CIPHER:
@@ -1513,10 +1516,9 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
mb_pool = (*ops)->sym->m_src->pool;
bpid = mempool_to_bpid(mb_pool);
- ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
+ ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
if (ret) {
- DPAA2_SEC_ERR("error: Improper packet contents"
- " for crypto operation");
+ DPAA2_SEC_DP_DEBUG("FD build failed\n");
goto skip_tx;
}
ops++;
@@ -1537,7 +1539,8 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
/* freeing the fle buffers */
while (loop < frames_to_send) {
- free_fle(&fd_arr[loop]);
+ free_fle(&fd_arr[loop],
+ dpaa2_qp);
loop++;
}
goto skip_tx;
@@ -1593,11 +1596,10 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd)
#endif
static inline struct rte_crypto_op *
-sec_fd_to_mbuf(const struct qbman_fd *fd)
+sec_fd_to_mbuf(const struct qbman_fd *fd, struct dpaa2_sec_qp *qp)
{
struct qbman_fle *fle;
struct rte_crypto_op *op;
- struct ctxt_priv *priv;
struct rte_mbuf *dst, *src;
#ifdef RTE_LIB_SECURITY
@@ -1651,8 +1653,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
/* free the fle memory */
if (likely(rte_pktmbuf_is_contiguous(src))) {
- priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
- rte_mempool_put(priv->fle_pool, (void *)(fle-1));
+ rte_mempool_put(qp->fle_pool, (void *)(fle-1));
} else
rte_free((void *)(fle-1));
@@ -1737,14 +1738,17 @@ dpaa2_sec_dump(struct rte_crypto_op *op)
}
static void
-dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci)
+dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci,
+ struct dpaa2_queue *dpaa2_q)
{
struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
struct rte_crypto_op *op;
struct qbman_fd *fd;
+ struct dpaa2_sec_qp *dpaa2_qp;
+ dpaa2_qp = container_of(dpaa2_q, struct dpaa2_sec_qp, tx_vq);
fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
- op = sec_fd_to_mbuf(fd);
+ op = sec_fd_to_mbuf(fd, dpaa2_qp);
/* Instead of freeing, enqueue it to the sec tx queue (sec->core)
* after setting an error in FD. But this will have performance impact.
*/
@@ -1860,10 +1864,9 @@ dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
mb_pool = (*ops)->sym->m_src->pool;
bpid = mempool_to_bpid(mb_pool);
- ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
+ ret = build_sec_fd(*ops, &fd_arr[loop], bpid, dpaa2_qp);
if (ret) {
- DPAA2_SEC_ERR("error: Improper packet contents"
- " for crypto operation");
+ DPAA2_SEC_DP_DEBUG("FD build failed\n");
goto skip_tx;
}
ops++;
@@ -1883,7 +1886,8 @@ dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
DPAA2_SEC_DP_DEBUG("Enqueue fail\n");
/* freeing the fle buffers */
while (loop < frames_to_send) {
- free_fle(&fd_arr[loop]);
+ free_fle(&fd_arr[loop],
+ dpaa2_qp);
loop++;
}
goto skip_tx;
@@ -1981,7 +1985,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
}
fd = qbman_result_DQ_fd(dq_storage);
- ops[num_rx] = sec_fd_to_mbuf(fd);
+ ops[num_rx] = sec_fd_to_mbuf(fd, dpaa2_qp);
if (unlikely(fd->simple.frc)) {
/* TODO Parse SEC errors */
@@ -2023,6 +2027,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
dpaa2_free_dq_storage(qp->rx_vq.q_storage);
rte_free(qp->rx_vq.q_storage);
}
+ rte_mempool_free(qp->fle_pool);
rte_free(qp);
dev->data->queue_pairs[queue_pair_id] = NULL;
@@ -2033,7 +2038,7 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
/** Setup a queue pair */
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
- __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ const struct rte_cryptodev_qp_conf *qp_conf,
__rte_unused int socket_id)
{
struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
@@ -2041,6 +2046,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
struct dpseci_rx_queue_cfg cfg;
int32_t retcode;
+ char str[30];
PMD_INIT_FUNC_TRACE();
@@ -2080,6 +2086,19 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
dev->data->queue_pairs[qp_id] = qp;
+ snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d_%d",
+ getpid(), dev->data->dev_id, qp_id);
+ qp->fle_pool = rte_mempool_create((const char *)str,
+ qp_conf->nb_descriptors,
+ FLE_POOL_BUF_SIZE,
+ FLE_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
+ if (!qp->fle_pool) {
+ DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
+ return -ENOMEM;
+ }
+
cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
cfg.user_ctx = (size_t)(&qp->rx_vq);
retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
@@ -2097,11 +2116,9 @@ dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
static int
-dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
+dpaa2_sec_cipher_init(struct rte_crypto_sym_xform *xform,
dpaa2_sec_session *session)
{
- struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo cipherdata;
int bufsize, ret = 0;
struct ctxt_priv *priv;
@@ -2118,8 +2135,6 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
return -ENOMEM;
}
- priv->fle_pool = dev_priv->fle_pool;
-
flc = &priv->flc_desc[0].flc;
session->ctxt_type = DPAA2_SEC_CIPHER;
@@ -2238,11 +2253,9 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
}
static int
-dpaa2_sec_auth_init(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
+dpaa2_sec_auth_init(struct rte_crypto_sym_xform *xform,
dpaa2_sec_session *session)
{
- struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata;
int bufsize, ret = 0;
struct ctxt_priv *priv;
@@ -2260,7 +2273,6 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
return -ENOMEM;
}
- priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[DESC_INITFINAL].flc;
session->ctxt_type = DPAA2_SEC_AUTH;
@@ -2476,12 +2488,10 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
}
static int
-dpaa2_sec_aead_init(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
+dpaa2_sec_aead_init(struct rte_crypto_sym_xform *xform,
dpaa2_sec_session *session)
{
struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
- struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo aeaddata;
int bufsize;
struct ctxt_priv *priv;
@@ -2505,7 +2515,6 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
return -ENOMEM;
}
- priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[0].flc;
session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
@@ -2601,11 +2610,9 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
static int
-dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
+dpaa2_sec_aead_chain_init(struct rte_crypto_sym_xform *xform,
dpaa2_sec_session *session)
{
- struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata, cipherdata;
int bufsize;
struct ctxt_priv *priv;
@@ -2643,7 +2650,6 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
return -ENOMEM;
}
- priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[0].flc;
session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
@@ -2849,8 +2855,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
}
static int
-dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *sess)
+dpaa2_sec_set_session_parameters(struct rte_crypto_sym_xform *xform, void *sess)
{
dpaa2_sec_session *session = sess;
int ret;
@@ -2868,37 +2873,37 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
/* Cipher Only */
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
- ret = dpaa2_sec_cipher_init(dev, xform, session);
+ ret = dpaa2_sec_cipher_init(xform, session);
/* Authentication Only */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next == NULL) {
- ret = dpaa2_sec_auth_init(dev, xform, session);
+ ret = dpaa2_sec_auth_init(xform, session);
/* Cipher then Authenticate */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
session->ext_params.aead_ctxt.auth_cipher_text = true;
if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
- ret = dpaa2_sec_auth_init(dev, xform, session);
+ ret = dpaa2_sec_auth_init(xform, session);
else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
- ret = dpaa2_sec_cipher_init(dev, xform, session);
+ ret = dpaa2_sec_cipher_init(xform, session);
else
- ret = dpaa2_sec_aead_chain_init(dev, xform, session);
+ ret = dpaa2_sec_aead_chain_init(xform, session);
/* Authenticate then Cipher */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
session->ext_params.aead_ctxt.auth_cipher_text = false;
if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
- ret = dpaa2_sec_cipher_init(dev, xform, session);
+ ret = dpaa2_sec_cipher_init(xform, session);
else if (xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
- ret = dpaa2_sec_auth_init(dev, xform, session);
+ ret = dpaa2_sec_auth_init(xform, session);
else
- ret = dpaa2_sec_aead_chain_init(dev, xform, session);
+ ret = dpaa2_sec_aead_chain_init(xform, session);
/* AEAD operation for AES-GCM kind of Algorithms */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
xform->next == NULL) {
- ret = dpaa2_sec_aead_init(dev, xform, session);
+ ret = dpaa2_sec_aead_init(xform, session);
} else {
DPAA2_SEC_ERR("Invalid crypto type");
@@ -3147,7 +3152,6 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
struct alginfo authdata, cipherdata;
int bufsize;
struct sec_flow_context *flc;
- struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
int ret = -1;
PMD_INIT_FUNC_TRACE();
@@ -3162,7 +3166,6 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
return -ENOMEM;
}
- priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[0].flc;
if (ipsec_xform->life.bytes_hard_limit != 0 ||
@@ -3395,7 +3398,6 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
struct rte_crypto_cipher_xform *cipher_xform = NULL;
dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
struct ctxt_priv *priv;
- struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata, cipherdata;
struct alginfo *p_authdata = NULL;
int bufsize = -1;
@@ -3420,7 +3422,6 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
return -ENOMEM;
}
- priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[0].flc;
/* find xfrm types */
@@ -3758,7 +3759,7 @@ dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
return -ENOMEM;
}
- ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
+ ret = dpaa2_sec_set_session_parameters(xform, sess_private_data);
if (ret != 0) {
DPAA2_SEC_ERR("Failed to configure session parameters");
/* Return session to mempool */
@@ -3989,6 +3990,7 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
struct dpaa2_queue *rxq,
struct rte_event *ev)
{
+ struct dpaa2_sec_qp *qp;
/* Prefetching mbuf */
rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
@@ -3996,6 +3998,7 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
/* Prefetching ipsec crypto_op stored in priv data of mbuf */
rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+ qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
ev->flow_id = rxq->ev.flow_id;
ev->sub_event_type = rxq->ev.sub_event_type;
ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
@@ -4003,7 +4006,7 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
ev->sched_type = rxq->ev.sched_type;
ev->queue_id = rxq->ev.queue_id;
ev->priority = rxq->ev.priority;
- ev->event_ptr = sec_fd_to_mbuf(fd);
+ ev->event_ptr = sec_fd_to_mbuf(fd, qp);
qbman_swp_dqrr_consume(swp, dq);
}
@@ -4015,6 +4018,7 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
struct rte_event *ev)
{
uint8_t dqrr_index;
+ struct dpaa2_sec_qp *qp;
struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
/* Prefetching mbuf */
rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
@@ -4023,6 +4027,7 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
/* Prefetching ipsec crypto_op stored in priv data of mbuf */
rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+ qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
ev->flow_id = rxq->ev.flow_id;
ev->sub_event_type = rxq->ev.sub_event_type;
ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
@@ -4031,7 +4036,7 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
ev->queue_id = rxq->ev.queue_id;
ev->priority = rxq->ev.priority;
- ev->event_ptr = sec_fd_to_mbuf(fd);
+ ev->event_ptr = sec_fd_to_mbuf(fd, qp);
dqrr_index = qbman_get_dqrr_idx(dq);
*dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
DPAA2_PER_LCORE_DQRR_SIZE++;
@@ -4047,6 +4052,7 @@ dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
struct rte_event *ev)
{
struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+ struct dpaa2_sec_qp *qp;
/* Prefetching mbuf */
rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
@@ -4055,6 +4061,7 @@ dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
/* Prefetching ipsec crypto_op stored in priv data of mbuf */
rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+ qp = container_of(rxq, struct dpaa2_sec_qp, rx_vq);
ev->flow_id = rxq->ev.flow_id;
ev->sub_event_type = rxq->ev.sub_event_type;
ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
@@ -4062,7 +4069,7 @@ dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
ev->sched_type = rxq->ev.sched_type;
ev->queue_id = rxq->ev.queue_id;
ev->priority = rxq->ev.priority;
- ev->event_ptr = sec_fd_to_mbuf(fd);
+ ev->event_ptr = sec_fd_to_mbuf(fd, qp);
*dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
@@ -4236,7 +4243,6 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev)
priv->hw = NULL;
rte_free(dpseci);
rte_free(dev->security_ctx);
- rte_mempool_free(priv->fle_pool);
DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
dev->data->name, rte_socket_id());
@@ -4304,7 +4310,6 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
uint16_t token;
struct dpseci_attr attr;
int retcode, hw_id;
- char str[30];
PMD_INIT_FUNC_TRACE();
dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
@@ -4380,19 +4385,6 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
internals->token = token;
internals->en_loose_ordered = true;
- snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
- getpid(), cryptodev->data->dev_id);
- internals->fle_pool = rte_mempool_create((const char *)str,
- FLE_POOL_NUM_BUFS,
- FLE_POOL_BUF_SIZE,
- FLE_POOL_CACHE_SIZE, 0,
- NULL, NULL, NULL, NULL,
- SOCKET_ID_ANY, 0);
- if (!internals->fle_pool) {
- DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
- goto init_error;
- }
-
dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE);
dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER);
DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 3094778a7a..63f4c64aab 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016,2020-2021 NXP
+ * Copyright 2016,2020-2022 NXP
*
*/
@@ -31,7 +31,6 @@ extern uint8_t cryptodev_driver_id;
struct dpaa2_sec_dev_private {
void *mc_portal; /**< MC Portal for configuring this device */
void *hw; /**< Hardware handle for this device.Used by NADK framework */
- struct rte_mempool *fle_pool; /* per device memory pool for FLE */
int32_t hw_id; /**< An unique ID of this device instance */
int32_t vfio_fd; /**< File descriptor received via VFIO */
uint16_t token; /**< Token required by DPxxx objects */
@@ -44,6 +43,7 @@ struct dpaa2_sec_dev_private {
struct dpaa2_sec_qp {
struct dpaa2_queue rx_vq;
struct dpaa2_queue tx_vq;
+ struct rte_mempool *fle_pool; /* per device memory pool for FLE */
};
enum shr_desc_type {
@@ -127,7 +127,6 @@ struct sec_flc_desc {
};
struct ctxt_priv {
- struct rte_mempool *fle_pool; /* per device memory pool for FLE */
struct sec_flc_desc flc_desc[0];
};
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index e79a7fc2e2..a459181139 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2021 NXP
+ * Copyright 2016-2022 NXP
*
*/
@@ -264,7 +264,7 @@ __rte_internal
uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue,
struct rte_mbuf **bufs, uint16_t nb_pkts);
-void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci);
+void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci, struct dpaa2_queue *dpaa2_q);
void dpaa2_flow_clean(struct rte_eth_dev *dev);
uint16_t dpaa2_dev_tx_conf(void *queue) __rte_unused;
int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev);
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index b8844fbdf1..39bfddd804 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -1408,7 +1408,8 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
void
-dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
+dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci,
+ __rte_unused struct dpaa2_queue *dpaa2_q)
{
struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
struct qbman_fd *fd;
--
2.25.1
Thread overview: 33+ messages
2022-04-22 3:50 [PATCH 01/14] crypto/dpaa2_sec: fix fle buffer leak Gagandeep Singh
2022-04-22 3:50 ` [PATCH 02/14] crypto/dpaa2_sec: fix buffer pool ID check Gagandeep Singh
2022-04-22 3:50 ` [PATCH 03/14] crypto/dpaa_sec: fix length for chain fd in raw sec driver Gagandeep Singh
2022-04-22 3:50 ` [PATCH 04/14] crypto/dpaa2_sec: " Gagandeep Singh
2022-04-22 3:50 ` [PATCH 05/14] crypto/dpaa_sec: physically enable QI Gagandeep Singh
2022-04-22 3:50 ` [PATCH 06/14] crypto/dpaa_sec: replace use of old build macros Gagandeep Singh
2022-04-22 3:50 ` [PATCH 07/14] dpaax/caamflib: remove obsolete code Gagandeep Singh
2022-04-22 3:50 ` [PATCH 08/14] crypto/dpaa_sec : fix secondary process probe Gagandeep Singh
2022-04-22 3:50 ` [PATCH 09/14] crypto/dpaa2_sec: per queue pair fle pool Gagandeep Singh
2022-04-22 3:50 ` [PATCH 10/14] crypto/dpaa2_sec: fix crypto op pointer for atomic and ordered queues Gagandeep Singh
2022-04-22 3:50 ` [PATCH 11/14] crypto/dpaa2_sec: fix operation status for simple fd Gagandeep Singh
2022-04-22 3:50 ` [PATCH 12/14] crypto/dpaa_sec: remove unused thread specific variables Gagandeep Singh
2022-04-22 3:50 ` [PATCH 13/14] crypto/dpaa_sec: move cdb prepration to session create Gagandeep Singh
2022-04-22 3:51 ` [PATCH 14/14] common/dpaax: fix short MAC-I IV calculation for zuc Gagandeep Singh
2022-04-28 7:15 ` [EXT] [PATCH 01/14] crypto/dpaa2_sec: fix fle buffer leak Akhil Goyal
2022-04-28 9:23 ` Gagandeep Singh
2022-04-28 9:29 ` Akhil Goyal
2022-04-28 11:47 ` [PATCH v2 00/14] DPAA1 and DPAA2 crypto drivers changes Gagandeep Singh
2022-04-28 11:47 ` [PATCH v2 01/14] crypto/dpaa2_sec: fix fle buffer leak Gagandeep Singh
2022-04-28 11:47 ` [PATCH v2 02/14] crypto/dpaa2_sec: fix buffer pool ID check Gagandeep Singh
2022-04-28 11:47 ` [PATCH v2 03/14] crypto/dpaa_sec: fix length for chain FD in raw sec driver Gagandeep Singh
2022-04-28 11:47 ` [PATCH v2 04/14] crypto/dpaa2_sec: " Gagandeep Singh
2022-04-28 11:47 ` [PATCH v2 05/14] crypto/dpaa_sec: physically enable QI Gagandeep Singh
2022-04-28 11:47 ` [PATCH v2 06/14] crypto/dpaa_sec: replace use of old build macros Gagandeep Singh
2022-04-28 11:47 ` [PATCH v2 07/14] common/dpaax: remove obsolete code Gagandeep Singh
2022-04-28 11:47 ` [PATCH v2 08/14] crypto/dpaa_sec: fix secondary process probe Gagandeep Singh
2022-04-28 11:47 ` Gagandeep Singh [this message]
2022-04-28 11:47 ` [PATCH v2 10/14] crypto/dpaa2_sec: fix crypto op pointer value Gagandeep Singh
2022-04-28 11:47 ` [PATCH v2 11/14] crypto/dpaa2_sec: fix operation status for simple FD Gagandeep Singh
2022-04-28 11:47 ` [PATCH v2 12/14] bus/dpaa: remove unused thread specific variables Gagandeep Singh
2022-04-28 11:47 ` [PATCH v2 13/14] crypto/dpaa_sec: move cdb prepration to session create Gagandeep Singh
2022-04-28 11:47 ` [PATCH v2 14/14] common/dpaax: fix short MAC-I IV calculation for zuc Gagandeep Singh
2022-04-29 9:29 ` [EXT] [PATCH v2 00/14] DPAA1 and DPAA2 crypto drivers changes Akhil Goyal