DPDK patches and discussions
From: Akhil Goyal <akhil.goyal@nxp.com>
To: dev@dpdk.org
Cc: hemant.agrawal@nxp.com, Akhil Goyal <akhil.goyal@nxp.com>
Subject: [dpdk-dev] [PATCH] crypto/dpaa_sec: change per cryptodev pool to per qp
Date: Mon, 30 Sep 2019 17:24:51 +0530
Message-ID: <20190930115452.5178-3-akhil.goyal@nxp.com>
In-Reply-To: <20190930115452.5178-1-akhil.goyal@nxp.com>

When a single cryptodev is used by multiple cores through multiple
queues, the cores contend for the shared mempool, which may eventually
be exhausted. The mempool should therefore be per core. Since each
queue pair is used by a single core, the mempools are now created in
queue pair setup.

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 58 ++++++++++++------------------
 drivers/crypto/dpaa_sec/dpaa_sec.h |  3 +-
 2 files changed, 24 insertions(+), 37 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 34e6e4f0e..ea3ef5f1e 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -70,7 +70,9 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
 	struct dpaa_sec_op_ctx *ctx;
 	int i, retval;
 
-	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
+	retval = rte_mempool_get(
+			ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
+			(void **)(&ctx));
 	if (!ctx || retval) {
 		DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
 		return NULL;
@@ -84,7 +86,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
 	for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
 		dcbz_64(&ctx->job.sg[i]);
 
-	ctx->ctx_pool = ses->ctx_pool;
+	ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
 	ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
 
 	return ctx;
@@ -1939,6 +1941,7 @@ dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
 	}
 
 	qp = &internals->qps[qp_id];
+	rte_mempool_free(qp->ctx_pool);
 	qp->internals = NULL;
 	dev->data->queue_pairs[qp_id] = NULL;
 
@@ -1953,6 +1956,7 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 {
 	struct dpaa_sec_dev_private *internals;
 	struct dpaa_sec_qp *qp = NULL;
+	char str[20];
 
 	DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
 
@@ -1965,6 +1969,22 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	qp = &internals->qps[qp_id];
 	qp->internals = internals;
+	snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
+			dev->data->dev_id, qp_id);
+	if (!qp->ctx_pool) {
+		qp->ctx_pool = rte_mempool_create((const char *)str,
+							CTX_POOL_NUM_BUFS,
+							CTX_POOL_BUF_SIZE,
+							CTX_POOL_CACHE_SIZE, 0,
+							NULL, NULL, NULL, NULL,
+							SOCKET_ID_ANY, 0);
+		if (!qp->ctx_pool) {
+			DPAA_SEC_ERR("%s create failed\n", str);
+			return -ENOMEM;
+		}
+	} else
+		DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
+				dev->data->dev_id, qp_id);
 	dev->data->queue_pairs[qp_id] = qp;
 
 	return 0;
@@ -2181,7 +2201,6 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 		DPAA_SEC_ERR("Invalid crypto type");
 		return -EINVAL;
 	}
-	session->ctx_pool = internals->ctx_pool;
 	rte_spinlock_lock(&internals->lock);
 	for (i = 0; i < MAX_DPAA_CORES; i++) {
 		session->inq[i] = dpaa_sec_attach_rxq(internals);
@@ -2436,7 +2455,6 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
 		session->dir = DIR_DEC;
 	} else
 		goto out;
-	session->ctx_pool = internals->ctx_pool;
 	rte_spinlock_lock(&internals->lock);
 	for (i = 0; i < MAX_DPAA_CORES; i++) {
 		session->inq[i] = dpaa_sec_attach_rxq(internals);
@@ -2547,7 +2565,6 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
 	session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
 
-	session->ctx_pool = dev_priv->ctx_pool;
 	rte_spinlock_lock(&dev_priv->lock);
 	for (i = 0; i < MAX_DPAA_CORES; i++) {
 		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
@@ -2624,32 +2641,11 @@ dpaa_sec_security_session_destroy(void *dev __rte_unused,
 }
 
 static int
-dpaa_sec_dev_configure(struct rte_cryptodev *dev,
+dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
 		       struct rte_cryptodev_config *config __rte_unused)
 {
-
-	char str[20];
-	struct dpaa_sec_dev_private *internals;
-
 	PMD_INIT_FUNC_TRACE();
 
-	internals = dev->data->dev_private;
-	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
-	if (!internals->ctx_pool) {
-		internals->ctx_pool = rte_mempool_create((const char *)str,
-							CTX_POOL_NUM_BUFS,
-							CTX_POOL_BUF_SIZE,
-							CTX_POOL_CACHE_SIZE, 0,
-							NULL, NULL, NULL, NULL,
-							SOCKET_ID_ANY, 0);
-		if (!internals->ctx_pool) {
-			DPAA_SEC_ERR("%s create failed\n", str);
-			return -ENOMEM;
-		}
-	} else
-		DPAA_SEC_INFO("mempool already created for dev_id : %d",
-				dev->data->dev_id);
-
 	return 0;
 }
 
@@ -2669,17 +2665,11 @@ dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
 static int
 dpaa_sec_dev_close(struct rte_cryptodev *dev)
 {
-	struct dpaa_sec_dev_private *internals;
-
 	PMD_INIT_FUNC_TRACE();
 
 	if (dev == NULL)
 		return -ENOMEM;
 
-	internals = dev->data->dev_private;
-	rte_mempool_free(internals->ctx_pool);
-	internals->ctx_pool = NULL;
-
 	return 0;
 }
 
@@ -2919,8 +2909,6 @@ dpaa_sec_uninit(struct rte_cryptodev *dev)
 	internals = dev->data->dev_private;
 	rte_free(dev->security_ctx);
 
-	/* In case close has been called, internals->ctx_pool would be NULL */
-	rte_mempool_free(internals->ctx_pool);
 	rte_free(internals);
 
 	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index a2d588e1c..77daa79d5 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -153,11 +153,11 @@ typedef struct dpaa_sec_session_entry {
 	struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
 	struct qman_fq *inq[MAX_DPAA_CORES];
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
-	struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
 } dpaa_sec_session;
 
 struct dpaa_sec_qp {
 	struct dpaa_sec_dev_private *internals;
+	struct rte_mempool *ctx_pool; /* mempool for dpaa_sec_op_ctx */
 	struct qman_fq outq;
 	int rx_pkts;
 	int rx_errs;
@@ -172,7 +172,6 @@ struct dpaa_sec_qp {
 /* internal sec queue interface */
 struct dpaa_sec_dev_private {
 	void *sec_hw;
-	struct rte_mempool *ctx_pool; /* per dev mempool for dpaa_sec_op_ctx */
 	struct dpaa_sec_qp qps[RTE_DPAA_MAX_NB_SEC_QPS]; /* i/o queue for sec */
 	struct qman_fq inq[RTE_DPAA_MAX_RX_QUEUE];
 	unsigned char inq_attach[RTE_DPAA_MAX_RX_QUEUE];
-- 
2.17.1
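
For context, a minimal application-side sketch of how one queue pair per
lcore lines up with the per-qp context pools created in
dpaa_sec_queue_pair_setup(). The helper name setup_per_lcore_qps and the
NB_DESC descriptor count are illustrative assumptions, not part of this
patch, and the session-mempool fields of rte_cryptodev_qp_conf are left
unset for brevity.

#include <rte_cryptodev.h>
#include <rte_lcore.h>
#include <rte_memory.h>

#define NB_DESC 2048	/* illustrative ring size, not from the patch */

static int
setup_per_lcore_qps(uint8_t dev_id)
{
	/* One queue pair per lcore, so each core ends up with its own
	 * context pool after this patch. */
	struct rte_cryptodev_config conf = {
		.socket_id = SOCKET_ID_ANY,
		.nb_queue_pairs = (uint16_t)rte_lcore_count(),
	};
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = NB_DESC,
	};
	uint16_t qp_id;
	int ret;

	ret = rte_cryptodev_configure(dev_id, &conf);
	if (ret < 0)
		return ret;

	/* With this patch, each setup call creates a dedicated
	 * "ctx_pool_d<dev>_qp<id>" mempool inside the dpaa_sec PMD. */
	for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) {
		ret = rte_cryptodev_queue_pair_setup(dev_id, qp_id,
						     &qp_conf, SOCKET_ID_ANY);
		if (ret < 0)
			return ret;
	}

	return rte_cryptodev_start(dev_id);
}

If each lcore then enqueues only on its own queue pair, the per-core
ctx_pool lookup in dpaa_sec_alloc_ctx() no longer lands on a single
device-wide mempool shared by all cores.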


Thread overview: 4+ messages
2019-09-30 11:54 [dpdk-dev] [PATCH] crypto/dpaa2_sec: allocate context as per num segs Akhil Goyal
2019-09-30 11:54 ` [dpdk-dev] [PATCH] crypto/dpaa_sec: dynamic contxt buffer for SG cases Akhil Goyal
2019-09-30 11:54 ` Akhil Goyal [this message]
2019-09-30 11:54 ` [dpdk-dev] [PATCH] crypto/dpaa2_sec: improve debug logging Akhil Goyal
