DPDK patches and discussions
From: Akhil Goyal <akhil.goyal@nxp.com>
To: "dev@dpdk.org" <dev@dpdk.org>
Cc: Hemant Agrawal <hemant.agrawal@nxp.com>,
	"pablo.de.lara.guarch@intel.com" <pablo.de.lara.guarch@intel.com>,
	Akhil Goyal <akhil.goyal@nxp.com>
Subject: [dpdk-dev] [PATCH] crypto/dpaa_sec: support same session flows on multi cores
Date: Wed, 9 Jan 2019 15:14:17 +0000
Message-ID: <20190109150612.20803-2-akhil.goyal@nxp.com>
In-Reply-To: <20190109150612.20803-1-akhil.goyal@nxp.com>

In dpaa_sec, each session should have a separate in-queue (inq),
and each inq should be mapped to an outq.
So if multiple flows of the same SA land on different cores
due to the RSS distribution of the NIC, an enqueue from any of
those cores will try to re-assign the inq-outq mapping, which
will fail.
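
To illustrate the failure mode, here is the pre-patch enqueue check,
condensed from dpaa_sec_enqueue_burst() in the first hunk below (a
sketch, not the exact code): the session holds a single qp, so the
first core claims it and every other core trips the ownership check
and has its burst cut short.

	/* Pre-patch: one qp/inq per session. */
	if (unlikely(!ses->qp)) {
		/* First core to enqueue attaches the session queue. */
		if (dpaa_sec_attach_sess_q(qp, ses)) {
			frames_to_send = loop;
			nb_ops = loop;
			goto send_pkts;
		}
	} else if (unlikely(ses->qp != qp)) {
		/* Any other core fails the ownership check. */
		DPAA_SEC_DP_ERR("Old:sess->qp = %p New qp = %p\n",
				ses->qp, qp);
		frames_to_send = loop;
		nb_ops = loop;
		goto send_pkts;
	}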

In this patch, a separate inq is allocated for each core and used
from that core. The number of outqs remains the same, and the
pointer to the outq is saved in the session for each of the cores.
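
For illustration, a minimal self-contained sketch of the per-core
lookup (dpaa_sec_session_sketch and session_inq_for_this_core are
hypothetical names used only here; MAX_DPAA_CORES, struct dpaa_sec_qp
and struct qman_fq follow the definitions in this patch):

	#include <rte_lcore.h>		/* rte_lcore_id() */

	#define MAX_DPAA_CORES 4	/* as added to dpaa_sec.h */

	struct dpaa_sec_qp;		/* holds the outq; see dpaa_sec.h */
	struct qman_fq;			/* in-queue frame queue */

	/* Each session now keeps one qp (outq) and one inq per core. */
	struct dpaa_sec_session_sketch {
		struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
		struct qman_fq *inq[MAX_DPAA_CORES];
	};

	/* The enqueue path indexes the slot of the running lcore, so
	 * cores receiving flows of the same SA never re-assign each
	 * other's inq-outq mapping. */
	static inline struct qman_fq *
	session_inq_for_this_core(struct dpaa_sec_session_sketch *ses)
	{
		return ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
	}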

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 65 +++++++++++++++++++-----------
 drivers/crypto/dpaa_sec/dpaa_sec.h |  9 +++--
 2 files changed, 46 insertions(+), 28 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index b5896c4f7..6b05ac8bd 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -1683,15 +1683,18 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 				nb_ops = loop;
 				goto send_pkts;
 			}
-			if (unlikely(!ses->qp)) {
+			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
 				if (dpaa_sec_attach_sess_q(qp, ses)) {
 					frames_to_send = loop;
 					nb_ops = loop;
 					goto send_pkts;
 				}
-			} else if (unlikely(ses->qp != qp)) {
+			} else if (unlikely(ses->qp[rte_lcore_id() %
+						MAX_DPAA_CORES] != qp)) {
 				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
-					" New qp = %p\n", ses->qp, qp);
+					" New qp = %p\n",
+					ses->qp[rte_lcore_id() %
+					MAX_DPAA_CORES], qp);
 				frames_to_send = loop;
 				nb_ops = loop;
 				goto send_pkts;
@@ -1743,7 +1746,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			}
 
 			fd = &fds[loop];
-			inq[loop] = ses->inq;
+			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
 			fd->opaque_addr = 0;
 			fd->cmd = 0;
 			qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
@@ -1970,7 +1973,7 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 {
 	int ret;
 
-	sess->qp = qp;
+	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
 	ret = dpaa_sec_prep_cdb(sess);
 	if (ret) {
 		DPAA_SEC_ERR("Unable to prepare sec cdb");
@@ -1983,7 +1986,8 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
 			return ret;
 		}
 	}
-	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
+	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
+			       dpaa_mem_vtop(&sess->cdb),
 			       qman_fq_fqid(&qp->outq));
 	if (ret)
 		DPAA_SEC_ERR("Unable to init sec queue");
@@ -1997,6 +2001,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 {
 	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
 	dpaa_sec_session *session = sess;
+	uint32_t i;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -2053,12 +2058,15 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
 	}
 	session->ctx_pool = internals->ctx_pool;
 	rte_spinlock_lock(&internals->lock);
-	session->inq = dpaa_sec_attach_rxq(internals);
-	rte_spinlock_unlock(&internals->lock);
-	if (session->inq == NULL) {
-		DPAA_SEC_ERR("unable to attach sec queue");
-		goto err1;
+	for (i = 0; i < MAX_DPAA_CORES; i++) {
+		session->inq[i] = dpaa_sec_attach_rxq(internals);
+		if (session->inq[i] == NULL) {
+			DPAA_SEC_ERR("unable to attach sec queue");
+			rte_spinlock_unlock(&internals->lock);
+			goto err1;
+		}
 	}
+	rte_spinlock_unlock(&internals->lock);
 
 	return 0;
 
@@ -2118,8 +2126,9 @@ dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
 	if (sess_priv) {
 		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
 
-		if (s->inq)
-			dpaa_sec_detach_rxq(qi, s->inq);
+		if (s->inq[rte_lcore_id() % MAX_DPAA_CORES])
+			dpaa_sec_detach_rxq(qi,
+				s->inq[rte_lcore_id() % MAX_DPAA_CORES]);
 		rte_free(s->cipher_key.data);
 		rte_free(s->auth_key.data);
 		memset(s, 0, sizeof(dpaa_sec_session));
@@ -2138,6 +2147,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
 	struct rte_crypto_auth_xform *auth_xform = NULL;
 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
+	uint32_t i;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -2257,12 +2267,15 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
 		goto out;
 	session->ctx_pool = internals->ctx_pool;
 	rte_spinlock_lock(&internals->lock);
-	session->inq = dpaa_sec_attach_rxq(internals);
-	rte_spinlock_unlock(&internals->lock);
-	if (session->inq == NULL) {
-		DPAA_SEC_ERR("unable to attach sec queue");
-		goto out;
+	for (i = 0; i < MAX_DPAA_CORES; i++) {
+		session->inq[i] = dpaa_sec_attach_rxq(internals);
+		if (session->inq[i] == NULL) {
+			DPAA_SEC_ERR("unable to attach sec queue");
+			rte_spinlock_unlock(&internals->lock);
+			goto out;
+		}
 	}
+	rte_spinlock_unlock(&internals->lock);
 
 	return 0;
 out:
@@ -2283,6 +2296,7 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
 	dpaa_sec_session *session = (dpaa_sec_session *)sess;
 	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
+	uint32_t i;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -2364,12 +2378,15 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 
 	session->ctx_pool = dev_priv->ctx_pool;
 	rte_spinlock_lock(&dev_priv->lock);
-	session->inq = dpaa_sec_attach_rxq(dev_priv);
-	rte_spinlock_unlock(&dev_priv->lock);
-	if (session->inq == NULL) {
-		DPAA_SEC_ERR("unable to attach sec queue");
-		goto out;
+	for (i = 0; i < MAX_DPAA_CORES; i++) {
+		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
+		if (session->inq[i] == NULL) {
+			DPAA_SEC_ERR("unable to attach sec queue");
+			rte_spinlock_unlock(&dev_priv->lock);
+			goto out;
+		}
 	}
+	rte_spinlock_unlock(&dev_priv->lock);
 	return 0;
 out:
 	rte_free(session->auth_key.data);
@@ -2631,7 +2648,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 
 	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
 		QMAN_FQ_FLAG_TO_DCPORTAL;
-	for (i = 0; i < internals->max_nb_sessions; i++) {
+	for (i = 0; i < MAX_DPAA_CORES * internals->max_nb_sessions; i++) {
 		/* create rx qman fq for sessions*/
 		ret = qman_create_fq(0, flags, &internals->inq[i]);
 		if (unlikely(ret != 0)) {
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 6049c1d52..75c0960a9 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -10,6 +10,7 @@
 #define CRYPTODEV_NAME_DPAA_SEC_PMD	crypto_dpaa_sec
 /**< NXP DPAA - SEC PMD device name */
 
+#define MAX_DPAA_CORES		4
 #define NUM_POOL_CHANNELS	4
 #define DPAA_SEC_BURST		7
 #define DPAA_SEC_ALG_UNSUPPORT	(-1)
@@ -26,7 +27,7 @@
 #define CTX_POOL_NUM_BUFS	32000
 #define CTX_POOL_BUF_SIZE	sizeof(struct dpaa_sec_op_ctx)
 #define CTX_POOL_CACHE_SIZE	512
-#define RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS 2048
+#define RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS 1024
 
 #define DIR_ENC                 1
 #define DIR_DEC                 0
@@ -142,8 +143,8 @@ typedef struct dpaa_sec_session_entry {
 		};
 		struct sec_pdcp_ctxt pdcp;
 	};
-	struct dpaa_sec_qp *qp;
-	struct qman_fq *inq;
+	struct dpaa_sec_qp *qp[MAX_DPAA_CORES];
+	struct qman_fq *inq[MAX_DPAA_CORES];
 	struct sec_cdb cdb;	/**< cmd block associated with qp */
 	struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
 } dpaa_sec_session;
@@ -158,7 +159,7 @@ struct dpaa_sec_qp {
 };
 
 #define RTE_DPAA_MAX_NB_SEC_QPS 2
-#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+#define RTE_DPAA_MAX_RX_QUEUE (MAX_DPAA_CORES * RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS)
 #define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
 
 /* internal sec queue interface */
-- 
2.17.1


Thread overview: 6+ messages
2019-01-09 15:13 [dpdk-dev] [PATCH] crypto/dpaa_sec: support PDCP offload Akhil Goyal
2019-01-09 15:14 ` Akhil Goyal [this message]
2019-01-11  0:06   ` [dpdk-dev] [PATCH] crypto/dpaa_sec: support same session flows on multi cores De Lara Guarch, Pablo
2019-01-09 15:14 ` [dpdk-dev] [PATCH] crypto/dpaa2_sec: fix FLC address for physical mode Akhil Goyal
2019-01-11  0:05   ` De Lara Guarch, Pablo
2019-01-11  0:06 ` [dpdk-dev] [PATCH] crypto/dpaa_sec: support PDCP offload De Lara Guarch, Pablo
