DPDK patches and discussions
From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>,
	"Kiran Kumar K" <kirankumark@marvell.com>,
	John McNamara <john.mcnamara@intel.com>,
	Marko Kovacevic <marko.kovacevic@intel.com>
Cc: <dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Subject: [dpdk-dev] [PATCH 2/2] net/octeontx2: add devargs to lock Rx/Tx ctx
Date: Fri, 6 Mar 2020 22:05:23 +0530
Message-ID: <20200306163524.1650-2-pbhagavatula@marvell.com>
In-Reply-To: <20200306163524.1650-1-pbhagavatula@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add device arguments to lock Rx/Tx contexts in the NDC cache.
The application can lock Rx and/or Tx contexts per port by passing
the 'lock_rx_ctx' and 'lock_tx_ctx' devargs respectively.

Example:
	-w 0002:02:00.0,lock_rx_ctx=1 -w 0002:03:00.0,lock_tx_ctx=1
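
Both devargs can also be combined on a single port, for example
(the BDF shown is illustrative):
	-w 0002:02:00.0,lock_rx_ctx=1,lock_tx_ctx=1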

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 doc/guides/nics/octeontx2.rst               |  14 ++
 drivers/net/octeontx2/otx2_ethdev.c         | 179 +++++++++++++++++++-
 drivers/net/octeontx2/otx2_ethdev.h         |   2 +
 drivers/net/octeontx2/otx2_ethdev_devargs.c |  16 +-
 drivers/net/octeontx2/otx2_rss.c            |  23 +++
 5 files changed, 231 insertions(+), 3 deletions(-)
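
Note for reviewers: every lock/unlock request added below follows the
same flush-and-retry idiom when the shared mailbox region is full. A
condensed sketch of that idiom, using only the otx2 mbox helpers that
already appear in this patch (error handling trimmed; locking remains
best effort, so queue setup is not failed), looks like:

	struct nix_aq_enq_req *aq;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	if (!aq) {
		/* Shared mailbox memory is full: flush the queued
		 * messages and retry the allocation once.
		 */
		otx2_mbox_msg_send(mbox, 0);
		if (otx2_mbox_wait_for_rsp(mbox, 0) < 0)
			return 0;
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq)
			return 0;
	}
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;	/* or CQ/SQ/RSS as appropriate */
	aq->op = NIX_AQ_INSTOP_LOCK;	/* NIX_AQ_INSTOP_UNLOCK on teardown */
	rc = otx2_mbox_process(mbox);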

diff --git a/doc/guides/nics/octeontx2.rst b/doc/guides/nics/octeontx2.rst
index 819d09e11..6a13c9b26 100644
--- a/doc/guides/nics/octeontx2.rst
+++ b/doc/guides/nics/octeontx2.rst
@@ -207,6 +207,20 @@ Runtime Config Options
    With the above configuration, application can enable inline IPsec processing
    on 128 SAs (SPI 0-127).
 
+- ``Lock Rx contexts in NDC cache``
+
+   Lock Rx contexts in NDC cache by using ``lock_rx_ctx`` parameter.
+
+   For example::
+
+      -w 0002:02:00.0,lock_rx_ctx=1
+
+- ``Lock Tx contexts in NDC cache``
+
+   Lock Tx contexts in NDC cache by using ``lock_tx_ctx`` parameter.
+
+   For example::
+
+      -w 0002:02:00.0,lock_tx_ctx=1
+
 .. note::
 
    Above devarg parameters are configurable per device, user needs to pass the
diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index e60f4901c..592fef458 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -381,6 +381,38 @@ nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
 		goto fail;
 	}
 
+	if (dev->lock_rx_ctx) {
+		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		aq->qidx = qid;
+		aq->ctype = NIX_AQ_CTYPE_CQ;
+		aq->op = NIX_AQ_INSTOP_LOCK;
+
+		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!aq) {
+			/* The shared memory buffer can be full.
+			 * Flush it and retry
+			 */
+			otx2_mbox_msg_send(mbox, 0);
+			rc = otx2_mbox_wait_for_rsp(mbox, 0);
+			if (rc < 0) {
+				otx2_err("Failed to LOCK cq context");
+				return 0;
+			}
+
+			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+			if (!aq) {
+				otx2_err("Failed to LOCK rq context");
+				return 0;
+			}
+		}
+		aq->qidx = qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_LOCK;
+		rc = otx2_mbox_process(mbox);
+		if (rc < 0)
+			otx2_err("Failed to LOCK rq context");
+	}
+
 	return 0;
 fail:
 	return rc;
@@ -430,6 +462,38 @@ nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
 		return rc;
 	}
 
+	if (dev->lock_rx_ctx) {
+		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		aq->qidx = rxq->rq;
+		aq->ctype = NIX_AQ_CTYPE_CQ;
+		aq->op = NIX_AQ_INSTOP_UNLOCK;
+
+		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!aq) {
+			/* The shared memory buffer can be full.
+			 * Flush it and retry
+			 */
+			otx2_mbox_msg_send(mbox, 0);
+			rc = otx2_mbox_wait_for_rsp(mbox, 0);
+			if (rc < 0) {
+				otx2_err("Failed to UNLOCK cq context");
+				return 0;
+			}
+
+			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+			if (!aq) {
+				otx2_err("Failed to UNLOCK rq context");
+				return 0;
+			}
+		}
+		aq->qidx = rxq->rq;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_UNLOCK;
+		rc = otx2_mbox_process(mbox);
+		if (rc < 0)
+			otx2_err("Failed to UNLOCK rq context");
+	}
+
 	return 0;
 }
 
@@ -715,6 +779,90 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	return flags;
 }
 
+static int
+nix_sqb_lock(struct rte_mempool *mp)
+{
+	struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+	struct npa_aq_enq_req *req;
+	int rc;
+
+	req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+	req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+	req->ctype = NPA_AQ_CTYPE_AURA;
+	req->op = NPA_AQ_INSTOP_LOCK;
+
+	req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+	if (!req) {
+		/* The shared memory buffer can be full.
+		 * Flush it and retry
+		 */
+		otx2_mbox_msg_send(npa_lf->mbox, 0);
+		rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
+		if (rc < 0) {
+			otx2_err("Failed to LOCK AURA context");
+			return 0;
+		}
+
+		req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+		if (!req) {
+			otx2_err("Failed to LOCK POOL context");
+			return 0;
+		}
+	}
+
+	req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+	req->ctype = NPA_AQ_CTYPE_POOL;
+	req->op = NPA_AQ_INSTOP_LOCK;
+
+	rc = otx2_mbox_process(npa_lf->mbox);
+	if (rc < 0)
+		otx2_err("Unable to lock POOL in NDC");
+
+	return 0;
+}
+
+static int
+nix_sqb_unlock(struct rte_mempool *mp)
+{
+	struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+	struct npa_aq_enq_req *req;
+	int rc;
+
+	req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+	req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+	req->ctype = NPA_AQ_CTYPE_AURA;
+	req->op = NPA_AQ_INSTOP_UNLOCK;
+
+	req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+	if (!req) {
+		/* The shared memory buffer can be full.
+		 * Flush it and retry
+		 */
+		otx2_mbox_msg_send(npa_lf->mbox, 0);
+		rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
+		if (rc < 0) {
+			otx2_err("Failed to UNLOCK AURA context");
+			return 0;
+		}
+
+		req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+		if (!req) {
+			otx2_err("Failed to UNLOCK POOL context");
+			return 0;
+		}
+	}
+	req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+	req->ctype = NPA_AQ_CTYPE_POOL;
+	req->op = NPA_AQ_INSTOP_UNLOCK;
+
+	rc = otx2_mbox_process(npa_lf->mbox);
+	if (rc < 0)
+		otx2_err("Unable to UNLOCK AURA in NDC");
+
+	return 0;
+}
+
 static int
 nix_sq_init(struct otx2_eth_txq *txq)
 {
@@ -757,7 +905,20 @@ nix_sq_init(struct otx2_eth_txq *txq)
 	/* Many to one reduction */
 	sq->sq.qint_idx = txq->sq % dev->qints;
 
-	return otx2_mbox_process(mbox);
+	rc = otx2_mbox_process(mbox);
+	if (rc < 0)
+		return rc;
+
+	if (dev->lock_tx_ctx) {
+		sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		sq->qidx = txq->sq;
+		sq->ctype = NIX_AQ_CTYPE_SQ;
+		sq->op = NIX_AQ_INSTOP_LOCK;
+
+		rc = otx2_mbox_process(mbox);
+	}
+
+	return rc;
 }
 
 static int
@@ -800,6 +961,20 @@ nix_sq_uninit(struct otx2_eth_txq *txq)
 	if (rc)
 		return rc;
 
+	if (dev->lock_tx_ctx) {
+		/* Unlock sq */
+		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		aq->qidx = txq->sq;
+		aq->ctype = NIX_AQ_CTYPE_SQ;
+		aq->op = NIX_AQ_INSTOP_UNLOCK;
+
+		rc = otx2_mbox_process(mbox);
+		if (rc < 0)
+			return rc;
+
+		nix_sqb_unlock(txq->sqb_pool);
+	}
+
 	/* Read SQ and free sqb's */
 	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
 	aq->qidx = txq->sq;
@@ -921,6 +1096,8 @@ nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
 	}
 
 	nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
+	if (dev->lock_tx_ctx)
+		nix_sqb_lock(txq->sqb_pool);
 
 	return 0;
 fail:
diff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h
index e5684f9f0..71f8cf729 100644
--- a/drivers/net/octeontx2/otx2_ethdev.h
+++ b/drivers/net/octeontx2/otx2_ethdev.h
@@ -287,6 +287,8 @@ struct otx2_eth_dev {
 	uint16_t scalar_ena;
 	uint16_t rss_tag_as_xor;
 	uint16_t max_sqb_count;
+	uint16_t lock_rx_ctx;
+	uint16_t lock_tx_ctx;
 	uint16_t rx_offload_flags; /* Selected Rx offload flags(NIX_RX_*_F) */
 	uint64_t rx_offloads;
 	uint16_t tx_offload_flags; /* Selected Tx offload flags(NIX_TX_*_F) */
diff --git a/drivers/net/octeontx2/otx2_ethdev_devargs.c b/drivers/net/octeontx2/otx2_ethdev_devargs.c
index bc11f54b5..0857d8247 100644
--- a/drivers/net/octeontx2/otx2_ethdev_devargs.c
+++ b/drivers/net/octeontx2/otx2_ethdev_devargs.c
@@ -124,6 +124,8 @@ parse_switch_header_type(const char *key, const char *value, void *extra_args)
 #define OTX2_FLOW_MAX_PRIORITY "flow_max_priority"
 #define OTX2_SWITCH_HEADER_TYPE "switch_header"
 #define OTX2_RSS_TAG_AS_XOR "tag_as_xor"
+#define OTX2_LOCK_RX_CTX "lock_rx_ctx"
+#define OTX2_LOCK_TX_CTX "lock_tx_ctx"
 
 int
 otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
@@ -134,9 +136,11 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
 	uint16_t switch_header_type = 0;
 	uint16_t flow_max_priority = 3;
 	uint16_t ipsec_in_max_spi = 1;
-	uint16_t scalar_enable = 0;
 	uint16_t rss_tag_as_xor = 0;
+	uint16_t scalar_enable = 0;
 	struct rte_kvargs *kvlist;
+	uint16_t lock_rx_ctx = 0;
+	uint16_t lock_tx_ctx = 0;
 
 	if (devargs == NULL)
 		goto null_devargs;
@@ -161,6 +165,10 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
 			   &parse_switch_header_type, &switch_header_type);
 	rte_kvargs_process(kvlist, OTX2_RSS_TAG_AS_XOR,
 			   &parse_flag, &rss_tag_as_xor);
+	rte_kvargs_process(kvlist, OTX2_LOCK_RX_CTX,
+			   &parse_flag, &lock_rx_ctx);
+	rte_kvargs_process(kvlist, OTX2_LOCK_TX_CTX,
+			   &parse_flag, &lock_tx_ctx);
 	otx2_parse_common_devargs(kvlist);
 	rte_kvargs_free(kvlist);
 
@@ -169,6 +177,8 @@ otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
 	dev->scalar_ena = scalar_enable;
 	dev->rss_tag_as_xor = rss_tag_as_xor;
 	dev->max_sqb_count = sqb_count;
+	dev->lock_rx_ctx = lock_rx_ctx;
+	dev->lock_tx_ctx = lock_tx_ctx;
 	dev->rss_info.rss_size = rss_size;
 	dev->npc_flow.flow_prealloc_size = flow_prealloc_size;
 	dev->npc_flow.flow_max_priority = flow_max_priority;
@@ -187,4 +197,6 @@ RTE_PMD_REGISTER_PARAM_STRING(net_octeontx2,
 			      OTX2_FLOW_PREALLOC_SIZE "=<1-32>"
 			      OTX2_FLOW_MAX_PRIORITY "=<1-32>"
 			      OTX2_SWITCH_HEADER_TYPE "=<higig2|dsa>"
-			      OTX2_RSS_TAG_AS_XOR "=1");
+			      OTX2_RSS_TAG_AS_XOR "=1"
+			      OTX2_LOCK_RX_CTX "=1"
+			      OTX2_LOCK_TX_CTX "=1");
diff --git a/drivers/net/octeontx2/otx2_rss.c b/drivers/net/octeontx2/otx2_rss.c
index 7a8c8f3de..34005ef02 100644
--- a/drivers/net/octeontx2/otx2_rss.c
+++ b/drivers/net/octeontx2/otx2_rss.c
@@ -33,6 +33,29 @@ otx2_nix_rss_tbl_init(struct otx2_eth_dev *dev,
 		req->qidx = (group * rss->rss_size) + idx;
 		req->ctype = NIX_AQ_CTYPE_RSS;
 		req->op = NIX_AQ_INSTOP_INIT;
+
+		if (!dev->lock_rx_ctx)
+			continue;
+
+		req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!req) {
+			/* The shared memory buffer can be full.
+			 * Flush it and retry
+			 */
+			otx2_mbox_msg_send(mbox, 0);
+			rc = otx2_mbox_wait_for_rsp(mbox, 0);
+			if (rc < 0)
+				return rc;
+
+			req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+			if (!req)
+				return -ENOMEM;
+		}
+		req->rss.rq = ind_tbl[idx];
+		/* Fill AQ info */
+		req->qidx = (group * rss->rss_size) + idx;
+		req->ctype = NIX_AQ_CTYPE_RSS;
+		req->op = NIX_AQ_INSTOP_LOCK;
 	}
 
 	otx2_mbox_msg_send(mbox, 0);
-- 
2.17.1


Thread overview: 28+ messages
2020-03-06 16:35 [dpdk-dev] [PATCH 1/2] mempool/octeontx2: add devargs to lock ctx in cache pbhagavatula
2020-03-06 16:35 ` pbhagavatula [this message]
2020-03-19  9:36   ` [dpdk-dev] [PATCH 2/2] net/octeontx2: add devargs to lock Rx/Tx ctx Andrzej Ostruszka
2020-03-19 13:56     ` Pavan Nikhilesh Bhagavatula
2020-03-19  9:36 ` [dpdk-dev] [PATCH 1/2] mempool/octeontx2: add devargs to lock ctx in cache Andrzej Ostruszka
2020-03-19 13:35   ` Pavan Nikhilesh Bhagavatula
2020-03-24 16:53 ` [dpdk-dev] [dpdk-dev v2] " pbhagavatula
2020-03-24 16:53   ` [dpdk-dev] [dpdk-dev v2] [PATCH 2/2] net/octeontx2: add devargs to lock Rx/Tx ctx pbhagavatula
2020-03-25  6:51   ` [dpdk-dev] [dpdk-dev v2] [PATCH 1/2] mempool/octeontx2: add devargs to lock ctx in cache Jerin Jacob
2020-03-26  6:33   ` [dpdk-dev] [dpdk-dev v3] [PATCH] net/octeontx2: add devargs to lock Rx/Tx ctx pbhagavatula
2020-03-26 15:56     ` Andrzej Ostruszka [C]
2020-03-27  9:53     ` [dpdk-dev] [PATCH v4] " pbhagavatula
2020-03-27 16:19       ` Andrzej Ostruszka
2020-03-27 17:49         ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2020-03-31 13:58       ` [dpdk-dev] [PATCH v5] " pbhagavatula
2020-06-26  5:00         ` Jerin Jacob
2020-06-28 22:18         ` [dpdk-dev] [PATCH v6] " pbhagavatula
2020-07-02  9:46           ` Jerin Jacob
2020-03-26  6:34   ` [dpdk-dev] [dpdk-dev v3] [PATCH] mempool/octeontx2: add devargs to lock ctx in cache pbhagavatula
2020-04-06  8:39     ` Jerin Jacob
2020-04-16 22:33       ` Thomas Monjalon
2020-04-21  7:37         ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2020-04-22  8:06     ` [dpdk-dev] [PATCH v4] " pbhagavatula
2020-05-01 10:21       ` Pavan Nikhilesh Bhagavatula
2020-05-04 22:43         ` Thomas Monjalon
2020-05-10 22:35           ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula
2020-05-11 10:07       ` [dpdk-dev] [PATCH v5] " pbhagavatula
2020-05-19 16:15         ` Thomas Monjalon
