DPDK patches and discussions
* [dpdk-dev] [PATCH 1/2] common/cnxk: add SSO XAQ pool create and free
@ 2021-09-02  7:00 pbhagavatula
  2021-09-02  7:00 ` [dpdk-dev] [PATCH 2/2] event/cnxk: use common XAQ pool APIs pbhagavatula
  2021-11-03  0:52 ` [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free pbhagavatula
  0 siblings, 2 replies; 8+ messages in thread
From: pbhagavatula @ 2021-09-02  7:00 UTC (permalink / raw)
  To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Ray Kinsella
  Cc: dev, Pavan Nikhilesh

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add common API to create and free SSO XAQ pool.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 Depends-on: series-18612 ("net/cnxk: support for inline ipsec")
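
 A hedged usage sketch from a driver's point of view (only the two
 roc_sso_hwgrp_*_xaq_aura() calls below come from this patch; the
 surrounding variables are illustrative):

	struct roc_sso *sso = &dev->sso;
	int rc;

	/* Create an XAQ aura/pool sized for nb_xae admission entries. */
	rc = roc_sso_hwgrp_init_xaq_aura(sso, nb_xae);
	if (rc < 0)
		return rc;

	/* ... hand the aura to HWGRPs via roc_sso_hwgrp_alloc_xaq() ... */

	/* On teardown, release XAQs from HWGRPs and destroy the pool. */
	rc = roc_sso_hwgrp_free_xaq_aura(sso, sso->nb_hwgrp);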

 drivers/common/cnxk/roc_sso.c      | 122 +++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_sso.h      |  14 ++++
 drivers/common/cnxk/roc_sso_priv.h |   5 ++
 drivers/common/cnxk/version.map    |   2 +
 4 files changed, 143 insertions(+)

diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index bdf973fc2a..31cae30c88 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -5,6 +5,8 @@
 #include "roc_api.h"
 #include "roc_priv.h"

+#define SSO_XAQ_CACHE_CNT (0x7)
+
 /* Private functions. */
 int
 sso_lf_alloc(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf,
@@ -387,6 +389,126 @@ roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
 	return mbox_process(dev->mbox);
 }

+int
+sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+			uint32_t nb_xae, uint32_t xae_waes,
+			uint32_t xaq_buf_size, uint16_t nb_hwgrp)
+{
+	struct npa_pool_s pool;
+	struct npa_aura_s aura;
+	plt_iova_t iova;
+	uint32_t i;
+	int rc;
+
+	if (xaq->mem != NULL) {
+		rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
+		if (rc < 0) {
+			plt_err("Failed to release XAQ %d", rc);
+			return rc;
+		}
+		roc_npa_pool_destroy(xaq->aura_handle);
+		plt_free(xaq->fc);
+		plt_free(xaq->mem);
+		memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
+	}
+
+	xaq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
+	if (xaq->fc == NULL) {
+		plt_err("Failed to allocate XAQ FC");
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	/* Taken from HRM 14.3.3(4) */
+	nb_xae += (xae_waes * SSO_XAQ_CACHE_CNT * nb_hwgrp);
+	xaq->nb_xae = nb_xae;
+	xaq->nb_xaq = xaq->nb_xae / xae_waes;
+
+	xaq->mem = plt_zmalloc(xaq_buf_size * xaq->nb_xaq, xaq_buf_size);
+	if (xaq->mem == NULL) {
+		plt_err("Failed to allocate XAQ mem");
+		rc = -ENOMEM;
+		goto free_fc;
+	}
+
+	memset(&pool, 0, sizeof(struct npa_pool_s));
+	pool.nat_align = 1;
+
+	memset(&aura, 0, sizeof(aura));
+	aura.fc_ena = 1;
+	aura.fc_addr = (uint64_t)xaq->fc;
+	aura.fc_hyst_bits = 0; /* Store count on all updates */
+	rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
+				 &aura, &pool);
+	if (rc) {
+		plt_err("Failed to create XAQ pool");
+		goto npa_fail;
+	}
+
+	iova = (uint64_t)xaq->mem;
+	for (i = 0; i < xaq->nb_xaq; i++) {
+		roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
+		iova += xaq_buf_size;
+	}
+	roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
+
+	/* When SW does addwork (enqueue) check if there is space in XAQ by
+	 * comparing fc_addr above against the xaq_lmt calculated below.
+	 * There should be a minimum headroom of one XAQ per HWGRP for SSO
+	 * to request XAQ to cache them even before enqueue is called.
+	 */
+	xaq->xaq_lmt = xaq->nb_xaq - nb_hwgrp;
+	return 0;
+npa_fail:
+	plt_free(xaq->mem);
+free_fc:
+	plt_free(xaq->fc);
+fail:
+	memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
+	return rc;
+}
+
+int
+roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae)
+{
+	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+
+	return sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
+				       roc_sso->xae_waes, roc_sso->xaq_buf_size,
+				       roc_sso->nb_hwgrp);
+}
+
+int
+sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+			uint16_t nb_hwgrp)
+{
+	int rc;
+
+	if (xaq->mem != NULL) {
+		if (nb_hwgrp) {
+			rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
+			if (rc < 0) {
+				plt_err("Failed to release XAQ %d", rc);
+				return rc;
+			}
+		}
+		roc_npa_pool_destroy(xaq->aura_handle);
+		plt_free(xaq->fc);
+		plt_free(xaq->mem);
+	}
+	memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
+
+	return 0;
+}
+
+int
+roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, uint16_t nb_hwgrp)
+{
+	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+
+	return sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
+}
+
 int
 sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps)
 {
diff --git a/drivers/common/cnxk/roc_sso.h b/drivers/common/cnxk/roc_sso.h
index b28f6089cc..27d49c6c68 100644
--- a/drivers/common/cnxk/roc_sso.h
+++ b/drivers/common/cnxk/roc_sso.h
@@ -27,6 +27,15 @@ struct roc_sso_hwgrp_stats {
 	uint64_t page_cnt;
 };

+struct roc_sso_xaq_data {
+	uint32_t nb_xaq;
+	uint32_t nb_xae;
+	uint32_t xaq_lmt;
+	uint64_t aura_handle;
+	void *fc;
+	void *mem;
+};
+
 struct roc_sso {
 	struct plt_pci_device *pci_dev;
 	/* Public data. */
@@ -35,6 +44,7 @@ struct roc_sso {
 	uint16_t nb_hwgrp;
 	uint8_t nb_hws;
 	uintptr_t lmt_base;
+	struct roc_sso_xaq_data xaq;
 	/* HW Const. */
 	uint32_t xae_waes;
 	uint32_t xaq_buf_size;
@@ -95,6 +105,10 @@ int __roc_api roc_sso_hwgrp_hws_link_status(struct roc_sso *roc_sso,
 uintptr_t __roc_api roc_sso_hws_base_get(struct roc_sso *roc_sso, uint8_t hws);
 uintptr_t __roc_api roc_sso_hwgrp_base_get(struct roc_sso *roc_sso,
 					   uint16_t hwgrp);
+int __roc_api roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso,
+					  uint32_t nb_xae);
+int __roc_api roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso,
+					  uint16_t nb_hwgrp);

 /* Debug */
 void __roc_api roc_sso_dump(struct roc_sso *roc_sso, uint8_t nb_hws,
diff --git a/drivers/common/cnxk/roc_sso_priv.h b/drivers/common/cnxk/roc_sso_priv.h
index 8dffa3fbf4..2e1b025d1c 100644
--- a/drivers/common/cnxk/roc_sso_priv.h
+++ b/drivers/common/cnxk/roc_sso_priv.h
@@ -47,6 +47,11 @@ void sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp,
 			 uint16_t hwgrp[], uint16_t n, uint16_t enable);
 int sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps);
 int sso_hwgrp_release_xaq(struct dev *dev, uint16_t hwgrps);
+int sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+			    uint32_t nb_xae, uint32_t xae_waes,
+			    uint32_t xaq_buf_size, uint16_t nb_hwgrp);
+int sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+			    uint16_t nb_hwgrp);

 /* SSO IRQ */
 int sso_register_irqs_priv(struct roc_sso *roc_sso,
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 9fcc677e34..153c45b910 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -280,7 +280,9 @@ INTERNAL {
 	roc_sso_dump;
 	roc_sso_hwgrp_alloc_xaq;
 	roc_sso_hwgrp_base_get;
+	roc_sso_hwgrp_free_xaq_aura;
 	roc_sso_hwgrp_hws_link_status;
+	roc_sso_hwgrp_init_xaq_aura;
 	roc_sso_hwgrp_qos_config;
 	roc_sso_hwgrp_release_xaq;
 	roc_sso_hwgrp_set_priority;
--
2.32.0



* [dpdk-dev] [PATCH 2/2] event/cnxk: use common XAQ pool APIs
  2021-09-02  7:00 [dpdk-dev] [PATCH 1/2] common/cnxk: add SSO XAQ pool create and free pbhagavatula
@ 2021-09-02  7:00 ` pbhagavatula
  2021-11-03  0:52 ` [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free pbhagavatula
  1 sibling, 0 replies; 8+ messages in thread
From: pbhagavatula @ 2021-09-02  7:00 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Use the common APIs to create and free the XAQ pool.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
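 In short, the driver-local mempool/memzone plumbing collapses into
 two common calls; a condensed sketch of the new flow (error handling
 elided, names as in the hunks below):

	xae_cnt = dev->xae_cnt ? dev->xae_cnt :
		  dev->adptr_xae_cnt ? dev->adptr_xae_cnt : dev->sso.iue;
	rc = roc_sso_hwgrp_init_xaq_aura(&dev->sso, xae_cnt);
	dev->xaq_lmt = dev->sso.xaq.xaq_lmt;
	dev->fc_iova = (uint64_t)dev->sso.xaq.fc;
	rc = roc_sso_hwgrp_alloc_xaq(
		&dev->sso,
		roc_npa_aura_handle_to_aura(dev->sso.xaq.aura_handle),
		dev->nb_event_queues);
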
 drivers/event/cnxk/cn10k_eventdev.c |   4 +-
 drivers/event/cnxk/cn9k_eventdev.c  |   6 +-
 drivers/event/cnxk/cnxk_eventdev.c  | 127 ++++------------------------
 drivers/event/cnxk/cnxk_eventdev.h  |   3 -
 4 files changed, 21 insertions(+), 119 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index bd1cf55d2c..ed185262d1 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -98,7 +98,7 @@ cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
 
 	rte_memcpy(ws->grps_base, grps_base,
 		   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
-	ws->fc_mem = dev->fc_mem;
+	ws->fc_mem = (uint64_t *)dev->fc_iova;
 	ws->xaq_lmt = dev->xaq_lmt;
 
 	/* Set get_work timeout for HWS */
@@ -467,8 +467,6 @@ cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
 		return -EINVAL;
 	}
 
-	roc_sso_rsrc_fini(&dev->sso);
-
 	rc = cn10k_sso_rsrc_init(dev, dev->nb_event_ports,
 				 dev->nb_event_queues);
 	if (rc < 0) {
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 0c7206cb96..7a2dbcbe35 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -100,7 +100,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
 		dws = hws;
 		rte_memcpy(dws->grps_base, grps_base,
 			   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
-		dws->fc_mem = dev->fc_mem;
+		dws->fc_mem = (uint64_t *)dev->fc_iova;
 		dws->xaq_lmt = dev->xaq_lmt;
 
 		plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
@@ -109,7 +109,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
 		ws = hws;
 		rte_memcpy(ws->grps_base, grps_base,
 			   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
-		ws->fc_mem = dev->fc_mem;
+		ws->fc_mem = (uint64_t *)dev->fc_iova;
 		ws->xaq_lmt = dev->xaq_lmt;
 
 		plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
@@ -728,8 +728,6 @@ cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
 		return -EINVAL;
 	}
 
-	roc_sso_rsrc_fini(&dev->sso);
-
 	rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
 	if (rc < 0) {
 		plt_err("Failed to initialize SSO resources");
diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index 9a87239a59..84bf8cb6d1 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -125,101 +125,30 @@ cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
 int
 cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
 {
-	char pool_name[RTE_MEMZONE_NAMESIZE];
-	uint32_t xaq_cnt, npa_aura_id;
-	const struct rte_memzone *mz;
-	struct npa_aura_s *aura;
-	static int reconfig_cnt;
+	uint32_t xae_cnt;
 	int rc;
 
-	if (dev->xaq_pool) {
-		rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
-		if (rc < 0) {
-			plt_err("Failed to release XAQ %d", rc);
-			return rc;
-		}
-		rte_mempool_free(dev->xaq_pool);
-		dev->xaq_pool = NULL;
-	}
-
-	/*
-	 * Allocate memory for Add work backpressure.
-	 */
-	mz = rte_memzone_lookup(CNXK_SSO_FC_NAME);
-	if (mz == NULL)
-		mz = rte_memzone_reserve_aligned(CNXK_SSO_FC_NAME,
-						 sizeof(struct npa_aura_s) +
-							 RTE_CACHE_LINE_SIZE,
-						 0, 0, RTE_CACHE_LINE_SIZE);
-	if (mz == NULL) {
-		plt_err("Failed to allocate mem for fcmem");
-		return -ENOMEM;
-	}
-
-	dev->fc_iova = mz->iova;
-	dev->fc_mem = mz->addr;
-
-	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem +
-				     RTE_CACHE_LINE_SIZE);
-	memset(aura, 0, sizeof(struct npa_aura_s));
-
-	aura->fc_ena = 1;
-	aura->fc_addr = dev->fc_iova;
-	aura->fc_hyst_bits = 0; /* Store count on all updates */
-
-	/* Taken from HRM 14.3.3(4) */
-	xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
+	xae_cnt = 0;
 	if (dev->xae_cnt)
-		xaq_cnt += dev->xae_cnt / dev->sso.xae_waes;
+		xae_cnt += dev->xae_cnt;
 	else if (dev->adptr_xae_cnt)
-		xaq_cnt += (dev->adptr_xae_cnt / dev->sso.xae_waes) +
-			   (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
+		xae_cnt += (dev->adptr_xae_cnt);
 	else
-		xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
-			   (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
-
-	plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
-	/* Setup XAQ based on number of nb queues. */
-	snprintf(pool_name, 30, "cnxk_xaq_buf_pool_%d", reconfig_cnt);
-	dev->xaq_pool = (void *)rte_mempool_create_empty(
-		pool_name, xaq_cnt, dev->sso.xaq_buf_size, 0, 0,
-		rte_socket_id(), 0);
-
-	if (dev->xaq_pool == NULL) {
-		plt_err("Unable to create empty mempool.");
-		rte_memzone_free(mz);
-		return -ENOMEM;
-	}
-
-	rc = rte_mempool_set_ops_byname(dev->xaq_pool,
-					rte_mbuf_platform_mempool_ops(), aura);
-	if (rc != 0) {
-		plt_err("Unable to set xaqpool ops.");
-		goto alloc_fail;
-	}
+		xae_cnt += dev->sso.iue;
 
-	rc = rte_mempool_populate_default(dev->xaq_pool);
+	plt_sso_dbg("Configuring %d xae buffers", xae_cnt);
+	rc = roc_sso_hwgrp_init_xaq_aura(&dev->sso, xae_cnt);
 	if (rc < 0) {
-		plt_err("Unable to set populate xaqpool.");
-		goto alloc_fail;
+		plt_err("Failed to configure XAQ aura");
+		return rc;
 	}
-	reconfig_cnt++;
-	/* When SW does addwork (enqueue) check if there is space in XAQ by
-	 * comparing fc_addr above against the xaq_lmt calculated below.
-	 * There should be a minimum headroom (CNXK_SSO_XAQ_SLACK / 2) for SSO
-	 * to request XAQ to cache them even before enqueue is called.
-	 */
-	dev->xaq_lmt =
-		xaq_cnt - (CNXK_SSO_XAQ_SLACK / 2 * dev->nb_event_queues);
-	dev->nb_xaq_cfg = xaq_cnt;
-
-	npa_aura_id = roc_npa_aura_handle_to_aura(dev->xaq_pool->pool_id);
-	return roc_sso_hwgrp_alloc_xaq(&dev->sso, npa_aura_id,
-				       dev->nb_event_queues);
-alloc_fail:
-	rte_mempool_free(dev->xaq_pool);
-	rte_memzone_free(mz);
-	return rc;
+	dev->xaq_lmt = dev->sso.xaq.xaq_lmt;
+	dev->fc_iova = (uint64_t)dev->sso.xaq.fc;
+
+	return roc_sso_hwgrp_alloc_xaq(
+		&dev->sso,
+		roc_npa_aura_handle_to_aura(dev->sso.xaq.aura_handle),
+		dev->nb_event_queues);
 }
 
 int
@@ -231,14 +160,6 @@ cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev)
 	if (event_dev->data->dev_started)
 		event_dev->dev_ops->dev_stop(event_dev);
 
-	rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
-	if (rc < 0) {
-		plt_err("Failed to release XAQ %d", rc);
-		return rc;
-	}
-
-	rte_mempool_free(dev->xaq_pool);
-	dev->xaq_pool = NULL;
 	rc = cnxk_sso_xaq_allocate(dev);
 	if (rc < 0) {
 		plt_err("Failed to alloc XAQ %d", rc);
@@ -320,7 +241,6 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
 	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	uint32_t deq_tmo_ns;
-	int rc;
 
 	deq_tmo_ns = conf->dequeue_timeout_ns;
 
@@ -354,15 +274,8 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
 		return -EINVAL;
 	}
 
-	if (dev->xaq_pool) {
-		rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
-		if (rc < 0) {
-			plt_err("Failed to release XAQ %d", rc);
-			return rc;
-		}
-		rte_mempool_free(dev->xaq_pool);
-		dev->xaq_pool = NULL;
-	}
+	roc_sso_rsrc_fini(&dev->sso);
+	roc_sso_hwgrp_free_xaq_aura(&dev->sso, dev->sso.nb_hwgrp);
 
 	dev->nb_event_queues = conf->nb_event_queues;
 	dev->nb_event_ports = conf->nb_event_ports;
@@ -556,12 +469,8 @@ cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn)
 	}
 
 	roc_sso_rsrc_fini(&dev->sso);
-	rte_mempool_free(dev->xaq_pool);
-	rte_memzone_free(rte_memzone_lookup(CNXK_SSO_FC_NAME));
 
 	dev->fc_iova = 0;
-	dev->fc_mem = NULL;
-	dev->xaq_pool = NULL;
 	dev->configured = false;
 	dev->is_timeout_deq = 0;
 	dev->nb_event_ports = 0;
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 8a5c737e4b..ccd09b1d82 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -91,11 +91,8 @@ struct cnxk_sso_evdev {
 	uint32_t min_dequeue_timeout_ns;
 	uint32_t max_dequeue_timeout_ns;
 	int32_t max_num_events;
-	uint64_t *fc_mem;
 	uint64_t xaq_lmt;
-	uint64_t nb_xaq_cfg;
 	rte_iova_t fc_iova;
-	struct rte_mempool *xaq_pool;
 	uint64_t rx_offloads;
 	uint64_t tx_offloads;
 	uint64_t adptr_xae_cnt;
-- 
2.32.0



* [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free
  2021-09-02  7:00 [dpdk-dev] [PATCH 1/2] common/cnxk: add SSO XAQ pool create and free pbhagavatula
  2021-09-02  7:00 ` [dpdk-dev] [PATCH 2/2] event/cnxk: use common XAQ pool APIs pbhagavatula
@ 2021-11-03  0:52 ` pbhagavatula
  2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 2/5] event/cnxk: use common XAQ pool APIs pbhagavatula
                     ` (4 more replies)
  1 sibling, 5 replies; 8+ messages in thread
From: pbhagavatula @ 2021-11-03  0:52 UTC (permalink / raw)
  To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Ray Kinsella, Pavan Nikhilesh, Shijith Thotton
  Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add common API to create and free SSO XAQ pool.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 v2 Changes:
 - Merge patchsets 19356,18614 to avoid merge conflicts.
 - Rebase onto main.
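
 As a worked example of the v2 sizing below (illustrative numbers
 only; nb_hwgrp, xae_waes and nb_xae are hypothetical here, while
 SSO_XAQ_CACHE_CNT is the 7 defined in this patch):

	/* nb_hwgrp = 8, xae_waes = 16, nb_xae = 1000 (hypothetical) */
	nb_xaq  = SSO_XAQ_CACHE_CNT * nb_hwgrp;          /* 7 * 8 = 56 */
	nb_xaq += PLT_MAX(1 + ((nb_xae - 1) / xae_waes), /* ceil(1000/16) = 63 */
			  nb_xaq);                       /* 56 + 63 = 119 */
	xaq_lmt = nb_xaq - nb_hwgrp * SSO_XAQ_CACHE_CNT; /* 119 - 56 = 63 */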

 drivers/common/cnxk/roc_sso.c       | 124 ++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_sso.h       |  14 ++++
 drivers/common/cnxk/roc_sso_priv.h  |   5 ++
 drivers/common/cnxk/version.map     |   2 +
 drivers/event/cnxk/cn10k_eventdev.c |   2 +
 5 files changed, 147 insertions(+)

diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 762893f3dc..45ff16ca0e 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -5,6 +5,8 @@
 #include "roc_api.h"
 #include "roc_priv.h"

+#define SSO_XAQ_CACHE_CNT (0x7)
+
 /* Private functions. */
 int
 sso_lf_alloc(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf,
@@ -387,6 +389,128 @@ roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
 	return mbox_process(dev->mbox);
 }

+int
+sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+			uint32_t nb_xae, uint32_t xae_waes,
+			uint32_t xaq_buf_size, uint16_t nb_hwgrp)
+{
+	struct npa_pool_s pool;
+	struct npa_aura_s aura;
+	plt_iova_t iova;
+	uint32_t i;
+	int rc;
+
+	if (xaq->mem != NULL) {
+		rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
+		if (rc < 0) {
+			plt_err("Failed to release XAQ %d", rc);
+			return rc;
+		}
+		roc_npa_pool_destroy(xaq->aura_handle);
+		plt_free(xaq->fc);
+		plt_free(xaq->mem);
+		memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
+	}
+
+	xaq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
+	if (xaq->fc == NULL) {
+		plt_err("Failed to allocate XAQ FC");
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	xaq->nb_xae = nb_xae;
+
+	/* Taken from HRM 14.3.3(4) */
+	xaq->nb_xaq = (SSO_XAQ_CACHE_CNT * nb_hwgrp);
+	xaq->nb_xaq += PLT_MAX(1 + ((xaq->nb_xae - 1) / xae_waes), xaq->nb_xaq);
+
+	xaq->mem = plt_zmalloc(xaq_buf_size * xaq->nb_xaq, xaq_buf_size);
+	if (xaq->mem == NULL) {
+		plt_err("Failed to allocate XAQ mem");
+		rc = -ENOMEM;
+		goto free_fc;
+	}
+
+	memset(&pool, 0, sizeof(struct npa_pool_s));
+	pool.nat_align = 1;
+
+	memset(&aura, 0, sizeof(aura));
+	aura.fc_ena = 1;
+	aura.fc_addr = (uint64_t)xaq->fc;
+	aura.fc_hyst_bits = 0; /* Store count on all updates */
+	rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
+				 &aura, &pool);
+	if (rc) {
+		plt_err("Failed to create XAQ pool");
+		goto npa_fail;
+	}
+
+	iova = (uint64_t)xaq->mem;
+	for (i = 0; i < xaq->nb_xaq; i++) {
+		roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
+		iova += xaq_buf_size;
+	}
+	roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
+
+	/* When SW does addwork (enqueue) check if there is space in XAQ by
+	 * comparing fc_addr above against the xaq_lmt calculated below.
+	 * There should be a minimum headroom of 7 XAQs per HWGRP for SSO
+	 * to request XAQ to cache them even before enqueue is called.
+	 */
+	xaq->xaq_lmt = xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT);
+
+	return 0;
+npa_fail:
+	plt_free(xaq->mem);
+free_fc:
+	plt_free(xaq->fc);
+fail:
+	memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
+	return rc;
+}
+
+int
+roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae)
+{
+	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+
+	return sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
+				       roc_sso->xae_waes, roc_sso->xaq_buf_size,
+				       roc_sso->nb_hwgrp);
+}
+
+int
+sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+			uint16_t nb_hwgrp)
+{
+	int rc;
+
+	if (xaq->mem != NULL) {
+		if (nb_hwgrp) {
+			rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
+			if (rc < 0) {
+				plt_err("Failed to release XAQ %d", rc);
+				return rc;
+			}
+		}
+		roc_npa_pool_destroy(xaq->aura_handle);
+		plt_free(xaq->fc);
+		plt_free(xaq->mem);
+	}
+	memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
+
+	return 0;
+}
+
+int
+roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, uint16_t nb_hwgrp)
+{
+	struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+
+	return sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
+}
+
 int
 sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps)
 {
diff --git a/drivers/common/cnxk/roc_sso.h b/drivers/common/cnxk/roc_sso.h
index b28f6089cc..27d49c6c68 100644
--- a/drivers/common/cnxk/roc_sso.h
+++ b/drivers/common/cnxk/roc_sso.h
@@ -27,6 +27,15 @@ struct roc_sso_hwgrp_stats {
 	uint64_t page_cnt;
 };

+struct roc_sso_xaq_data {
+	uint32_t nb_xaq;
+	uint32_t nb_xae;
+	uint32_t xaq_lmt;
+	uint64_t aura_handle;
+	void *fc;
+	void *mem;
+};
+
 struct roc_sso {
 	struct plt_pci_device *pci_dev;
 	/* Public data. */
@@ -35,6 +44,7 @@ struct roc_sso {
 	uint16_t nb_hwgrp;
 	uint8_t nb_hws;
 	uintptr_t lmt_base;
+	struct roc_sso_xaq_data xaq;
 	/* HW Const. */
 	uint32_t xae_waes;
 	uint32_t xaq_buf_size;
@@ -95,6 +105,10 @@ int __roc_api roc_sso_hwgrp_hws_link_status(struct roc_sso *roc_sso,
 uintptr_t __roc_api roc_sso_hws_base_get(struct roc_sso *roc_sso, uint8_t hws);
 uintptr_t __roc_api roc_sso_hwgrp_base_get(struct roc_sso *roc_sso,
 					   uint16_t hwgrp);
+int __roc_api roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso,
+					  uint32_t nb_xae);
+int __roc_api roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso,
+					  uint16_t nb_hwgrp);

 /* Debug */
 void __roc_api roc_sso_dump(struct roc_sso *roc_sso, uint8_t nb_hws,
diff --git a/drivers/common/cnxk/roc_sso_priv.h b/drivers/common/cnxk/roc_sso_priv.h
index 8dffa3fbf4..2e1b025d1c 100644
--- a/drivers/common/cnxk/roc_sso_priv.h
+++ b/drivers/common/cnxk/roc_sso_priv.h
@@ -47,6 +47,11 @@ void sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp,
 			 uint16_t hwgrp[], uint16_t n, uint16_t enable);
 int sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps);
 int sso_hwgrp_release_xaq(struct dev *dev, uint16_t hwgrps);
+int sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+			    uint32_t nb_xae, uint32_t xae_waes,
+			    uint32_t xaq_buf_size, uint16_t nb_hwgrp);
+int sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
+			    uint16_t nb_hwgrp);

 /* SSO IRQ */
 int sso_register_irqs_priv(struct roc_sso *roc_sso,
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 8d4d42f476..cbf4a4137b 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -310,7 +310,9 @@ INTERNAL {
 	roc_sso_dump;
 	roc_sso_hwgrp_alloc_xaq;
 	roc_sso_hwgrp_base_get;
+	roc_sso_hwgrp_free_xaq_aura;
 	roc_sso_hwgrp_hws_link_status;
+	roc_sso_hwgrp_init_xaq_aura;
 	roc_sso_hwgrp_qos_config;
 	roc_sso_hwgrp_release_xaq;
 	roc_sso_hwgrp_set_priority;
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index e287448189..2fb4ea878e 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -132,6 +132,7 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,

 	plt_write64(0, base + SSO_LF_GGRP_QCTL);

+	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
 	req = queue_id;	    /* GGRP ID */
 	req |= BIT_ULL(18); /* Grouped */
 	req |= BIT_ULL(16); /* WAIT */
@@ -177,6 +178,7 @@ cn10k_sso_hws_reset(void *arg, void *hws)
 	} gw;
 	uint8_t pend_tt;

+	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
 	/* Wait till getwork/swtp/waitw/desched completes. */
 	do {
 		pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
--
2.17.1



* [dpdk-dev] [PATCH v2 2/5] event/cnxk: use common XAQ pool APIs
  2021-11-03  0:52 ` [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free pbhagavatula
@ 2021-11-03  0:52   ` pbhagavatula
  2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 3/5] event/cnxk: fix packet Tx overflow pbhagavatula
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: pbhagavatula @ 2021-11-03  0:52 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Use the common APIs to create and free the XAQ pool.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
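 On the workslot side, the flow-control counter now comes from the
 common pool data; a minimal sketch of how enqueue consumes it
 (mirroring cn*_sso_hws_new_event() and the assignments in the hunks
 below):

	ws->fc_mem  = (uint64_t *)dev->fc_iova;  /* VA of NPA aura count */
	ws->xaq_lmt = dev->xaq_lmt;
	...
	if (ws->xaq_lmt <= *ws->fc_mem)          /* no XAQ headroom */
		return 0;                        /* reject the new event */
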
 drivers/event/cnxk/cn10k_eventdev.c |   4 +-
 drivers/event/cnxk/cn9k_eventdev.c  |   6 +-
 drivers/event/cnxk/cnxk_eventdev.c  | 129 ++++------------------------
 drivers/event/cnxk/cnxk_eventdev.h  |   3 -
 4 files changed, 21 insertions(+), 121 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 2fb4ea878e..2431875766 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -99,7 +99,7 @@ cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
 
 	rte_memcpy(ws->grps_base, grps_base,
 		   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
-	ws->fc_mem = dev->fc_mem;
+	ws->fc_mem = (uint64_t *)dev->fc_iova;
 	ws->xaq_lmt = dev->xaq_lmt;
 
 	/* Set get_work timeout for HWS */
@@ -470,8 +470,6 @@ cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
 		return -EINVAL;
 	}
 
-	roc_sso_rsrc_fini(&dev->sso);
-
 	rc = cn10k_sso_rsrc_init(dev, dev->nb_event_ports,
 				 dev->nb_event_queues);
 	if (rc < 0) {
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index d757da7c37..c364336023 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -100,7 +100,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
 		dws = hws;
 		rte_memcpy(dws->grps_base, grps_base,
 			   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
-		dws->fc_mem = dev->fc_mem;
+		dws->fc_mem = (uint64_t *)dev->fc_iova;
 		dws->xaq_lmt = dev->xaq_lmt;
 
 		plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
@@ -109,7 +109,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
 		ws = hws;
 		rte_memcpy(ws->grps_base, grps_base,
 			   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
-		ws->fc_mem = dev->fc_mem;
+		ws->fc_mem = (uint64_t *)dev->fc_iova;
 		ws->xaq_lmt = dev->xaq_lmt;
 
 		plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
@@ -729,8 +729,6 @@ cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
 		return -EINVAL;
 	}
 
-	roc_sso_rsrc_fini(&dev->sso);
-
 	rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
 	if (rc < 0) {
 		plt_err("Failed to initialize SSO resources");
diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index 9deab0829a..2b9235687a 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -125,101 +125,28 @@ cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
 int
 cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
 {
-	char pool_name[RTE_MEMZONE_NAMESIZE];
-	uint32_t xaq_cnt, npa_aura_id;
-	const struct rte_memzone *mz;
-	struct npa_aura_s *aura;
-	static int reconfig_cnt;
+	uint32_t xae_cnt;
 	int rc;
 
-	if (dev->xaq_pool) {
-		rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
-		if (rc < 0) {
-			plt_err("Failed to release XAQ %d", rc);
-			return rc;
-		}
-		rte_mempool_free(dev->xaq_pool);
-		dev->xaq_pool = NULL;
-	}
-
-	/*
-	 * Allocate memory for Add work backpressure.
-	 */
-	mz = rte_memzone_lookup(CNXK_SSO_FC_NAME);
-	if (mz == NULL)
-		mz = rte_memzone_reserve_aligned(CNXK_SSO_FC_NAME,
-						 sizeof(struct npa_aura_s) +
-							 RTE_CACHE_LINE_SIZE,
-						 0, 0, RTE_CACHE_LINE_SIZE);
-	if (mz == NULL) {
-		plt_err("Failed to allocate mem for fcmem");
-		return -ENOMEM;
-	}
-
-	dev->fc_iova = mz->iova;
-	dev->fc_mem = mz->addr;
-
-	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem +
-				     RTE_CACHE_LINE_SIZE);
-	memset(aura, 0, sizeof(struct npa_aura_s));
-
-	aura->fc_ena = 1;
-	aura->fc_addr = dev->fc_iova;
-	aura->fc_hyst_bits = 0; /* Store count on all updates */
-
-	/* Taken from HRM 14.3.3(4) */
-	xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
+	xae_cnt = dev->sso.iue;
 	if (dev->xae_cnt)
-		xaq_cnt += dev->xae_cnt / dev->sso.xae_waes;
-	else if (dev->adptr_xae_cnt)
-		xaq_cnt += (dev->adptr_xae_cnt / dev->sso.xae_waes) +
-			   (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
-	else
-		xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
-			   (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
-
-	plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
-	/* Setup XAQ based on number of nb queues. */
-	snprintf(pool_name, 30, "cnxk_xaq_buf_pool_%d", reconfig_cnt);
-	dev->xaq_pool = (void *)rte_mempool_create_empty(
-		pool_name, xaq_cnt, dev->sso.xaq_buf_size, 0, 0,
-		rte_socket_id(), 0);
-
-	if (dev->xaq_pool == NULL) {
-		plt_err("Unable to create empty mempool.");
-		rte_memzone_free(mz);
-		return -ENOMEM;
-	}
-
-	rc = rte_mempool_set_ops_byname(dev->xaq_pool,
-					rte_mbuf_platform_mempool_ops(), aura);
-	if (rc != 0) {
-		plt_err("Unable to set xaqpool ops.");
-		goto alloc_fail;
-	}
+		xae_cnt += dev->xae_cnt;
+	if (dev->adptr_xae_cnt)
+		xae_cnt += (dev->adptr_xae_cnt);
 
-	rc = rte_mempool_populate_default(dev->xaq_pool);
+	plt_sso_dbg("Configuring %d xae buffers", xae_cnt);
+	rc = roc_sso_hwgrp_init_xaq_aura(&dev->sso, xae_cnt);
 	if (rc < 0) {
-		plt_err("Unable to set populate xaqpool.");
-		goto alloc_fail;
+		plt_err("Failed to configure XAQ aura");
+		return rc;
 	}
-	reconfig_cnt++;
-	/* When SW does addwork (enqueue) check if there is space in XAQ by
-	 * comparing fc_addr above against the xaq_lmt calculated below.
-	 * There should be a minimum headroom (CNXK_SSO_XAQ_SLACK / 2) for SSO
-	 * to request XAQ to cache them even before enqueue is called.
-	 */
-	dev->xaq_lmt =
-		xaq_cnt - (CNXK_SSO_XAQ_SLACK / 2 * dev->nb_event_queues);
-	dev->nb_xaq_cfg = xaq_cnt;
-
-	npa_aura_id = roc_npa_aura_handle_to_aura(dev->xaq_pool->pool_id);
-	return roc_sso_hwgrp_alloc_xaq(&dev->sso, npa_aura_id,
-				       dev->nb_event_queues);
-alloc_fail:
-	rte_mempool_free(dev->xaq_pool);
-	rte_memzone_free(mz);
-	return rc;
+	dev->xaq_lmt = dev->sso.xaq.xaq_lmt;
+	dev->fc_iova = (uint64_t)dev->sso.xaq.fc;
+
+	return roc_sso_hwgrp_alloc_xaq(
+		&dev->sso,
+		roc_npa_aura_handle_to_aura(dev->sso.xaq.aura_handle),
+		dev->nb_event_queues);
 }
 
 int
@@ -231,14 +158,6 @@ cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev)
 	if (event_dev->data->dev_started)
 		event_dev->dev_ops->dev_stop(event_dev);
 
-	rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
-	if (rc < 0) {
-		plt_err("Failed to release XAQ %d", rc);
-		return rc;
-	}
-
-	rte_mempool_free(dev->xaq_pool);
-	dev->xaq_pool = NULL;
 	rc = cnxk_sso_xaq_allocate(dev);
 	if (rc < 0) {
 		plt_err("Failed to alloc XAQ %d", rc);
@@ -320,7 +239,6 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
 	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 	uint32_t deq_tmo_ns;
-	int rc;
 
 	deq_tmo_ns = conf->dequeue_timeout_ns;
 
@@ -354,15 +272,8 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
 		return -EINVAL;
 	}
 
-	if (dev->xaq_pool) {
-		rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
-		if (rc < 0) {
-			plt_err("Failed to release XAQ %d", rc);
-			return rc;
-		}
-		rte_mempool_free(dev->xaq_pool);
-		dev->xaq_pool = NULL;
-	}
+	roc_sso_rsrc_fini(&dev->sso);
+	roc_sso_hwgrp_free_xaq_aura(&dev->sso, dev->sso.nb_hwgrp);
 
 	dev->nb_event_queues = conf->nb_event_queues;
 	dev->nb_event_ports = conf->nb_event_ports;
@@ -556,12 +467,8 @@ cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn)
 	}
 
 	roc_sso_rsrc_fini(&dev->sso);
-	rte_mempool_free(dev->xaq_pool);
-	rte_memzone_free(rte_memzone_lookup(CNXK_SSO_FC_NAME));
 
 	dev->fc_iova = 0;
-	dev->fc_mem = NULL;
-	dev->xaq_pool = NULL;
 	dev->configured = false;
 	dev->is_timeout_deq = 0;
 	dev->nb_event_ports = 0;
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index b57004c0dc..957dcf04a4 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -91,11 +91,8 @@ struct cnxk_sso_evdev {
 	uint32_t min_dequeue_timeout_ns;
 	uint32_t max_dequeue_timeout_ns;
 	int32_t max_num_events;
-	uint64_t *fc_mem;
 	uint64_t xaq_lmt;
-	uint64_t nb_xaq_cfg;
 	rte_iova_t fc_iova;
-	struct rte_mempool *xaq_pool;
 	uint64_t rx_offloads;
 	uint64_t tx_offloads;
 	uint64_t adptr_xae_cnt;
-- 
2.17.1



* [dpdk-dev] [PATCH v2 3/5] event/cnxk: fix packet Tx overflow
  2021-11-03  0:52 ` [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free pbhagavatula
  2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 2/5] event/cnxk: use common XAQ pool APIs pbhagavatula
@ 2021-11-03  0:52   ` pbhagavatula
  2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 4/5] event/cnxk: reduce workslot memory consumption pbhagavatula
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: pbhagavatula @ 2021-11-03  0:52 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

The transmit loop incorrectly assumes that nb_mbufs is always a
multiple of 4 when transmitting an event vector. The vector may not
reach its maximum size and can be pushed out early due to a timeout,
so the loop must also handle a scalar remainder.

Fixes: 761a321acf91 ("event/cnxk: support vectorized Tx event fast path")

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
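 The fix splits each burst into a vectorized body and a scalar tail;
 e.g. for nb_mbufs = 10 with NIX_DESCS_PER_LOOP = 4 (value assumed
 here for illustration):

	scalar = nb_mbufs & (NIX_DESCS_PER_LOOP - 1);          /* 10 & 3 = 2 */
	pkts = RTE_ALIGN_FLOOR(nb_mbufs, NIX_DESCS_PER_LOOP);  /* 8 */

	for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP)
		/* 4-at-a-time vector/queue-grouped path */;
	mbufs += i;
	for (i = 0; i < scalar; i++)    /* leftover mbufs, one at a time */
		cn10k_sso_tx_one(mbufs[i], cmd, lmt_id, lmt_addr,
				 sched_type, base, txq_data, flags);
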
 drivers/event/cnxk/cn10k_worker.h | 180 +++++++++++++-----------------
 1 file changed, 77 insertions(+), 103 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 1255662b6c..657ab91ac8 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -7,10 +7,10 @@
 
 #include <rte_vect.h>
 
+#include "cn10k_cryptodev_ops.h"
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
-#include "cn10k_cryptodev_ops.h"
 
 #include "cn10k_ethdev.h"
 #include "cn10k_rx.h"
@@ -237,18 +237,16 @@ cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
 
 				cq_w1 = *(uint64_t *)(gw.u64[1] + 8);
 
-				sa_base = cnxk_nix_sa_base_get(port,
-							       lookup_mem);
+				sa_base =
+					cnxk_nix_sa_base_get(port, lookup_mem);
 				sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 
-				mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(cq_w1,
-						sa_base, (uintptr_t)&iova,
-						&loff, (struct rte_mbuf *)mbuf,
-						d_off);
+				mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(
+					cq_w1, sa_base, (uintptr_t)&iova, &loff,
+					(struct rte_mbuf *)mbuf, d_off);
 				if (loff)
 					roc_npa_aura_op_free(m->pool->pool_id,
 							     0, iova);
-
 			}
 
 			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
@@ -396,6 +394,56 @@ cn10k_sso_hws_xtract_meta(struct rte_mbuf *m,
 		txq_data[m->port][rte_event_eth_tx_adapter_txq_get(m)];
 }
 
+static __rte_always_inline void
+cn10k_sso_tx_one(struct rte_mbuf *m, uint64_t *cmd, uint16_t lmt_id,
+		 uintptr_t lmt_addr, uint8_t sched_type, uintptr_t base,
+		 const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
+		 const uint32_t flags)
+{
+	uint8_t lnum = 0, loff = 0, shft = 0;
+	struct cn10k_eth_txq *txq;
+	uintptr_t laddr;
+	uint16_t segdw;
+	uintptr_t pa;
+	bool sec;
+
+	txq = cn10k_sso_hws_xtract_meta(m, txq_data);
+	cn10k_nix_tx_skeleton(txq, cmd, flags);
+	/* Perform header writes before barrier
+	 * for TSO
+	 */
+	if (flags & NIX_TX_OFFLOAD_TSO_F)
+		cn10k_nix_xmit_prepare_tso(m, flags);
+
+	cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec);
+
+	laddr = lmt_addr;
+	/* Prepare CPT instruction and get nixtx addr if
+	 * it is for CPT on same lmtline.
+	 */
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+		cn10k_nix_prep_sec(m, cmd, &laddr, lmt_addr, &lnum, &loff,
+				   &shft, txq->sa_base, flags);
+
+	/* Move NIX desc to LMT/NIXTX area */
+	cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);
+
+	if (flags & NIX_TX_MULTI_SEG_F)
+		segdw = cn10k_nix_prepare_mseg(m, (uint64_t *)laddr, flags);
+	else
+		segdw = cn10k_nix_tx_ext_subs(flags) + 2;
+
+	if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
+		pa = txq->cpt_io_addr | 3 << 4;
+	else
+		pa = txq->io_addr | ((segdw - 1) << 4);
+
+	if (!sched_type)
+		roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
+
+	roc_lmt_submit_steorl(lmt_id, pa);
+}
+
 static __rte_always_inline void
 cn10k_sso_vwqe_split_tx(struct rte_mbuf **mbufs, uint16_t nb_mbufs,
 			uint64_t *cmd, uint16_t lmt_id, uintptr_t lmt_addr,
@@ -404,11 +452,13 @@ cn10k_sso_vwqe_split_tx(struct rte_mbuf **mbufs, uint16_t nb_mbufs,
 			const uint32_t flags)
 {
 	uint16_t port[4], queue[4];
+	uint16_t i, j, pkts, scalar;
 	struct cn10k_eth_txq *txq;
-	uint16_t i, j;
-	uintptr_t pa;
 
-	for (i = 0; i < nb_mbufs; i += 4) {
+	scalar = nb_mbufs & (NIX_DESCS_PER_LOOP - 1);
+	pkts = RTE_ALIGN_FLOOR(nb_mbufs, NIX_DESCS_PER_LOOP);
+
+	for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
 		port[0] = mbufs[i]->port;
 		port[1] = mbufs[i + 1]->port;
 		port[2] = mbufs[i + 2]->port;
@@ -421,66 +471,25 @@ cn10k_sso_vwqe_split_tx(struct rte_mbuf **mbufs, uint16_t nb_mbufs,
 
 		if (((port[0] ^ port[1]) & (port[2] ^ port[3])) ||
 		    ((queue[0] ^ queue[1]) & (queue[2] ^ queue[3]))) {
-
-			for (j = 0; j < 4; j++) {
-				uint8_t lnum = 0, loff = 0, shft = 0;
-				struct rte_mbuf *m = mbufs[i + j];
-				uintptr_t laddr;
-				uint16_t segdw;
-				bool sec;
-
-				txq = (struct cn10k_eth_txq *)
-					txq_data[port[j]][queue[j]];
-				cn10k_nix_tx_skeleton(txq, cmd, flags);
-				/* Perform header writes before barrier
-				 * for TSO
-				 */
-				if (flags & NIX_TX_OFFLOAD_TSO_F)
-					cn10k_nix_xmit_prepare_tso(m, flags);
-
-				cn10k_nix_xmit_prepare(m, cmd, flags,
-						       txq->lso_tun_fmt, &sec);
-
-				laddr = lmt_addr;
-				/* Prepare CPT instruction and get nixtx addr if
-				 * it is for CPT on same lmtline.
-				 */
-				if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
-					cn10k_nix_prep_sec(m, cmd, &laddr,
-							   lmt_addr, &lnum,
-							   &loff, &shft,
-							   txq->sa_base, flags);
-
-				/* Move NIX desc to LMT/NIXTX area */
-				cn10k_nix_xmit_mv_lmt_base(laddr, cmd, flags);
-
-				if (flags & NIX_TX_MULTI_SEG_F) {
-					segdw = cn10k_nix_prepare_mseg(m,
-						(uint64_t *)laddr, flags);
-				} else {
-					segdw = cn10k_nix_tx_ext_subs(flags) +
-						2;
-				}
-
-				if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
-					pa = txq->cpt_io_addr | 3 << 4;
-				else
-					pa = txq->io_addr | ((segdw - 1) << 4);
-
-				if (!sched_type)
-					roc_sso_hws_head_wait(base +
-							      SSOW_LF_GWS_TAG);
-
-				roc_lmt_submit_steorl(lmt_id, pa);
-			}
+			for (j = 0; j < 4; j++)
+				cn10k_sso_tx_one(mbufs[i + j], cmd, lmt_id,
+						 lmt_addr, sched_type, base,
+						 txq_data, flags);
 		} else {
 			txq = (struct cn10k_eth_txq *)
 				txq_data[port[0]][queue[0]];
-			cn10k_nix_xmit_pkts_vector(txq, &mbufs[i], 4, cmd, base
-					+ SSOW_LF_GWS_TAG,
+			cn10k_nix_xmit_pkts_vector(txq, &mbufs[i], 4, cmd,
+						   base + SSOW_LF_GWS_TAG,
 						   flags | NIX_TX_VWQE_F);
 		}
 	}
+
+	mbufs += i;
+
+	for (i = 0; i < scalar; i++) {
+		cn10k_sso_tx_one(mbufs[i], cmd, lmt_id, lmt_addr, sched_type,
+				 base, txq_data, flags);
+	}
 }
 
 static __rte_always_inline uint16_t
@@ -489,19 +498,14 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
 		       const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
 		       const uint32_t flags)
 {
-	uint8_t lnum = 0, loff = 0, shft = 0;
 	struct cn10k_eth_txq *txq;
-	uint16_t ref_cnt, segdw;
 	struct rte_mbuf *m;
 	uintptr_t lmt_addr;
-	uintptr_t c_laddr;
+	uint16_t ref_cnt;
 	uint16_t lmt_id;
-	uintptr_t pa;
-	bool sec;
 
 	lmt_addr = ws->lmt_base;
 	ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
-	c_laddr = lmt_addr;
 
 	if (ev->event_type & RTE_EVENT_TYPE_VECTOR) {
 		struct rte_mbuf **mbufs = ev->vec->mbufs;
@@ -526,38 +530,8 @@ cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
 
 	m = ev->mbuf;
 	ref_cnt = m->refcnt;
-	txq = cn10k_sso_hws_xtract_meta(m, txq_data);
-	cn10k_nix_tx_skeleton(txq, cmd, flags);
-	/* Perform header writes before barrier for TSO */
-	if (flags & NIX_TX_OFFLOAD_TSO_F)
-		cn10k_nix_xmit_prepare_tso(m, flags);
-
-	cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec);
-
-	/* Prepare CPT instruction and get nixtx addr if
-	 * it is for CPT on same lmtline.
-	 */
-	if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
-		cn10k_nix_prep_sec(m, cmd, &lmt_addr, c_laddr, &lnum, &loff,
-				   &shft, txq->sa_base, flags);
-
-	/* Move NIX desc to LMT/NIXTX area */
-	cn10k_nix_xmit_mv_lmt_base(lmt_addr, cmd, flags);
-	if (flags & NIX_TX_MULTI_SEG_F) {
-		segdw = cn10k_nix_prepare_mseg(m, (uint64_t *)lmt_addr, flags);
-	} else {
-		segdw = cn10k_nix_tx_ext_subs(flags) + 2;
-	}
-
-	if (flags & NIX_TX_OFFLOAD_SECURITY_F && sec)
-		pa = txq->cpt_io_addr | 3 << 4;
-	else
-		pa = txq->io_addr | ((segdw - 1) << 4);
-
-	if (!ev->sched_type)
-		roc_sso_hws_head_wait(ws->tx_base + SSOW_LF_GWS_TAG);
-
-	roc_lmt_submit_steorl(lmt_id, pa);
+	cn10k_sso_tx_one(m, cmd, lmt_id, lmt_addr, ev->sched_type, ws->tx_base,
+			 txq_data, flags);
 
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
 		if (ref_cnt > 1)
-- 
2.17.1



* [dpdk-dev] [PATCH v2 4/5] event/cnxk: reduce workslot memory consumption
  2021-11-03  0:52 ` [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free pbhagavatula
  2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 2/5] event/cnxk: use common XAQ pool APIs pbhagavatula
  2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 3/5] event/cnxk: fix packet Tx overflow pbhagavatula
@ 2021-11-03  0:52   ` pbhagavatula
  2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 5/5] event/cnxk: rework enqueue path pbhagavatula
  2021-11-04  7:41   ` [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free Jerin Jacob
  4 siblings, 0 replies; 8+ messages in thread
From: pbhagavatula @ 2021-11-03  0:52 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

SSO group base addresses are always contiguous, so we need not store
all the base addresses in workslot memory; instead, store only the
first group's base address and compute a group's address from its
offset when required.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
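 HWGRP LF base addresses sit at a fixed 4 KiB stride (hence the shift
 by 12 in the hunks below), so one cached base suffices; a minimal
 sketch of the change:

	/* Before: one pointer cached per HWGRP in each workslot. */
	base = ws->grps_base[grp];

	/* After: a single base plus a computed per-group offset. */
	base = ws->grp_base + (grp << 12);
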
 drivers/event/cnxk/cn10k_eventdev.c |  5 ++---
 drivers/event/cnxk/cn10k_worker.h   |  3 ++-
 drivers/event/cnxk/cn9k_eventdev.c  |  8 +++-----
 drivers/event/cnxk/cn9k_worker.h    |  6 ++++--
 drivers/event/cnxk/cnxk_eventdev.c  | 15 ++++++---------
 drivers/event/cnxk/cnxk_eventdev.h  |  8 ++++----
 6 files changed, 21 insertions(+), 24 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 2431875766..c5a8c1ae8f 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -91,14 +91,13 @@ cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
 }
 
 static void
-cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
+cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
 {
 	struct cnxk_sso_evdev *dev = arg;
 	struct cn10k_sso_hws *ws = hws;
 	uint64_t val;
 
-	rte_memcpy(ws->grps_base, grps_base,
-		   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
+	ws->grp_base = grp_base;
 	ws->fc_mem = (uint64_t *)dev->fc_iova;
 	ws->xaq_lmt = dev->xaq_lmt;
 
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 657ab91ac8..f8331e88d7 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -30,7 +30,8 @@ cn10k_sso_hws_new_event(struct cn10k_sso_hws *ws, const struct rte_event *ev)
 	if (ws->xaq_lmt <= *ws->fc_mem)
 		return 0;
 
-	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
+	cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
+			      ws->grp_base + (grp << 12));
 	return 1;
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index c364336023..6e2787252e 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -87,7 +87,7 @@ cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
 }
 
 static void
-cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
+cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
 {
 	struct cnxk_sso_evdev *dev = arg;
 	struct cn9k_sso_hws_dual *dws;
@@ -98,8 +98,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
 	val = NSEC2USEC(dev->deq_tmo_ns) - 1;
 	if (dev->dual_ws) {
 		dws = hws;
-		rte_memcpy(dws->grps_base, grps_base,
-			   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
+		dws->grp_base = grp_base;
 		dws->fc_mem = (uint64_t *)dev->fc_iova;
 		dws->xaq_lmt = dev->xaq_lmt;
 
@@ -107,8 +106,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
 		plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
 	} else {
 		ws = hws;
-		rte_memcpy(ws->grps_base, grps_base,
-			   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
+		ws->grp_base = grp_base;
 		ws->fc_mem = (uint64_t *)dev->fc_iova;
 		ws->xaq_lmt = dev->xaq_lmt;
 
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index d536c0a8ca..aaf612e814 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -31,7 +31,8 @@ cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
 	if (ws->xaq_lmt <= *ws->fc_mem)
 		return 0;
 
-	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
+	cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
+			      ws->grp_base + (grp << 12));
 	return 1;
 }
 
@@ -108,7 +109,8 @@ cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
 	if (dws->xaq_lmt <= *dws->fc_mem)
 		return 0;
 
-	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, dws->grps_base[grp]);
+	cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
+			      dws->grp_base + (grp << 12));
 	return 1;
 }
 
diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index 2b9235687a..50d5c351bc 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -330,8 +330,7 @@ cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
 		    cnxk_sso_hws_setup_t hws_setup_fn)
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
-	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP] = {0};
-	uint16_t q;
+	uintptr_t grp_base = 0;
 
 	plt_sso_dbg("Port=%d", port_id);
 	if (event_dev->data->ports[port_id] == NULL) {
@@ -339,15 +338,13 @@ cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
 		return -EINVAL;
 	}
 
-	for (q = 0; q < dev->nb_event_queues; q++) {
-		grps_base[q] = roc_sso_hwgrp_base_get(&dev->sso, q);
-		if (grps_base[q] == 0) {
-			plt_err("Failed to get grp[%d] base addr", q);
-			return -EINVAL;
-		}
+	grp_base = roc_sso_hwgrp_base_get(&dev->sso, 0);
+	if (grp_base == 0) {
+		plt_err("Failed to get grp base addr");
+		return -EINVAL;
 	}
 
-	hws_setup_fn(dev, event_dev->data->ports[port_id], grps_base);
+	hws_setup_fn(dev, event_dev->data->ports[port_id], grp_base);
 	plt_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
 	rte_mb();
 
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 957dcf04a4..d9f52d03e0 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -61,7 +61,7 @@
 	} while (0)
 
 typedef void *(*cnxk_sso_init_hws_mem_t)(void *dev, uint8_t port_id);
-typedef void (*cnxk_sso_hws_setup_t)(void *dev, void *ws, uintptr_t *grp_base);
+typedef void (*cnxk_sso_hws_setup_t)(void *dev, void *ws, uintptr_t grp_base);
 typedef void (*cnxk_sso_hws_release_t)(void *dev, void *ws);
 typedef int (*cnxk_sso_link_t)(void *dev, void *ws, uint16_t *map,
 			       uint16_t nb_link);
@@ -129,7 +129,7 @@ struct cn10k_sso_hws {
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;
-	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
+	uintptr_t grp_base;
 	/* Tx Fastpath data */
 	uint64_t tx_base __rte_cache_aligned;
 	uintptr_t lmt_base;
@@ -157,7 +157,7 @@ struct cn9k_sso_hws {
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;
-	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
+	uintptr_t grp_base;
 	/* Tx Fastpath data */
 	uint64_t base __rte_cache_aligned;
 	uint8_t tx_adptr_data[];
@@ -179,7 +179,7 @@ struct cn9k_sso_hws_dual {
 	/* Add Work Fastpath data */
 	uint64_t xaq_lmt __rte_cache_aligned;
 	uint64_t *fc_mem;
-	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
+	uintptr_t grp_base;
 	/* Tx Fastpath data */
 	uint64_t base[2] __rte_cache_aligned;
 	uint8_t tx_adptr_data[];
-- 
2.17.1



* [dpdk-dev] [PATCH v2 5/5] event/cnxk: rework enqueue path
  2021-11-03  0:52 ` [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free pbhagavatula
                     ` (2 preceding siblings ...)
  2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 4/5] event/cnxk: reduce workslot memory consumption pbhagavatula
@ 2021-11-03  0:52   ` pbhagavatula
  2021-11-04  7:41   ` [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free Jerin Jacob
  4 siblings, 0 replies; 8+ messages in thread
From: pbhagavatula @ 2021-11-03  0:52 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh, Shijith Thotton; +Cc: dev

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Rework the SSO enqueue path for CN9K to make it similar to the CN10K
enqueue interface.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
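 Rather than caching one address per GWS operation in
 cn9k_sso_hws_state, the workslot keeps only the LF base and derives
 each register address from its fixed offset; the recurring pattern in
 the hunks below:

	/* Before: pre-computed op addresses. */
	ws->tag_op = base + SSOW_LF_GWS_TAG;
	ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
	cnxk_sso_hws_swtag_flush(ws->tag_op, ws->swtag_flush_op);

	/* After: compute from the base at the call site. */
	cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_TAG,
				 ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
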
 drivers/event/cnxk/cn9k_eventdev.c            | 28 ++-----
 drivers/event/cnxk/cn9k_worker.c              | 21 ++---
 drivers/event/cnxk/cn9k_worker.h              | 78 +++++++++----------
 drivers/event/cnxk/cn9k_worker_deq.c          |  4 +-
 drivers/event/cnxk/cn9k_worker_deq_ca.c       |  4 +-
 drivers/event/cnxk/cn9k_worker_deq_tmo.c      |  4 +-
 drivers/event/cnxk/cn9k_worker_dual_deq.c     | 16 ++--
 drivers/event/cnxk/cn9k_worker_dual_deq_ca.c  | 19 +++--
 drivers/event/cnxk/cn9k_worker_dual_deq_tmo.c | 26 +++----
 drivers/event/cnxk/cnxk_eventdev.h            | 25 +-----
 10 files changed, 96 insertions(+), 129 deletions(-)

diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 6e2787252e..b68ce6c0a4 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -27,17 +27,6 @@
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
 			[!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])
 
-static void
-cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
-{
-	ws->tag_op = base + SSOW_LF_GWS_TAG;
-	ws->wqp_op = base + SSOW_LF_GWS_WQP;
-	ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
-	ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
-	ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
-	ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
-}
-
 static int
 cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
 {
@@ -95,7 +84,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
 	uint64_t val;
 
 	/* Set get_work tmo for HWS */
-	val = NSEC2USEC(dev->deq_tmo_ns) - 1;
+	val = dev->deq_tmo_ns ? NSEC2USEC(dev->deq_tmo_ns) - 1 : 0;
 	if (dev->dual_ws) {
 		dws = hws;
 		dws->grp_base = grp_base;
@@ -148,7 +137,6 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 {
 	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
 	struct cn9k_sso_hws_dual *dws;
-	struct cn9k_sso_hws_state *st;
 	struct cn9k_sso_hws *ws;
 	uint64_t cq_ds_cnt = 1;
 	uint64_t aq_cnt = 1;
@@ -170,22 +158,21 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
 
 	if (dev->dual_ws) {
 		dws = hws;
-		st = &dws->ws_state[0];
 		ws_base = dws->base[0];
 	} else {
 		ws = hws;
-		st = (struct cn9k_sso_hws_state *)ws;
 		ws_base = ws->base;
 	}
 
 	while (aq_cnt || cq_ds_cnt || ds_cnt) {
-		plt_write64(req, st->getwrk_op);
-		cn9k_sso_hws_get_work_empty(st, &ev);
+		plt_write64(req, ws_base + SSOW_LF_GWS_OP_GET_WORK0);
+		cn9k_sso_hws_get_work_empty(ws_base, &ev);
 		if (fn != NULL && ev.u64 != 0)
 			fn(arg, ev);
 		if (ev.sched_type != SSO_TT_EMPTY)
-			cnxk_sso_hws_swtag_flush(st->tag_op,
-						 st->swtag_flush_op);
+			cnxk_sso_hws_swtag_flush(
+				ws_base + SSOW_LF_GWS_TAG,
+				ws_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
 		do {
 			val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
 		} while (val & BIT_ULL(56));
@@ -674,8 +661,6 @@ cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
 			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
 		dws->base[1] = roc_sso_hws_base_get(
 			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
-		cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
-		cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
 		dws->hws_id = port_id;
 		dws->swtag_req = 0;
 		dws->vws = 0;
@@ -695,7 +680,6 @@ cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
 		/* First cache line is reserved for cookie */
 		ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
 		ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
-		cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
 		ws->hws_id = port_id;
 		ws->swtag_req = 0;
 
diff --git a/drivers/event/cnxk/cn9k_worker.c b/drivers/event/cnxk/cn9k_worker.c
index 32f7cc0343..a981bc986f 100644
--- a/drivers/event/cnxk/cn9k_worker.c
+++ b/drivers/event/cnxk/cn9k_worker.c
@@ -19,7 +19,8 @@ cn9k_sso_hws_enq(void *port, const struct rte_event *ev)
 		cn9k_sso_hws_forward_event(ws, ev);
 		break;
 	case RTE_EVENT_OP_RELEASE:
-		cnxk_sso_hws_swtag_flush(ws->tag_op, ws->swtag_flush_op);
+		cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_TAG,
+					 ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
 		break;
 	default:
 		return 0;
@@ -67,17 +68,18 @@ uint16_t __rte_hot
 cn9k_sso_hws_dual_enq(void *port, const struct rte_event *ev)
 {
 	struct cn9k_sso_hws_dual *dws = port;
-	struct cn9k_sso_hws_state *vws;
+	uint64_t base;
 
-	vws = &dws->ws_state[!dws->vws];
+	base = dws->base[!dws->vws];
 	switch (ev->op) {
 	case RTE_EVENT_OP_NEW:
 		return cn9k_sso_hws_dual_new_event(dws, ev);
 	case RTE_EVENT_OP_FORWARD:
-		cn9k_sso_hws_dual_forward_event(dws, vws, ev);
+		cn9k_sso_hws_dual_forward_event(dws, base, ev);
 		break;
 	case RTE_EVENT_OP_RELEASE:
-		cnxk_sso_hws_swtag_flush(vws->tag_op, vws->swtag_flush_op);
+		cnxk_sso_hws_swtag_flush(base + SSOW_LF_GWS_TAG,
+					 base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
 		break;
 	default:
 		return 0;
@@ -114,7 +116,7 @@ cn9k_sso_hws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
 	struct cn9k_sso_hws_dual *dws = port;
 
 	RTE_SET_USED(nb_events);
-	cn9k_sso_hws_dual_forward_event(dws, &dws->ws_state[!dws->vws], ev);
+	cn9k_sso_hws_dual_forward_event(dws, dws->base[!dws->vws], ev);
 
 	return 1;
 }
@@ -126,7 +128,8 @@ cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
 
 	RTE_SET_USED(nb_events);
 
-	return cn9k_cpt_crypto_adapter_enqueue(ws->tag_op, ev->event_ptr);
+	return cn9k_cpt_crypto_adapter_enqueue(ws->base + SSOW_LF_GWS_TAG,
+					       ev->event_ptr);
 }
 
 uint16_t __rte_hot
@@ -136,6 +139,6 @@ cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
 
 	RTE_SET_USED(nb_events);
 
-	return cn9k_cpt_crypto_adapter_enqueue(dws->ws_state[!dws->vws].tag_op,
-					       ev->event_ptr);
+	return cn9k_cpt_crypto_adapter_enqueue(
+		dws->base[!dws->vws] + SSOW_LF_GWS_TAG, ev->event_ptr);
 }
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index aaf612e814..9377fa50e7 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -37,12 +37,12 @@ cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
 }
 
 static __rte_always_inline void
-cn9k_sso_hws_fwd_swtag(struct cn9k_sso_hws_state *vws,
-		       const struct rte_event *ev)
+cn9k_sso_hws_fwd_swtag(uint64_t base, const struct rte_event *ev)
 {
 	const uint32_t tag = (uint32_t)ev->event;
 	const uint8_t new_tt = ev->sched_type;
-	const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(vws->tag_op));
+	const uint8_t cur_tt =
+		CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_TAG));
 
 	/* CNXK model
 	 * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
@@ -54,24 +54,24 @@ cn9k_sso_hws_fwd_swtag(struct cn9k_sso_hws_state *vws,
 
 	if (new_tt == SSO_TT_UNTAGGED) {
 		if (cur_tt != SSO_TT_UNTAGGED)
-			cnxk_sso_hws_swtag_untag(
-				CN9K_SSOW_GET_BASE_ADDR(vws->getwrk_op) +
-				SSOW_LF_GWS_OP_SWTAG_UNTAG);
+			cnxk_sso_hws_swtag_untag(base +
+						 SSOW_LF_GWS_OP_SWTAG_UNTAG);
 	} else {
-		cnxk_sso_hws_swtag_norm(tag, new_tt, vws->swtag_norm_op);
+		cnxk_sso_hws_swtag_norm(tag, new_tt,
+					base + SSOW_LF_GWS_OP_SWTAG_NORM);
 	}
 }
 
 static __rte_always_inline void
-cn9k_sso_hws_fwd_group(struct cn9k_sso_hws_state *ws,
-		       const struct rte_event *ev, const uint16_t grp)
+cn9k_sso_hws_fwd_group(uint64_t base, const struct rte_event *ev,
+		       const uint16_t grp)
 {
 	const uint32_t tag = (uint32_t)ev->event;
 	const uint8_t new_tt = ev->sched_type;
 
-	plt_write64(ev->u64, CN9K_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
-				     SSOW_LF_GWS_OP_UPD_WQP_GRP1);
-	cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
+	plt_write64(ev->u64, base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
+	cnxk_sso_hws_swtag_desched(tag, new_tt, grp,
+				   base + SSOW_LF_GWS_OP_SWTAG_DESCHED);
 }
 
 static __rte_always_inline void
@@ -80,8 +80,8 @@ cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
 	const uint8_t grp = ev->queue_id;
 
 	/* Group hasn't changed; use SWTAG to forward the event */
-	if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_op)) == grp) {
-		cn9k_sso_hws_fwd_swtag((struct cn9k_sso_hws_state *)ws, ev);
+	if (CNXK_GRP_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_TAG)) == grp) {
+		cn9k_sso_hws_fwd_swtag(ws->base, ev);
 		ws->swtag_req = 1;
 	} else {
 		/*
@@ -89,8 +89,7 @@ cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
 		 * Use deschedule/add_work operation to transfer the event to
 		 * new group/core
 		 */
-		cn9k_sso_hws_fwd_group((struct cn9k_sso_hws_state *)ws, ev,
-				       grp);
+		cn9k_sso_hws_fwd_group(ws->base, ev, grp);
 	}
 }
 
@@ -115,15 +114,14 @@ cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
 }
 
 static __rte_always_inline void
-cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
-				struct cn9k_sso_hws_state *vws,
+cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws, uint64_t base,
 				const struct rte_event *ev)
 {
 	const uint8_t grp = ev->queue_id;
 
 	/* Group hasn't changed; use SWTAG to forward the event */
-	if (CNXK_GRP_FROM_TAG(plt_read64(vws->tag_op)) == grp) {
-		cn9k_sso_hws_fwd_swtag(vws, ev);
+	if (CNXK_GRP_FROM_TAG(plt_read64(base + SSOW_LF_GWS_TAG)) == grp) {
+		cn9k_sso_hws_fwd_swtag(base, ev);
 		dws->swtag_req = 1;
 	} else {
 		/*
@@ -131,7 +129,7 @@ cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
 		 * Use deschedule/add_work operation to transfer the event to
 		 * new group/core
 		 */
-		cn9k_sso_hws_fwd_group(vws, ev, grp);
+		cn9k_sso_hws_fwd_group(base, ev, grp);
 	}
 }
 
@@ -149,8 +147,7 @@ cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
 }
 
 static __rte_always_inline uint16_t
-cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
-			   struct cn9k_sso_hws_state *ws_pair,
+cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
 			   struct rte_event *ev, const uint32_t flags,
 			   const void *const lookup_mem,
 			   struct cnxk_timesync_info *const tstamp)
@@ -177,14 +174,15 @@ cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
 		     "		prfm pldl1keep, [%[mbuf]]	\n"
 		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
 		       [mbuf] "=&r"(mbuf)
-		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op),
-		       [gw] "r"(set_gw), [pong] "r"(ws_pair->getwrk_op));
+		     : [tag_loc] "r"(base + SSOW_LF_GWS_TAG),
+		       [wqp_loc] "r"(base + SSOW_LF_GWS_WQP), [gw] "r"(set_gw),
+		       [pong] "r"(pair_base + SSOW_LF_GWS_OP_GET_WORK0));
 #else
-	gw.u64[0] = plt_read64(ws->tag_op);
+	gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
 	while ((BIT_ULL(63)) & gw.u64[0])
-		gw.u64[0] = plt_read64(ws->tag_op);
-	gw.u64[1] = plt_read64(ws->wqp_op);
-	plt_write64(set_gw, ws_pair->getwrk_op);
+		gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
+	gw.u64[1] = plt_read64(base + SSOW_LF_GWS_WQP);
+	plt_write64(set_gw, pair_base + SSOW_LF_GWS_OP_GET_WORK0);
 	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
@@ -236,7 +234,7 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
 
 	plt_write64(BIT_ULL(16) | /* wait for work. */
 			    1,	  /* Use Mask set 0. */
-		    ws->getwrk_op);
+		    ws->base + SSOW_LF_GWS_OP_GET_WORK0);
 
 	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
 		rte_prefetch_non_temporal(lookup_mem);
@@ -255,13 +253,14 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
 		     "		prfm pldl1keep, [%[mbuf]]	\n"
 		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
 		       [mbuf] "=&r"(mbuf)
-		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
+		     : [tag_loc] "r"(ws->base + SSOW_LF_GWS_TAG),
+		       [wqp_loc] "r"(ws->base + SSOW_LF_GWS_WQP));
 #else
-	gw.u64[0] = plt_read64(ws->tag_op);
+	gw.u64[0] = plt_read64(ws->base + SSOW_LF_GWS_TAG);
 	while ((BIT_ULL(63)) & gw.u64[0])
-		gw.u64[0] = plt_read64(ws->tag_op);
+		gw.u64[0] = plt_read64(ws->base + SSOW_LF_GWS_TAG);
 
-	gw.u64[1] = plt_read64(ws->wqp_op);
+	gw.u64[1] = plt_read64(ws->base + SSOW_LF_GWS_WQP);
 	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
@@ -303,7 +302,7 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
 
 /* Used in cleaning up workslot. */
 static __rte_always_inline uint16_t
-cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
+cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev)
 {
 	union {
 		__uint128_t get_work;
@@ -325,13 +324,14 @@ cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
 		     "		sub %[mbuf], %[wqp], #0x80	\n"
 		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
 		       [mbuf] "=&r"(mbuf)
-		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
+		     : [tag_loc] "r"(base + SSOW_LF_GWS_TAG),
+		       [wqp_loc] "r"(base + SSOW_LF_GWS_WQP));
 #else
-	gw.u64[0] = plt_read64(ws->tag_op);
+	gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
 	while ((BIT_ULL(63)) & gw.u64[0])
-		gw.u64[0] = plt_read64(ws->tag_op);
+		gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
 
-	gw.u64[1] = plt_read64(ws->wqp_op);
+	gw.u64[1] = plt_read64(base + SSOW_LF_GWS_WQP);
 	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
 #endif
 
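The dual get-work path keeps its ping-pong shape after the rewrite: poll TAG/WQP on the current base, kick a fresh GET_WORK on the paired base, then flip vws. A sketch of that control flow with the register accesses stubbed out (get_work() and kick_get_work() are hypothetical stand-ins for the reads and writes in the hunk above):

#include <stdint.h>

struct dual_ws {
	uint64_t base[2]; /* ping and pong workslot bases */
	uint8_t vws;	  /* slot to consume on the next dequeue */
};

static uint64_t get_work(uint64_t base) /* stub: poll TAG, read WQP */
{
	return base;
}

static void kick_get_work(uint64_t pair_base) /* stub: write GET_WORK0 */
{
	(void)pair_base;
}

static uint64_t dual_dequeue(struct dual_ws *dws)
{
	uint64_t ev = get_work(dws->base[dws->vws]);

	kick_get_work(dws->base[!dws->vws]);
	dws->vws = !dws->vws; /* the next dequeue drains the paired slot */
	return ev;
}

int main(void)
{
	struct dual_ws dws = {.base = {0x100, 0x200}, .vws = 0};

	(void)dual_dequeue(&dws);
	return dws.vws == 1 ? 0 : 1;
}
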
diff --git a/drivers/event/cnxk/cn9k_worker_deq.c b/drivers/event/cnxk/cn9k_worker_deq.c
index d65c72af7a..ba6fd05381 100644
--- a/drivers/event/cnxk/cn9k_worker_deq.c
+++ b/drivers/event/cnxk/cn9k_worker_deq.c
@@ -16,7 +16,7 @@
 									       \
 		if (ws->swtag_req) {                                           \
 			ws->swtag_req = 0;                                     \
-			cnxk_sso_hws_swtag_wait(ws->tag_op);                   \
+			cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG);   \
 			return 1;                                              \
 		}                                                              \
 									       \
@@ -32,7 +32,7 @@
 									       \
 		if (ws->swtag_req) {                                           \
 			ws->swtag_req = 0;                                     \
-			cnxk_sso_hws_swtag_wait(ws->tag_op);                   \
+			cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG);   \
 			return 1;                                              \
 		}                                                              \
 									       \
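
All of the dequeue templates in this and the following files share one idiom: when the previous forward left a tag switch in flight (swtag_req), the next dequeue merely waits for that switch to land and returns the event already held. A compact sketch, with swtag_wait() as a hypothetical stand-in for cnxk_sso_hws_swtag_wait():

#include <stdbool.h>
#include <stdint.h>

struct ws {
	uint64_t base;
	bool swtag_req;
};

static void swtag_wait(uint64_t tag_addr) /* stub: spin on pend bit */
{
	(void)tag_addr;
}

static int dequeue(struct ws *w)
{
	if (w->swtag_req) {
		w->swtag_req = false;
		swtag_wait(w->base /* + SSOW_LF_GWS_TAG */);
		return 1; /* the event from the earlier forward is ready */
	}
	return 0; /* the real code falls through to GET_WORK here */
}

int main(void)
{
	struct ws w = {.base = 0, .swtag_req = true};

	return dequeue(&w) == 1 ? 0 : 1;
}
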
diff --git a/drivers/event/cnxk/cn9k_worker_deq_ca.c b/drivers/event/cnxk/cn9k_worker_deq_ca.c
index b5d0263559..ffe7a7c9e2 100644
--- a/drivers/event/cnxk/cn9k_worker_deq_ca.c
+++ b/drivers/event/cnxk/cn9k_worker_deq_ca.c
@@ -16,7 +16,7 @@
 									       \
 		if (ws->swtag_req) {                                           \
 			ws->swtag_req = 0;                                     \
-			cnxk_sso_hws_swtag_wait(ws->tag_op);                   \
+			cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG);   \
 			return 1;                                              \
 		}                                                              \
 									       \
@@ -42,7 +42,7 @@
 									       \
 		if (ws->swtag_req) {                                           \
 			ws->swtag_req = 0;                                     \
-			cnxk_sso_hws_swtag_wait(ws->tag_op);                   \
+			cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG);   \
 			return 1;                                              \
 		}                                                              \
 									       \
diff --git a/drivers/event/cnxk/cn9k_worker_deq_tmo.c b/drivers/event/cnxk/cn9k_worker_deq_tmo.c
index b41a590fb7..5147c1933a 100644
--- a/drivers/event/cnxk/cn9k_worker_deq_tmo.c
+++ b/drivers/event/cnxk/cn9k_worker_deq_tmo.c
@@ -16,7 +16,7 @@
 									       \
 		if (ws->swtag_req) {                                           \
 			ws->swtag_req = 0;                                     \
-			cnxk_sso_hws_swtag_wait(ws->tag_op);                   \
+			cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG);   \
 			return ret;                                            \
 		}                                                              \
 									       \
@@ -46,7 +46,7 @@
 									       \
 		if (ws->swtag_req) {                                           \
 			ws->swtag_req = 0;                                     \
-			cnxk_sso_hws_swtag_wait(ws->tag_op);                   \
+			cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG);   \
 			return ret;                                            \
 		}                                                              \
 									       \
diff --git a/drivers/event/cnxk/cn9k_worker_dual_deq.c b/drivers/event/cnxk/cn9k_worker_dual_deq.c
index 440b66edca..ed134ab779 100644
--- a/drivers/event/cnxk/cn9k_worker_dual_deq.c
+++ b/drivers/event/cnxk/cn9k_worker_dual_deq.c
@@ -16,14 +16,14 @@
 		RTE_SET_USED(timeout_ticks);                                   \
 		if (dws->swtag_req) {                                          \
 			dws->swtag_req = 0;                                    \
-			cnxk_sso_hws_swtag_wait(                               \
-				dws->ws_state[!dws->vws].tag_op);              \
+			cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] +         \
+						SSOW_LF_GWS_TAG);              \
 			return 1;                                              \
 		}                                                              \
 									       \
 		gw = cn9k_sso_hws_dual_get_work(                               \
-			&dws->ws_state[dws->vws], &dws->ws_state[!dws->vws],   \
-			ev, flags, dws->lookup_mem, dws->tstamp);              \
+			dws->base[dws->vws], dws->base[!dws->vws], ev, flags,  \
+			dws->lookup_mem, dws->tstamp);                         \
 		dws->vws = !dws->vws;                                          \
 		return gw;                                                     \
 	}                                                                      \
@@ -37,14 +37,14 @@
 		RTE_SET_USED(timeout_ticks);                                   \
 		if (dws->swtag_req) {                                          \
 			dws->swtag_req = 0;                                    \
-			cnxk_sso_hws_swtag_wait(                               \
-				dws->ws_state[!dws->vws].tag_op);              \
+			cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] +         \
+						SSOW_LF_GWS_TAG);              \
 			return 1;                                              \
 		}                                                              \
 									       \
 		gw = cn9k_sso_hws_dual_get_work(                               \
-			&dws->ws_state[dws->vws], &dws->ws_state[!dws->vws],   \
-			ev, flags, dws->lookup_mem, dws->tstamp);              \
+			dws->base[dws->vws], dws->base[!dws->vws], ev, flags,  \
+			dws->lookup_mem, dws->tstamp);                         \
 		dws->vws = !dws->vws;                                          \
 		return gw;                                                     \
 	}
diff --git a/drivers/event/cnxk/cn9k_worker_dual_deq_ca.c b/drivers/event/cnxk/cn9k_worker_dual_deq_ca.c
index b66e2cfc08..22e148be73 100644
--- a/drivers/event/cnxk/cn9k_worker_dual_deq_ca.c
+++ b/drivers/event/cnxk/cn9k_worker_dual_deq_ca.c
@@ -16,15 +16,14 @@
 		RTE_SET_USED(timeout_ticks);                                   \
 		if (dws->swtag_req) {                                          \
 			dws->swtag_req = 0;                                    \
-			cnxk_sso_hws_swtag_wait(                               \
-				dws->ws_state[!dws->vws].tag_op);              \
+			cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] +         \
+						SSOW_LF_GWS_TAG);              \
 			return 1;                                              \
 		}                                                              \
 									       \
-		gw = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws],      \
-						&dws->ws_state[!dws->vws], ev, \
-						flags | CPT_RX_WQE_F,          \
-						dws->lookup_mem, dws->tstamp); \
+		gw = cn9k_sso_hws_dual_get_work(                               \
+			dws->base[dws->vws], dws->base[!dws->vws], ev,         \
+			flags | CPT_RX_WQE_F, dws->lookup_mem, dws->tstamp);   \
 		dws->vws = !dws->vws;                                          \
 		return gw;                                                     \
 	}                                                                      \
@@ -48,14 +47,14 @@
 		RTE_SET_USED(timeout_ticks);                                   \
 		if (dws->swtag_req) {                                          \
 			dws->swtag_req = 0;                                    \
-			cnxk_sso_hws_swtag_wait(                               \
-				dws->ws_state[!dws->vws].tag_op);              \
+			cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] +         \
+						SSOW_LF_GWS_TAG);              \
 			return 1;                                              \
 		}                                                              \
 									       \
 		gw = cn9k_sso_hws_dual_get_work(                               \
-			&dws->ws_state[dws->vws], &dws->ws_state[!dws->vws],   \
-			ev, flags | NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F,         \
+			dws->base[dws->vws], dws->base[!dws->vws], ev,         \
+			flags | NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F,             \
 			dws->lookup_mem, dws->tstamp);                         \
 		dws->vws = !dws->vws;                                          \
 		return gw;                                                     \
diff --git a/drivers/event/cnxk/cn9k_worker_dual_deq_tmo.c b/drivers/event/cnxk/cn9k_worker_dual_deq_tmo.c
index 78a4b3d127..e5ba3feb22 100644
--- a/drivers/event/cnxk/cn9k_worker_dual_deq_tmo.c
+++ b/drivers/event/cnxk/cn9k_worker_dual_deq_tmo.c
@@ -16,20 +16,19 @@
 									       \
 		if (dws->swtag_req) {                                          \
 			dws->swtag_req = 0;                                    \
-			cnxk_sso_hws_swtag_wait(                               \
-				dws->ws_state[!dws->vws].tag_op);              \
+			cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] +         \
+						SSOW_LF_GWS_TAG);              \
 			return ret;                                            \
 		}                                                              \
 									       \
 		ret = cn9k_sso_hws_dual_get_work(                              \
-			&dws->ws_state[dws->vws], &dws->ws_state[!dws->vws],   \
-			ev, flags, dws->lookup_mem, dws->tstamp);              \
+			dws->base[dws->vws], dws->base[!dws->vws], ev, flags,  \
+			dws->lookup_mem, dws->tstamp);                         \
 		dws->vws = !dws->vws;                                          \
 		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) {   \
 			ret = cn9k_sso_hws_dual_get_work(                      \
-				&dws->ws_state[dws->vws],                      \
-				&dws->ws_state[!dws->vws], ev, flags,          \
-				dws->lookup_mem, dws->tstamp);                 \
+				dws->base[dws->vws], dws->base[!dws->vws], ev, \
+				flags, dws->lookup_mem, dws->tstamp);          \
 			dws->vws = !dws->vws;                                  \
 		}                                                              \
 									       \
@@ -55,20 +54,19 @@
 									       \
 		if (dws->swtag_req) {                                          \
 			dws->swtag_req = 0;                                    \
-			cnxk_sso_hws_swtag_wait(                               \
-				dws->ws_state[!dws->vws].tag_op);              \
+			cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] +         \
+						SSOW_LF_GWS_TAG);              \
 			return ret;                                            \
 		}                                                              \
 									       \
 		ret = cn9k_sso_hws_dual_get_work(                              \
-			&dws->ws_state[dws->vws], &dws->ws_state[!dws->vws],   \
-			ev, flags, dws->lookup_mem, dws->tstamp);              \
+			dws->base[dws->vws], dws->base[!dws->vws], ev, flags,  \
+			dws->lookup_mem, dws->tstamp);                         \
 		dws->vws = !dws->vws;                                          \
 		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) {   \
 			ret = cn9k_sso_hws_dual_get_work(                      \
-				&dws->ws_state[dws->vws],                      \
-				&dws->ws_state[!dws->vws], ev, flags,          \
-				dws->lookup_mem, dws->tstamp);                 \
+				dws->base[dws->vws], dws->base[!dws->vws], ev, \
+				flags, dws->lookup_mem, dws->tstamp);          \
 			dws->vws = !dws->vws;                                  \
 		}                                                              \
 									       \
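
The _tmo variants wrap the same ping-pong fetch in a bounded retry loop: one attempt per timeout tick until work arrives. A sketch of that loop shape with the hardware access stubbed out (try_get_work() is a hypothetical stand-in for cn9k_sso_hws_dual_get_work()):

#include <stdint.h>

struct dws {
	uint64_t base[2];
	uint8_t vws;
};

/* Stub: nonzero would mean an event was received. */
static uint16_t try_get_work(struct dws *d)
{
	d->vws = !d->vws; /* every attempt flips the ping-pong slot */
	return 0;
}

static uint16_t deq_tmo(struct dws *d, uint64_t timeout_ticks)
{
	uint16_t ret = try_get_work(d);

	for (uint64_t iter = 1; iter < timeout_ticks && ret == 0; iter++)
		ret = try_get_work(d);
	return ret; /* 0 on timeout, nonzero when an event arrived */
}

int main(void)
{
	struct dws d = {.base = {0, 0}, .vws = 0};

	return deq_tmo(&d, 4) == 0 ? 0 : 1;
}
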
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index d9f52d03e0..305c6a3b9e 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -136,19 +136,9 @@ struct cn10k_sso_hws {
 	uint8_t tx_adptr_data[];
 } __rte_cache_aligned;
 
-/* CN9K HWS ops */
-#define CN9K_SSO_HWS_OPS                                                       \
-	uintptr_t swtag_desched_op;                                            \
-	uintptr_t swtag_flush_op;                                              \
-	uintptr_t swtag_norm_op;                                               \
-	uintptr_t getwrk_op;                                                   \
-	uintptr_t tag_op;                                                      \
-	uintptr_t wqp_op
-
 /* Event port a.k.a GWS */
 struct cn9k_sso_hws {
-	/* Get Work Fastpath data */
-	CN9K_SSO_HWS_OPS;
+	uint64_t base;
 	/* PTP timestamp */
 	struct cnxk_timesync_info *tstamp;
 	void *lookup_mem;
@@ -159,17 +149,11 @@ struct cn9k_sso_hws {
 	uint64_t *fc_mem;
 	uintptr_t grp_base;
 	/* Tx Fastpath data */
-	uint64_t base __rte_cache_aligned;
-	uint8_t tx_adptr_data[];
+	uint8_t tx_adptr_data[] __rte_cache_aligned;
 } __rte_cache_aligned;
 
-struct cn9k_sso_hws_state {
-	CN9K_SSO_HWS_OPS;
-};
-
 struct cn9k_sso_hws_dual {
-	/* Get Work Fastpath data */
-	struct cn9k_sso_hws_state ws_state[2]; /* Ping and Pong */
+	uint64_t base[2]; /* Ping and Pong */
 	/* PTP timestamp */
 	struct cnxk_timesync_info *tstamp;
 	void *lookup_mem;
@@ -181,8 +165,7 @@ struct cn9k_sso_hws_dual {
 	uint64_t *fc_mem;
 	uintptr_t grp_base;
 	/* Tx Fastpath data */
-	uint64_t base[2] __rte_cache_aligned;
-	uint8_t tx_adptr_data[];
+	uint8_t tx_adptr_data[] __rte_cache_aligned;
 } __rte_cache_aligned;
 
 struct cnxk_sso_hws_cookie {
-- 
2.17.1
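
The header diff above is where the series pays off: six cached uintptr_t ops per workslot collapse into one base address, and the Tx adapter data keeps its own cache line through an aligned flexible array member. A before/after sizing sketch (field subset only; CACHE_ALIGNED approximates __rte_cache_aligned with an assumed 64-byte line):

#include <stdint.h>
#include <stdio.h>

#define CACHE_ALIGNED __attribute__((aligned(64)))

struct hws_old { /* before: an ops block plus a separately aligned base */
	uintptr_t swtag_desched_op, swtag_flush_op, swtag_norm_op;
	uintptr_t getwrk_op, tag_op, wqp_op;
	uint64_t base CACHE_ALIGNED;
	uint8_t tx_adptr_data[];
} CACHE_ALIGNED;

struct hws_new { /* after: one base; Tx data starts on its own line */
	uint64_t base;
	uint8_t tx_adptr_data[] CACHE_ALIGNED;
} CACHE_ALIGNED;

int main(void)
{
	printf("old=%zu new=%zu\n", sizeof(struct hws_old),
	       sizeof(struct hws_new));
	return 0;
}

With a 64-byte line this halves the fixed part of the sketched workslot, which is what the "reduce workslot memory consumption" patch title in v2 refers to.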



* Re: [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free
  2021-11-03  0:52 ` [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free pbhagavatula
                     ` (3 preceding siblings ...)
  2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 5/5] event/cnxk: rework enqueue path pbhagavatula
@ 2021-11-04  7:41   ` Jerin Jacob
  4 siblings, 0 replies; 8+ messages in thread
From: Jerin Jacob @ 2021-11-04  7:41 UTC (permalink / raw)
  To: Pavan Nikhilesh
  Cc: Jerin Jacob, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Ray Kinsella, Shijith Thotton, dpdk-dev

On Wed, Nov 3, 2021 at 2:07 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Add common API to create and free SSO XAQ pool.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>


Series applied to dpdk-next-net-eventdev/for-main. Thanks


> ---
>  v2 Changes:
>  - Merge patchsets 19356,18614 to avoid merge conflicts.
>  - Rebase onto main.
>
>  drivers/common/cnxk/roc_sso.c       | 124 ++++++++++++++++++++++++++++
>  drivers/common/cnxk/roc_sso.h       |  14 ++++
>  drivers/common/cnxk/roc_sso_priv.h  |   5 ++
>  drivers/common/cnxk/version.map     |   2 +
>  drivers/event/cnxk/cn10k_eventdev.c |   2 +
>  5 files changed, 147 insertions(+)
>
> diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
> index 762893f3dc..45ff16ca0e 100644
> --- a/drivers/common/cnxk/roc_sso.c
> +++ b/drivers/common/cnxk/roc_sso.c
> @@ -5,6 +5,8 @@
>  #include "roc_api.h"
>  #include "roc_priv.h"
>
> +#define SSO_XAQ_CACHE_CNT (0x7)
> +
>  /* Private functions. */
>  int
>  sso_lf_alloc(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf,
> @@ -387,6 +389,128 @@ roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
>         return mbox_process(dev->mbox);
>  }
>
> +int
> +sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
> +                       uint32_t nb_xae, uint32_t xae_waes,
> +                       uint32_t xaq_buf_size, uint16_t nb_hwgrp)
> +{
> +       struct npa_pool_s pool;
> +       struct npa_aura_s aura;
> +       plt_iova_t iova;
> +       uint32_t i;
> +       int rc;
> +
> +       if (xaq->mem != NULL) {
> +               rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
> +               if (rc < 0) {
> +                       plt_err("Failed to release XAQ %d", rc);
> +                       return rc;
> +               }
> +               roc_npa_pool_destroy(xaq->aura_handle);
> +               plt_free(xaq->fc);
> +               plt_free(xaq->mem);
> +               memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
> +       }
> +
> +       xaq->fc = plt_zmalloc(ROC_ALIGN, ROC_ALIGN);
> +       if (xaq->fc == NULL) {
> +               plt_err("Failed to allocate XAQ FC");
> +               rc = -ENOMEM;
> +               goto fail;
> +       }
> +
> +       xaq->nb_xae = nb_xae;
> +
> +       /* Taken from HRM 14.3.3(4) */
> +       xaq->nb_xaq = (SSO_XAQ_CACHE_CNT * nb_hwgrp);
> +       xaq->nb_xaq += PLT_MAX(1 + ((xaq->nb_xae - 1) / xae_waes), xaq->nb_xaq);
> +
> +       xaq->mem = plt_zmalloc(xaq_buf_size * xaq->nb_xaq, xaq_buf_size);
> +       if (xaq->mem == NULL) {
> +               plt_err("Failed to allocate XAQ mem");
> +               rc = -ENOMEM;
> +               goto free_fc;
> +       }
> +
> +       memset(&pool, 0, sizeof(struct npa_pool_s));
> +       pool.nat_align = 1;
> +
> +       memset(&aura, 0, sizeof(aura));
> +       aura.fc_ena = 1;
> +       aura.fc_addr = (uint64_t)xaq->fc;
> +       aura.fc_hyst_bits = 0; /* Store count on all updates */
> +       rc = roc_npa_pool_create(&xaq->aura_handle, xaq_buf_size, xaq->nb_xaq,
> +                                &aura, &pool);
> +       if (rc) {
> +               plt_err("Failed to create XAQ pool");
> +               goto npa_fail;
> +       }
> +
> +       iova = (uint64_t)xaq->mem;
> +       for (i = 0; i < xaq->nb_xaq; i++) {
> +               roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
> +               iova += xaq_buf_size;
> +       }
> +       roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
> +
> +       /* When SW does addwork (enqueue), check whether there is space in
> +        * the XAQ pool by comparing the count at fc_addr above against the
> +        * xaq_lmt calculated below. There should be a minimum headroom of
> +        * 7 XAQs per HWGRP so that the SSO can request XAQs to cache even
> +        * before enqueue is called.
> +        */
> +       xaq->xaq_lmt = xaq->nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT);
> +
> +       return 0;
> +npa_fail:
> +       plt_free(xaq->mem);
> +free_fc:
> +       plt_free(xaq->fc);
> +fail:
> +       memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
> +       return rc;
> +}
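
The sizing block above is easiest to sanity-check with concrete numbers. A worked example of the quoted HRM 14.3.3(4) arithmetic; nb_xae, xae_waes and nb_hwgrp below are made-up inputs, not values from any real cnxk part:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SSO_XAQ_CACHE_CNT 7
#define MAX(a, b) ((a) > (b) ? (a) : (b)) /* stand-in for PLT_MAX */

int main(void)
{
	uint32_t nb_xae = 1024;	/* events the application wants in flight */
	uint32_t xae_waes = 16;	/* hypothetical events per XAQ buffer */
	uint16_t nb_hwgrp = 4;	/* HWGRPs (event queues) in use */

	/* Reserve a cache of 7 XAQs per HWGRP, then add enough buffers
	 * for nb_xae events or another cache's worth, whichever is more.
	 */
	uint32_t nb_xaq = SSO_XAQ_CACHE_CNT * nb_hwgrp;
	nb_xaq += MAX(1 + ((nb_xae - 1) / xae_waes), nb_xaq);

	/* The software enqueue limit keeps the per-HWGRP headroom. */
	uint32_t xaq_lmt = nb_xaq - (nb_hwgrp * SSO_XAQ_CACHE_CNT);

	printf("nb_xaq=%" PRIu32 " xaq_lmt=%" PRIu32 "\n", nb_xaq, xaq_lmt);
	return 0;
}

With these inputs the pool ends up at 92 XAQs, 28 of which are per-HWGRP cache headroom, leaving software an enqueue limit of 64.
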
> +
> +int
> +roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae)
> +{
> +       struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
> +
> +       return sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
> +                                      roc_sso->xae_waes, roc_sso->xaq_buf_size,
> +                                      roc_sso->nb_hwgrp);
> +}
> +
> +int
> +sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
> +                       uint16_t nb_hwgrp)
> +{
> +       int rc;
> +
> +       if (xaq->mem != NULL) {
> +               if (nb_hwgrp) {
> +                       rc = sso_hwgrp_release_xaq(dev, nb_hwgrp);
> +                       if (rc < 0) {
> +                               plt_err("Failed to release XAQ %d", rc);
> +                               return rc;
> +                       }
> +               }
> +               roc_npa_pool_destroy(xaq->aura_handle);
> +               plt_free(xaq->fc);
> +               plt_free(xaq->mem);
> +       }
> +       memset(xaq, 0, sizeof(struct roc_sso_xaq_data));
> +
> +       return 0;
> +}
> +
> +int
> +roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, uint16_t nb_hwgrp)
> +{
> +       struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
> +
> +       return sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
> +}
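
Together the two exported wrappers give the eventdev driver a create/destroy pair around roc_sso->xaq. A hypothetical call sequence (it compiles only inside the cnxk driver tree; error handling trimmed):

#include "roc_api.h" /* roc_sso_hwgrp_init_xaq_aura()/_free_xaq_aura() */

static int
sso_xaq_setup(struct roc_sso *sso, uint32_t nb_events)
{
	int rc;

	/* Size and create the NPA-backed XAQ pool for all HWGRPs. */
	rc = roc_sso_hwgrp_init_xaq_aura(sso, nb_events);
	if (rc < 0)
		return rc;

	/* ... link queues, run traffic ... */

	/* Release the HWGRPs' XAQs and destroy the pool. */
	return roc_sso_hwgrp_free_xaq_aura(sso, sso->nb_hwgrp);
}
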
> +
>  int
>  sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps)
>  {
> diff --git a/drivers/common/cnxk/roc_sso.h b/drivers/common/cnxk/roc_sso.h
> index b28f6089cc..27d49c6c68 100644
> --- a/drivers/common/cnxk/roc_sso.h
> +++ b/drivers/common/cnxk/roc_sso.h
> @@ -27,6 +27,15 @@ struct roc_sso_hwgrp_stats {
>         uint64_t page_cnt;
>  };
>
> +struct roc_sso_xaq_data {
> +       uint32_t nb_xaq;
> +       uint32_t nb_xae;
> +       uint32_t xaq_lmt;
> +       uint64_t aura_handle;
> +       void *fc;
> +       void *mem;
> +};
> +
>  struct roc_sso {
>         struct plt_pci_device *pci_dev;
>         /* Public data. */
> @@ -35,6 +44,7 @@ struct roc_sso {
>         uint16_t nb_hwgrp;
>         uint8_t nb_hws;
>         uintptr_t lmt_base;
> +       struct roc_sso_xaq_data xaq;
>         /* HW Const. */
>         uint32_t xae_waes;
>         uint32_t xaq_buf_size;
> @@ -95,6 +105,10 @@ int __roc_api roc_sso_hwgrp_hws_link_status(struct roc_sso *roc_sso,
>  uintptr_t __roc_api roc_sso_hws_base_get(struct roc_sso *roc_sso, uint8_t hws);
>  uintptr_t __roc_api roc_sso_hwgrp_base_get(struct roc_sso *roc_sso,
>                                            uint16_t hwgrp);
> +int __roc_api roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso,
> +                                         uint32_t nb_xae);
> +int __roc_api roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso,
> +                                         uint16_t nb_hwgrp);
>
>  /* Debug */
>  void __roc_api roc_sso_dump(struct roc_sso *roc_sso, uint8_t nb_hws,
> diff --git a/drivers/common/cnxk/roc_sso_priv.h b/drivers/common/cnxk/roc_sso_priv.h
> index 8dffa3fbf4..2e1b025d1c 100644
> --- a/drivers/common/cnxk/roc_sso_priv.h
> +++ b/drivers/common/cnxk/roc_sso_priv.h
> @@ -47,6 +47,11 @@ void sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp,
>                          uint16_t hwgrp[], uint16_t n, uint16_t enable);
>  int sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps);
>  int sso_hwgrp_release_xaq(struct dev *dev, uint16_t hwgrps);
> +int sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
> +                           uint32_t nb_xae, uint32_t xae_waes,
> +                           uint32_t xaq_buf_size, uint16_t nb_hwgrp);
> +int sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
> +                           uint16_t nb_hwgrp);
>
>  /* SSO IRQ */
>  int sso_register_irqs_priv(struct roc_sso *roc_sso,
> diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
> index 8d4d42f476..cbf4a4137b 100644
> --- a/drivers/common/cnxk/version.map
> +++ b/drivers/common/cnxk/version.map
> @@ -310,7 +310,9 @@ INTERNAL {
>         roc_sso_dump;
>         roc_sso_hwgrp_alloc_xaq;
>         roc_sso_hwgrp_base_get;
> +       roc_sso_hwgrp_free_xaq_aura;
>         roc_sso_hwgrp_hws_link_status;
> +       roc_sso_hwgrp_init_xaq_aura;
>         roc_sso_hwgrp_qos_config;
>         roc_sso_hwgrp_release_xaq;
>         roc_sso_hwgrp_set_priority;
> diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
> index e287448189..2fb4ea878e 100644
> --- a/drivers/event/cnxk/cn10k_eventdev.c
> +++ b/drivers/event/cnxk/cn10k_eventdev.c
> @@ -132,6 +132,7 @@ cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
>
>         plt_write64(0, base + SSO_LF_GGRP_QCTL);
>
> +       plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
>         req = queue_id;     /* GGRP ID */
>         req |= BIT_ULL(18); /* Grouped */
>         req |= BIT_ULL(16); /* WAIT */
> @@ -177,6 +178,7 @@ cn10k_sso_hws_reset(void *arg, void *hws)
>         } gw;
>         uint8_t pend_tt;
>
> +       plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
>         /* Wait till getwork/swtp/waitw/desched completes. */
>         do {
>                 pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
> --
> 2.17.1
>
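
The two cn10k hunks in the applied version also add an explicit get-work-cache invalidate ahead of the existing pend-state drain, so a flush or reset cannot race a prefetched event. A sketch of that quiesce ordering against a fake register file (the offsets and pend-bit position are placeholders; reg_write()/reg_read() stand in for plt_write64()/plt_read64()):

#include <stdint.h>

#define SSOW_LF_GWS_OP_GWC_INVAL 0x0ULL	      /* placeholder offset */
#define SSOW_LF_GWS_PENDSTATE	 0x8ULL	      /* placeholder offset */
#define PEND_BIT		 (1ULL << 56) /* placeholder position */

static void reg_write(uint64_t val, uint64_t addr)
{
	*(volatile uint64_t *)(uintptr_t)addr = val;
}

static uint64_t reg_read(uint64_t addr)
{
	return *(volatile uint64_t *)(uintptr_t)addr;
}

static void hws_quiesce(uint64_t base)
{
	/* Drop any prefetched GET_WORK result first... */
	reg_write(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
	/* ...then wait for getwork/swtag/desched to drain. */
	while (reg_read(base + SSOW_LF_GWS_PENDSTATE) & PEND_BIT)
		;
}

int main(void)
{
	static uint64_t regs[2]; /* fake register file, pend bit clear */

	hws_quiesce((uint64_t)(uintptr_t)regs);
	return 0;
}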


Thread overview: 8+ messages
2021-09-02  7:00 [dpdk-dev] [PATCH 1/2] common/cnxk: add SSO XAQ pool create and free pbhagavatula
2021-09-02  7:00 ` [dpdk-dev] [PATCH 2/2] event/cnxk: use common XAQ pool APIs pbhagavatula
2021-11-03  0:52 ` [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free pbhagavatula
2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 2/5] event/cnxk: use common XAQ pool APIs pbhagavatula
2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 3/5] event/cnxk: fix packet Tx overflow pbhagavatula
2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 4/5] event/cnxk: reduce workslot memory consumption pbhagavatula
2021-11-03  0:52   ` [dpdk-dev] [PATCH v2 5/5] event/cnxk: rework enqueue path pbhagavatula
2021-11-04  7:41   ` [dpdk-dev] [PATCH v2 1/5] common/cnxk: add SSO XAQ pool create and free Jerin Jacob
