patches for DPDK stable branches
* [dpdk-stable] [dpdk-dev] [PATCH] mempool/octeontx: fix pool to aura mapping
@ 2018-06-27 11:47 Pavan Nikhilesh
  2018-06-27 14:35 ` santosh
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Pavan Nikhilesh @ 2018-06-27 11:47 UTC (permalink / raw)
  To: jerin.jacob, santosh.shukla, olivier.matz; +Cc: dev, stable, Pavan Nikhilesh

HW requires each pool to be mapped to an aura set of 16 auras.
Previously, the pool to aura mapping was assumed to be 1:1.

Fixes: 02fd6c744350 ("mempool/octeontx: support allocation")
Cc: stable@dpdk.org

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
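Note (kept below the "---", so not part of the commit message): a minimal,
self-contained sketch of the pool-to-aura mapping this patch introduces. It
assumes only the FPA_GPOOL_MASK/FPA_GAURA_SHIFT definitions added to
octeontx_fpavf.h below; the helper names (bufpool_gpool, bufpool_gaura) and
the example handle value are illustrative, not part of the driver.

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the definitions this patch adds/uses in octeontx_fpavf.h. */
	#define FPA_VF_MAX	32
	#define FPA_GPOOL_MASK	(FPA_VF_MAX - 1)
	#define FPA_GAURA_SHIFT	4	/* each pool is backed by 2^4 = 16 auras */

	/* The gpool id sits in the low bits of the pool handle. */
	static inline uint8_t bufpool_gpool(uintptr_t handle)
	{
		return (uint8_t)handle & FPA_GPOOL_MASK;
	}

	/* Base aura of the 16-aura set backing the pool (previously 1:1). */
	static inline uint8_t bufpool_gaura(uintptr_t handle)
	{
		return bufpool_gpool(handle) << FPA_GAURA_SHIFT;
	}

	int main(void)
	{
		uintptr_t handle = 0x8400000003ULL; /* hypothetical handle, gpool = 3 */

		printf("gpool %u -> base gaura %u\n",
		       bufpool_gpool(handle), bufpool_gaura(handle));
		/* prints: gpool 3 -> base gaura 48 */
		return 0;
	}

With this scheme the per-aura registers touched in octeontx_fpavf.c
(FPA_VF_VHAURA_CNT and friends) are indexed with the shifted gaura id rather
than the raw gpool id, which is what the hunks below change.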
 drivers/event/octeontx/timvf_evdev.c      |  2 +-
 drivers/mempool/octeontx/octeontx_fpavf.c | 45 ++++++++++++++---------
 drivers/mempool/octeontx/octeontx_fpavf.h |  8 ++++
 drivers/net/octeontx/octeontx_ethdev.c    |  4 +-
 drivers/net/octeontx/octeontx_rxtx.c      |  2 +-
 5 files changed, 40 insertions(+), 21 deletions(-)

diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index c4fbd2d86..8a045c250 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -174,7 +174,7 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 	if (use_fpa) {
 		pool = (uintptr_t)((struct rte_mempool *)
 				timr->chunk_pool)->pool_id;
-		ret = octeontx_fpa_bufpool_gpool(pool);
+		ret = octeontx_fpa_bufpool_gaura(pool);
 		if (ret < 0) {
 			timvf_log_dbg("Unable to get gaura id");
 			ret = -ENOMEM;
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 7aecaa85d..e931cf055 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -243,7 +243,7 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
 		POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
 		POOL_ENA;

-	cfg.aid = 0;
+	cfg.aid = gpool << FPA_GAURA_SHIFT;
 	cfg.pool_cfg = reg;
 	cfg.pool_stack_base = phys_addr;
 	cfg.pool_stack_end = phys_addr + memsz;
@@ -327,7 +327,7 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index)
 	hdr.vfid = gpool_index;
 	hdr.res_code = 0;
 	memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
-	cfg.aid = gpool_index; /* gpool is guara */
+	cfg.aid = gpool_index << FPA_GAURA_SHIFT;

 	ret = octeontx_mbox_send(&hdr, &cfg,
 					sizeof(struct octeontx_mbox_fpa_cfg),
@@ -335,7 +335,8 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index)
 	if (ret < 0) {
 		fpavf_log_err("Could not attach fpa ");
 		fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
-			      gpool_index, gpool_index, ret, hdr.res_code);
+			      gpool_index << FPA_GAURA_SHIFT, gpool_index, ret,
+			      hdr.res_code);
 		ret = -EACCES;
 		goto err;
 	}
@@ -355,14 +356,15 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
 		goto err;
 	}

-	cfg.aid = gpool_index; /* gpool is gaura */
+	cfg.aid = gpool_index << FPA_GAURA_SHIFT;
 	hdr.coproc = FPA_COPROC;
 	hdr.msg = FPA_DETACHAURA;
 	hdr.vfid = gpool_index;
 	ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
 	if (ret < 0) {
 		fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
-			      gpool_index, ret, hdr.res_code);
+			      gpool_index << FPA_GAURA_SHIFT, ret,
+			      hdr.res_code);
 		ret = -EINVAL;
 	}

@@ -469,6 +471,7 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
 {
 	uint64_t cnt, limit, avail;
 	uint8_t gpool;
+	uint8_t gaura;
 	uintptr_t pool_bar;

 	if (unlikely(!octeontx_fpa_handle_valid(handle)))
@@ -476,14 +479,16 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)

 	/* get the gpool */
 	gpool = octeontx_fpa_bufpool_gpool(handle);
+	/* get the aura */
+	gaura = octeontx_fpa_bufpool_gaura(handle);

 	/* Get pool bar address from handle */
 	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

 	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
-				FPA_VF_VHAURA_CNT(gpool)));
+				FPA_VF_VHAURA_CNT(gaura)));
 	limit = fpavf_read64((void *)((uintptr_t)pool_bar +
-				FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+				FPA_VF_VHAURA_CNT_LIMIT(gaura)));

 	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
 				FPA_VF_VHPOOL_AVAILABLE(gpool)));
@@ -496,6 +501,7 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 				unsigned int buf_offset, int node_id)
 {
 	unsigned int gpool;
+	unsigned int gaura;
 	uintptr_t gpool_handle;
 	uintptr_t pool_bar;
 	int res;
@@ -545,16 +551,18 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
 		goto error_pool_destroy;
 	}

+	gaura = gpool << FPA_GAURA_SHIFT;
+
 	/* Release lock */
 	rte_spinlock_unlock(&fpadev.lock);

 	/* populate AURA registers */
 	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
-			 FPA_VF_VHAURA_CNT(gpool)));
+			 FPA_VF_VHAURA_CNT(gaura)));
 	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
-			 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+			 FPA_VF_VHAURA_CNT_LIMIT(gaura)));
 	fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
-			 FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+			 FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

 	octeontx_fpapf_start_count(gpool);

@@ -581,6 +589,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
 	uint64_t sz;
 	uint64_t cnt, avail;
 	uint8_t gpool;
+	uint8_t gaura;
 	uintptr_t pool_bar;
 	int ret;

@@ -594,13 +603,15 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)

 	/* get the pool */
 	gpool = octeontx_fpa_bufpool_gpool(handle);
+	/* get the aura */
+	gaura = octeontx_fpa_bufpool_gaura(handle);

 	/* Get pool bar address from handle */
 	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;

 	 /* Check for no outstanding buffers */
 	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
-					FPA_VF_VHAURA_CNT(gpool)));
+					FPA_VF_VHAURA_CNT(gaura)));
 	if (cnt) {
 		fpavf_log_dbg("buffer exist in pool cnt %" PRId64 "\n", cnt);
 		return -EBUSY;
@@ -613,9 +624,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)

 	/* Prepare to empty the entire POOL */
 	fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
-			 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+			 FPA_VF_VHAURA_CNT_LIMIT(gaura)));
 	fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
-			 FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+			 FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

 	/* Empty the pool */
 	/* Invalidate the POOL */
@@ -627,11 +638,11 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
 		/* Yank a buffer from the pool */
 		node = (void *)(uintptr_t)
 			fpavf_read64((void *)
-				    (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));
+				    (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));

 		if (node == NULL) {
 			fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
-				      gpool, avail);
+				      gaura, avail);
 			break;
 		}

@@ -665,9 +676,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)

 	/* Deactivate the AURA */
 	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
-			FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+			FPA_VF_VHAURA_CNT_LIMIT(gaura)));
 	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
-			FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+			FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));

 	ret = octeontx_fpapf_aura_detach(gpool);
 	if (ret) {
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index b76f40e75..b97ce98c3 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -14,6 +14,7 @@

 #define	FPA_VF_MAX			32
 #define FPA_GPOOL_MASK			(FPA_VF_MAX-1)
+#define FPA_GAURA_SHIFT			4

 /* FPA VF register offsets */
 #define FPA_VF_INT(x)			(0x200ULL | ((x) << 22))
@@ -102,4 +103,11 @@ octeontx_fpa_bufpool_gpool(uintptr_t handle)
 {
 	return (uint8_t)handle & FPA_GPOOL_MASK;
 }
+
+static __rte_always_inline uint8_t
+octeontx_fpa_bufpool_gaura(uintptr_t handle)
+{
+	return octeontx_fpa_bufpool_gpool(handle) << FPA_GAURA_SHIFT;
+}
+
 #endif	/* __OCTEONTX_FPAVF_H__ */
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 1eb453b21..a3f2b471f 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -898,8 +898,8 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,

 		pool = (uintptr_t)mb_pool->pool_id;

-		/* Get the gpool Id */
-		gaura = octeontx_fpa_bufpool_gpool(pool);
+		/* Get the gaura Id */
+		gaura = octeontx_fpa_bufpool_gaura(pool);

 		pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
 		pki_qos.num_entry = 1;
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index 2502d90e9..a9149b4e1 100644
--- a/drivers/net/octeontx/octeontx_rxtx.c
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -31,7 +31,7 @@ __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
 		return -ENOSPC;

 	/* Get the gaura Id */
-	gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)tx_pkt->pool->pool_id);
+	gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)tx_pkt->pool->pool_id);

 	/* Setup PKO_SEND_HDR_S */
 	cmd_buf[0] = tx_pkt->data_len & 0xffff;
--
2.17.1


Thread overview: 6+ messages
2018-06-27 11:47 [dpdk-stable] [dpdk-dev] [PATCH] mempool/octeontx: fix pool to aura mapping Pavan Nikhilesh
2018-06-27 14:35 ` santosh
2018-07-02  6:29 ` [dpdk-stable] [dpdk-dev] [PATCH v2] " Pavan Nikhilesh
2018-07-02  9:15   ` Jerin Jacob
2018-07-03  4:50 ` [dpdk-stable] [dpdk-dev] [PATCH v3] " Pavan Nikhilesh
2018-07-12 20:26   ` Thomas Monjalon
