patches for DPDK stable branches
From: Jerin Jacob <jerin.jacob@caviumnetworks.com>
To: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Cc: santosh.shukla@caviumnetworks.com, olivier.matz@6wind.com,
	dev@dpdk.org, stable@dpdk.org
Subject: Re: [dpdk-stable] [dpdk-dev] [PATCH v2] mempool/octeontx: fix pool to aura mapping
Date: Mon, 2 Jul 2018 14:45:06 +0530
Message-ID: <20180702091504.GA13965@jerin>
In-Reply-To: <20180702062932.1548-1-pbhagavatula@caviumnetworks.com>

-----Original Message-----
> Date: Mon,  2 Jul 2018 11:59:32 +0530
> From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> To: jerin.jacob@caviumnetworks.com, santosh.shukla@caviumnetworks.com,
>  olivier.matz@6wind.com
> Cc: dev@dpdk.org, stable@dpdk.org, Pavan Nikhilesh
>  <pbhagavatula@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH v2] mempool/octeontx: fix pool to aura mapping
> X-Mailer: git-send-email 2.18.0
> 
> HW requires each pool to be mapped to an aura set of 16 auras.
> Previously, the pool-to-aura mapping was assumed to be 1:1.
> 
> Fixes: 02fd6c744350 ("mempool/octeontx: support allocation")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Acked-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>


Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>

> ---
>  v2 Changes:
>  - use macro to avoid code duplication (Santosh).
>  - use uint16_t for gaura id.
> 
>  drivers/event/octeontx/timvf_evdev.c      |  2 +-
>  drivers/mempool/octeontx/octeontx_fpavf.c | 45 ++++++++++++++---------
>  drivers/mempool/octeontx/octeontx_fpavf.h |  9 +++++
>  drivers/net/octeontx/octeontx_ethdev.c    |  6 +--
>  drivers/net/octeontx/octeontx_rxtx.c      |  2 +-
>  5 files changed, 42 insertions(+), 22 deletions(-)
> 
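For reference, a minimal standalone sketch of the gpool-to-aura index mapping the
patch introduces (not part of the patch itself). It reuses the definitions added to
octeontx_fpavf.h below; the handle value is hypothetical. Each gpool owns a set of
16 auras, so a pool's first aura index is gpool << FPA_GAURA_SHIFT:

    #include <stdint.h>
    #include <stdio.h>

    #define FPA_VF_MAX          32
    #define FPA_GPOOL_MASK      (FPA_VF_MAX - 1)              /* low 5 bits of the handle */
    #define FPA_GAURA_SHIFT     4                             /* 16 auras per pool */
    #define FPA_AURA_IDX(gpool) ((gpool) << FPA_GAURA_SHIFT)

    int main(void)
    {
        uintptr_t handle = 0x3;                       /* hypothetical pool handle, gpool = 3 */
        uint8_t  gpool = (uint8_t)handle & FPA_GPOOL_MASK;
        uint16_t gaura = FPA_AURA_IDX(gpool);         /* aura registers are indexed by this */

        printf("gpool=%u gaura=%u\n", (unsigned)gpool, (unsigned)gaura);  /* gpool=3 gaura=48 */
        return 0;
    }

With this mapping, the VHAURA_* registers are addressed by the aura index (48 here)
rather than by the pool index, which is what the hunks below change.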
> diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
> index c4fbd2d86..8a045c250 100644
> --- a/drivers/event/octeontx/timvf_evdev.c
> +++ b/drivers/event/octeontx/timvf_evdev.c
> @@ -174,7 +174,7 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
>  	if (use_fpa) {
>  		pool = (uintptr_t)((struct rte_mempool *)
>  				timr->chunk_pool)->pool_id;
> -		ret = octeontx_fpa_bufpool_gpool(pool);
> +		ret = octeontx_fpa_bufpool_gaura(pool);
>  		if (ret < 0) {
>  			timvf_log_dbg("Unable to get gaura id");
>  			ret = -ENOMEM;
> diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
> index 7aecaa85d..e5918c866 100644
> --- a/drivers/mempool/octeontx/octeontx_fpavf.c
> +++ b/drivers/mempool/octeontx/octeontx_fpavf.c
> @@ -243,7 +243,7 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
>  		POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
>  		POOL_ENA;
> 
> -	cfg.aid = 0;
> +	cfg.aid = FPA_AURA_IDX(gpool);
>  	cfg.pool_cfg = reg;
>  	cfg.pool_stack_base = phys_addr;
>  	cfg.pool_stack_end = phys_addr + memsz;
> @@ -327,7 +327,7 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index)
>  	hdr.vfid = gpool_index;
>  	hdr.res_code = 0;
>  	memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
> -	cfg.aid = gpool_index; /* gpool is guara */
> +	cfg.aid = gpool_index << FPA_GAURA_SHIFT;
> 
>  	ret = octeontx_mbox_send(&hdr, &cfg,
>  					sizeof(struct octeontx_mbox_fpa_cfg),
> @@ -335,7 +335,8 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index)
>  	if (ret < 0) {
>  		fpavf_log_err("Could not attach fpa ");
>  		fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
> -			      gpool_index, gpool_index, ret, hdr.res_code);
> +			      gpool_index << FPA_GAURA_SHIFT, gpool_index, ret,
> +			      hdr.res_code);
>  		ret = -EACCES;
>  		goto err;
>  	}
> @@ -355,14 +356,15 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
>  		goto err;
>  	}
> 
> -	cfg.aid = gpool_index; /* gpool is gaura */
> +	cfg.aid = gpool_index << FPA_GAURA_SHIFT;
>  	hdr.coproc = FPA_COPROC;
>  	hdr.msg = FPA_DETACHAURA;
>  	hdr.vfid = gpool_index;
>  	ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
>  	if (ret < 0) {
>  		fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
> -			      gpool_index, ret, hdr.res_code);
> +			      gpool_index << FPA_GAURA_SHIFT, ret,
> +			      hdr.res_code);
>  		ret = -EINVAL;
>  	}
> 
> @@ -469,6 +471,7 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
>  {
>  	uint64_t cnt, limit, avail;
>  	uint8_t gpool;
> +	uint16_t gaura;
>  	uintptr_t pool_bar;
> 
>  	if (unlikely(!octeontx_fpa_handle_valid(handle)))
> @@ -476,14 +479,16 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
> 
>  	/* get the gpool */
>  	gpool = octeontx_fpa_bufpool_gpool(handle);
> +	/* get the aura */
> +	gaura = octeontx_fpa_bufpool_gaura(handle);
> 
>  	/* Get pool bar address from handle */
>  	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
> 
>  	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
> -				FPA_VF_VHAURA_CNT(gpool)));
> +				FPA_VF_VHAURA_CNT(gaura)));
>  	limit = fpavf_read64((void *)((uintptr_t)pool_bar +
> -				FPA_VF_VHAURA_CNT_LIMIT(gpool)));
> +				FPA_VF_VHAURA_CNT_LIMIT(gaura)));
> 
>  	avail = fpavf_read64((void *)((uintptr_t)pool_bar +
>  				FPA_VF_VHPOOL_AVAILABLE(gpool)));
> @@ -496,6 +501,7 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
>  				unsigned int buf_offset, int node_id)
>  {
>  	unsigned int gpool;
> +	unsigned int gaura;
>  	uintptr_t gpool_handle;
>  	uintptr_t pool_bar;
>  	int res;
> @@ -545,16 +551,18 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
>  		goto error_pool_destroy;
>  	}
> 
> +	gaura = FPA_AURA_IDX(gpool);
> +
>  	/* Release lock */
>  	rte_spinlock_unlock(&fpadev.lock);
> 
>  	/* populate AURA registers */
>  	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
> -			 FPA_VF_VHAURA_CNT(gpool)));
> +			 FPA_VF_VHAURA_CNT(gaura)));
>  	fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
> -			 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
> +			 FPA_VF_VHAURA_CNT_LIMIT(gaura)));
>  	fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
> -			 FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
> +			 FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
> 
>  	octeontx_fpapf_start_count(gpool);
> 
> @@ -581,6 +589,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
>  	uint64_t sz;
>  	uint64_t cnt, avail;
>  	uint8_t gpool;
> +	uint16_t gaura;
>  	uintptr_t pool_bar;
>  	int ret;
> 
> @@ -594,13 +603,15 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
> 
>  	/* get the pool */
>  	gpool = octeontx_fpa_bufpool_gpool(handle);
> +	/* get the aura */
> +	gaura = octeontx_fpa_bufpool_gaura(handle);
> 
>  	/* Get pool bar address from handle */
>  	pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
> 
>  	 /* Check for no outstanding buffers */
>  	cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
> -					FPA_VF_VHAURA_CNT(gpool)));
> +					FPA_VF_VHAURA_CNT(gaura)));
>  	if (cnt) {
>  		fpavf_log_dbg("buffer exist in pool cnt %" PRId64 "\n", cnt);
>  		return -EBUSY;
> @@ -613,9 +624,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
> 
>  	/* Prepare to empty the entire POOL */
>  	fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
> -			 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
> +			 FPA_VF_VHAURA_CNT_LIMIT(gaura)));
>  	fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
> -			 FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
> +			 FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
> 
>  	/* Empty the pool */
>  	/* Invalidate the POOL */
> @@ -627,11 +638,11 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
>  		/* Yank a buffer from the pool */
>  		node = (void *)(uintptr_t)
>  			fpavf_read64((void *)
> -				    (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));
> +				    (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));
> 
>  		if (node == NULL) {
>  			fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
> -				      gpool, avail);
> +				      gaura, avail);
>  			break;
>  		}
> 
> @@ -665,9 +676,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
> 
>  	/* Deactivate the AURA */
>  	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
> -			FPA_VF_VHAURA_CNT_LIMIT(gpool)));
> +			FPA_VF_VHAURA_CNT_LIMIT(gaura)));
>  	fpavf_write64(0, (void *)((uintptr_t)pool_bar +
> -			FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
> +			FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
> 
>  	ret = octeontx_fpapf_aura_detach(gpool);
>  	if (ret) {
> diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
> index b76f40e75..b00be137a 100644
> --- a/drivers/mempool/octeontx/octeontx_fpavf.h
> +++ b/drivers/mempool/octeontx/octeontx_fpavf.h
> @@ -14,6 +14,7 @@
> 
>  #define	FPA_VF_MAX			32
>  #define FPA_GPOOL_MASK			(FPA_VF_MAX-1)
> +#define FPA_GAURA_SHIFT			4
> 
>  /* FPA VF register offsets */
>  #define FPA_VF_INT(x)			(0x200ULL | ((x) << 22))
> @@ -36,6 +37,7 @@
>  #define FPA_VF_FREE_ADDRS_S(x, y, z)	\
>  	((x) | (((y) & 0x1ff) << 3) | ((((z) & 1)) << 14))
> 
> +#define FPA_AURA_IDX(gpool)			(gpool << FPA_GAURA_SHIFT)
>  /* FPA VF register offsets from VF_BAR4, size 2 MByte */
>  #define	FPA_VF_MSIX_VEC_ADDR		0x00000
>  #define	FPA_VF_MSIX_VEC_CTL		0x00008
> @@ -102,4 +104,11 @@ octeontx_fpa_bufpool_gpool(uintptr_t handle)
>  {
>  	return (uint8_t)handle & FPA_GPOOL_MASK;
>  }
> +
> +static __rte_always_inline uint16_t
> +octeontx_fpa_bufpool_gaura(uintptr_t handle)
> +{
> +	return octeontx_fpa_bufpool_gpool(handle) << FPA_GAURA_SHIFT;
> +}
> +
>  #endif	/* __OCTEONTX_FPAVF_H__ */
> diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
> index 1eb453b21..705378186 100644
> --- a/drivers/net/octeontx/octeontx_ethdev.c
> +++ b/drivers/net/octeontx/octeontx_ethdev.c
> @@ -787,7 +787,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
>  	pki_qos_cfg_t pki_qos;
>  	uintptr_t pool;
>  	int ret, port;
> -	uint8_t gaura;
> +	uint16_t gaura;
>  	unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
>  	unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
> 
> @@ -898,8 +898,8 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
> 
>  		pool = (uintptr_t)mb_pool->pool_id;
> 
> -		/* Get the gpool Id */
> -		gaura = octeontx_fpa_bufpool_gpool(pool);
> +		/* Get the gaura Id */
> +		gaura = octeontx_fpa_bufpool_gaura(pool);
> 
>  		pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
>  		pki_qos.num_entry = 1;
> diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
> index 2502d90e9..a9149b4e1 100644
> --- a/drivers/net/octeontx/octeontx_rxtx.c
> +++ b/drivers/net/octeontx/octeontx_rxtx.c
> @@ -31,7 +31,7 @@ __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
>  		return -ENOSPC;
> 
>  	/* Get the gaura Id */
> -	gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)tx_pkt->pool->pool_id);
> +	gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)tx_pkt->pool->pool_id);
> 
>  	/* Setup PKO_SEND_HDR_S */
>  	cmd_buf[0] = tx_pkt->data_len & 0xffff;
> --
> 2.18.0
> 


Thread overview: 6+ messages
2018-06-27 11:47 [dpdk-stable] [dpdk-dev] [PATCH] " Pavan Nikhilesh
2018-06-27 14:35 ` santosh
2018-07-02  6:29 ` [dpdk-stable] [dpdk-dev] [PATCH v2] " Pavan Nikhilesh
2018-07-02  9:15   ` Jerin Jacob [this message]
2018-07-03  4:50 ` [dpdk-stable] [dpdk-dev] [PATCH v3] " Pavan Nikhilesh
2018-07-12 20:26   ` Thomas Monjalon
