From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Cc: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH 4/5] event/octeontx2: update SSO buffers based on timer count
Date: Wed, 20 Nov 2019 10:26:24 +0530
Message-ID: <20191120045626.10886-4-pbhagavatula@marvell.com>
In-Reply-To: <20191120045626.10886-1-pbhagavatula@marvell.com>
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Update the SSO's internal XAQ buffer count based on the number of timers
in the event timer adapter. Track each timer ring's id and size so that
reconfiguring an existing ring adjusts the XAQ count by the delta instead
of growing it unconditionally.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/octeontx2/otx2_evdev.h       |  6 +-
 drivers/event/octeontx2/otx2_evdev_adptr.c | 84 +++++++++++++++++-----
 drivers/event/octeontx2/otx2_tim_evdev.c   |  7 +-
 drivers/event/octeontx2/otx2_tim_evdev.h   |  1 +
4 files changed, 74 insertions(+), 24 deletions(-)
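
Reviewer note: below is a condensed, standalone sketch of the per-ring
accounting this patch adds to sso_updt_xae_cnt() for the
RTE_EVENT_TYPE_TIMER case. It mirrors the driver's field names but is a
simplified illustration, not the literal driver code: rte_realloc()
failure handling and the ethdev case are elided.

/* Sketch of the timer-ring XAQ accounting (simplified; assumes the
 * driver headers otx2_evdev.h and otx2_tim_evdev.h are available).
 */
static void
timer_xae_accounting_sketch(struct otx2_sso_evdev *dev,
			    struct otx2_tim_ring *timr)
{
	int i;

	/* Known ring: adjust the XAQ count by the size delta, if any. */
	for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
		if (timr->ring_id != dev->timer_adptr_rings[i])
			continue;
		if (timr->nb_timers == dev->timer_adptr_sz[i])
			return;	/* Size unchanged, nothing to do. */
		dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
		dev->adptr_xae_cnt += timr->nb_timers;
		dev->timer_adptr_sz[i] = timr->nb_timers;
		return;
	}

	/* New ring: grow both tracking arrays, record the ring id and
	 * its size, and add its timer count to the XAQ total.
	 * (Error handling for rte_realloc() elided here.)
	 */
	dev->tim_adptr_ring_cnt++;
	dev->timer_adptr_rings = rte_realloc(dev->timer_adptr_rings,
			sizeof(uint16_t) * dev->tim_adptr_ring_cnt, 0);
	dev->timer_adptr_sz = rte_realloc(dev->timer_adptr_sz,
			sizeof(uint64_t) * dev->tim_adptr_ring_cnt, 0);
	dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] = timr->ring_id;
	dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] = timr->nb_timers;
	dev->adptr_xae_cnt += timr->nb_timers;
}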
diff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h
index 530060f81..231a12a52 100644
--- a/drivers/event/octeontx2/otx2_evdev.h
+++ b/drivers/event/octeontx2/otx2_evdev.h
@@ -14,6 +14,7 @@
#include "otx2_dev.h"
#include "otx2_ethdev.h"
#include "otx2_mempool.h"
+#include "otx2_tim_evdev.h"
#define EVENTDEV_NAME_OCTEONTX2_PMD otx2_eventdev
@@ -137,9 +138,12 @@ struct otx2_sso_evdev {
struct rte_mempool *xaq_pool;
uint64_t rx_offloads;
uint64_t tx_offloads;
+ uint64_t adptr_xae_cnt;
uint16_t rx_adptr_pool_cnt;
- uint32_t adptr_xae_cnt;
uint64_t *rx_adptr_pools;
+ uint16_t tim_adptr_ring_cnt;
+ uint16_t *timer_adptr_rings;
+ uint64_t *timer_adptr_sz;
/* Dev args */
uint8_t dual_ws;
uint8_t selftest;
diff --git a/drivers/event/octeontx2/otx2_evdev_adptr.c b/drivers/event/octeontx2/otx2_evdev_adptr.c
index d8a06a593..233cba2aa 100644
--- a/drivers/event/octeontx2/otx2_evdev_adptr.c
+++ b/drivers/event/octeontx2/otx2_evdev_adptr.c
@@ -199,41 +199,87 @@ sso_rxq_disable(struct otx2_eth_dev *dev, uint16_t qid)
void
sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
{
+ int i;
+
switch (event_type) {
case RTE_EVENT_TYPE_ETHDEV:
{
struct otx2_eth_rxq *rxq = data;
- int i, match = false;
uint64_t *old_ptr;
for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
if ((uint64_t)rxq->pool == dev->rx_adptr_pools[i])
- match = true;
- }
-
- if (!match) {
- dev->rx_adptr_pool_cnt++;
- old_ptr = dev->rx_adptr_pools;
- dev->rx_adptr_pools = rte_realloc(dev->rx_adptr_pools,
- sizeof(uint64_t) *
- dev->rx_adptr_pool_cnt
- , 0);
- if (dev->rx_adptr_pools == NULL) {
- dev->adptr_xae_cnt += rxq->pool->size;
- dev->rx_adptr_pools = old_ptr;
- dev->rx_adptr_pool_cnt--;
return;
- }
- dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
- (uint64_t)rxq->pool;
+ }
+ dev->rx_adptr_pool_cnt++;
+ old_ptr = dev->rx_adptr_pools;
+ dev->rx_adptr_pools = rte_realloc(dev->rx_adptr_pools,
+ sizeof(uint64_t) *
+ dev->rx_adptr_pool_cnt, 0);
+ if (dev->rx_adptr_pools == NULL) {
dev->adptr_xae_cnt += rxq->pool->size;
+ dev->rx_adptr_pools = old_ptr;
+ dev->rx_adptr_pool_cnt--;
+ return;
}
+ dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
+ (uint64_t)rxq->pool;
+
+ dev->adptr_xae_cnt += rxq->pool->size;
break;
}
case RTE_EVENT_TYPE_TIMER:
{
- dev->adptr_xae_cnt += (*(uint64_t *)data);
+ struct otx2_tim_ring *timr = data;
+ uint16_t *old_ring_ptr;
+ uint64_t *old_sz_ptr;
+
+ for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
+ if (timr->ring_id != dev->timer_adptr_rings[i])
+ continue;
+ if (timr->nb_timers == dev->timer_adptr_sz[i])
+ return;
+ dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
+ dev->adptr_xae_cnt += timr->nb_timers;
+ dev->timer_adptr_sz[i] = timr->nb_timers;
+
+ return;
+ }
+
+ dev->tim_adptr_ring_cnt++;
+ old_ring_ptr = dev->timer_adptr_rings;
+ old_sz_ptr = dev->timer_adptr_sz;
+
+ dev->timer_adptr_rings = rte_realloc(dev->timer_adptr_rings,
+ sizeof(uint16_t) *
+ dev->tim_adptr_ring_cnt,
+ 0);
+ if (dev->timer_adptr_rings == NULL) {
+ dev->adptr_xae_cnt += timr->nb_timers;
+ dev->timer_adptr_rings = old_ring_ptr;
+ dev->tim_adptr_ring_cnt--;
+ return;
+ }
+
+ dev->timer_adptr_sz = rte_realloc(dev->timer_adptr_sz,
+ sizeof(uint64_t) *
+ dev->tim_adptr_ring_cnt,
+ 0);
+
+ if (dev->timer_adptr_sz == NULL) {
+ dev->adptr_xae_cnt += timr->nb_timers;
+ dev->timer_adptr_sz = old_sz_ptr;
+ dev->tim_adptr_ring_cnt--;
+ return;
+ }
+
+ dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] =
+ timr->ring_id;
+ dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] =
+ timr->nb_timers;
+
+ dev->adptr_xae_cnt += timr->nb_timers;
break;
}
default:
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.c b/drivers/event/octeontx2/otx2_tim_evdev.c
index 206ed4331..5f0233f44 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.c
+++ b/drivers/event/octeontx2/otx2_tim_evdev.c
@@ -254,7 +254,6 @@ otx2_tim_ring_create(struct rte_event_timer_adapter *adptr)
struct tim_ring_req *free_req;
struct tim_lf_alloc_req *req;
struct tim_lf_alloc_rsp *rsp;
- uint64_t nb_timers;
int i, rc;
if (dev == NULL)
@@ -300,7 +299,7 @@ otx2_tim_ring_create(struct rte_event_timer_adapter *adptr)
tim_ring->max_tout = rcfg->max_tmo_ns;
tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
tim_ring->chunk_sz = dev->chunk_sz;
- nb_timers = rcfg->nb_timers;
+ tim_ring->nb_timers = rcfg->nb_timers;
tim_ring->disable_npa = dev->disable_npa;
tim_ring->enable_stats = dev->enable_stats;
@@ -316,7 +315,7 @@ otx2_tim_ring_create(struct rte_event_timer_adapter *adptr)
}
}
- tim_ring->nb_chunks = nb_timers / OTX2_TIM_NB_CHUNK_SLOTS(
+ tim_ring->nb_chunks = tim_ring->nb_timers / OTX2_TIM_NB_CHUNK_SLOTS(
tim_ring->chunk_sz);
tim_ring->nb_chunk_slots = OTX2_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
@@ -373,7 +372,7 @@ otx2_tim_ring_create(struct rte_event_timer_adapter *adptr)
tim_set_fp_ops(tim_ring);
/* Update SSO xae count. */
- sso_updt_xae_cnt(sso_pmd_priv(dev->event_dev), (void *)&nb_timers,
+ sso_updt_xae_cnt(sso_pmd_priv(dev->event_dev), (void *)tim_ring,
RTE_EVENT_TYPE_TIMER);
sso_xae_reconfigure(dev->event_dev);
diff --git a/drivers/event/octeontx2/otx2_tim_evdev.h b/drivers/event/octeontx2/otx2_tim_evdev.h
index eec0189c1..f3fe9697a 100644
--- a/drivers/event/octeontx2/otx2_tim_evdev.h
+++ b/drivers/event/octeontx2/otx2_tim_evdev.h
@@ -154,6 +154,7 @@ struct otx2_tim_ring {
uint8_t ena_dfb;
uint16_t ring_id;
uint32_t aura;
+ uint64_t nb_timers;
uint64_t tck_nsec;
uint64_t max_tout;
uint64_t nb_chunks;
--
2.17.1
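
For context, a minimal application-side sketch showing where the timer
count that drives this accounting comes from: the nb_timers field of the
standard rte_event_timer_adapter API, which otx2_tim_ring_create() now
records in tim_ring->nb_timers and feeds to sso_updt_xae_cnt(). The
specific configuration values below are hypothetical, not taken from the
patch.

#include <rte_event_timer_adapter.h>

/* Create an event timer adapter; only nb_timers is relevant to the
 * SSO XAQ sizing changed by this patch.
 */
static struct rte_event_timer_adapter *
create_adapter_sketch(uint8_t evdev_id)
{
	const struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = evdev_id,
		.timer_adapter_id = 0,
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
		.timer_tick_ns = 100 * 1000,		/* 100 us tick */
		.max_tmo_ns = 100 * 1000 * 1000,	/* 100 ms max timeout */
		.nb_timers = 1 << 16,	/* sizes the SSO XAQ accounting */
		.flags = 0,
	};

	return rte_event_timer_adapter_create(&conf);
}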