DPDK patches and discussions
 help / color / mirror / Atom feed
From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>,
	"Kiran Kumar K" <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>,
	Harman Kalra <hkalra@marvell.com>,
	"Pavan Nikhilesh" <pbhagavatula@marvell.com>,
	Shijith Thotton <sthotton@marvell.com>
Cc: <dev@dpdk.org>
Subject: [PATCH 20/20] event/cnxk: add CN20K timer adapter
Date: Thu, 3 Oct 2024 18:52:37 +0530	[thread overview]
Message-ID: <20241003132237.20193-20-pbhagavatula@marvell.com> (raw)
In-Reply-To: <20241003132237.20193-1-pbhagavatula@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Add event timer adapter support for CN20K platform.
Implement new HWWQE insertion feature supported by CN20K platform.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/common/cnxk/roc_tim.c        |   6 +-
 drivers/event/cnxk/cn20k_eventdev.c  |  16 ++-
 drivers/event/cnxk/cn20k_worker.h    |   6 +
 drivers/event/cnxk/cnxk_tim_evdev.c  |  37 ++++-
 drivers/event/cnxk/cnxk_tim_evdev.h  |  14 ++
 drivers/event/cnxk/cnxk_tim_worker.c |  89 ++++++++++--
 drivers/event/cnxk/cnxk_tim_worker.h | 197 +++++++++++++++++++++++++++
 7 files changed, 349 insertions(+), 16 deletions(-)

diff --git a/drivers/common/cnxk/roc_tim.c b/drivers/common/cnxk/roc_tim.c
index db1c129806..fc93b5e247 100644
--- a/drivers/common/cnxk/roc_tim.c
+++ b/drivers/common/cnxk/roc_tim.c
@@ -409,7 +409,7 @@ tim_hw_info_get(struct roc_tim *roc_tim)
 	mbox_alloc_msg_tim_get_hw_info(mbox);
 	rc = mbox_process_msg(mbox, (void **)&rsp);
 	if (rc && rc != MBOX_MSG_INVALID) {
-		plt_err("Failed to get SSO HW info\n");
+		plt_err("Failed to get TIM HW info\n");
 		rc = -EIO;
 		goto exit;
 	}
@@ -443,6 +443,10 @@ roc_tim_init(struct roc_tim *roc_tim)
 	nb_lfs = roc_tim->nb_lfs;
 
 	rc = tim_hw_info_get(roc_tim);
+	if (rc) {
+		plt_tim_dbg("Failed to get TIM HW info");
+		return 0;
+	}
 
 	rc = tim_free_lf_count_get(dev, &nb_free_lfs);
 	if (rc) {
diff --git a/drivers/event/cnxk/cn20k_eventdev.c b/drivers/event/cnxk/cn20k_eventdev.c
index bb1c2ca18f..682647c389 100644
--- a/drivers/event/cnxk/cn20k_eventdev.c
+++ b/drivers/event/cnxk/cn20k_eventdev.c
@@ -1020,6 +1020,13 @@ cn20k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
 	return cn20k_sso_updt_tx_adptr_data(event_dev);
 }
 
+/* Report event timer adapter capabilities for CN20K: delegate to the common
+ * cnxk handler, supplying the CN20K-specific private-memory setup callback.
+ */
+static int
+cn20k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags, uint32_t *caps,
+		   const struct event_timer_adapter_ops **ops)
+{
+	return cnxk_tim_caps_get(evdev, flags, caps, ops, cn20k_sso_set_priv_mem);
+}
+
 static struct eventdev_ops cn20k_sso_dev_ops = {
 	.dev_infos_get = cn20k_sso_info_get,
 	.dev_configure = cn20k_sso_dev_configure,
@@ -1054,6 +1061,8 @@ static struct eventdev_ops cn20k_sso_dev_ops = {
 	.eth_tx_adapter_stop = cnxk_sso_tx_adapter_stop,
 	.eth_tx_adapter_free = cnxk_sso_tx_adapter_free,
 
+	.timer_adapter_caps_get = cn20k_tim_caps_get,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
@@ -1133,4 +1142,9 @@ RTE_PMD_REGISTER_PARAM_STRING(event_cn20k,
 			      CNXK_SSO_GGRP_QOS "=<string>"
 			      CNXK_SSO_STASH "=<string>"
 			      CNXK_SSO_GW_MODE "=<int>"
-			      CNXK_SSO_FORCE_BP "=1");
+			      CNXK_SSO_FORCE_BP "=1"
+			      CNXK_TIM_DISABLE_NPA "=1"
+			      CNXK_TIM_CHNK_SLOTS "=<int>"
+			      CNXK_TIM_RINGS_LMT "=<int>"
+			      CNXK_TIM_STATS_ENA "=1"
+			      CNXK_TIM_EXT_CLK "=<string>");
diff --git a/drivers/event/cnxk/cn20k_worker.h b/drivers/event/cnxk/cn20k_worker.h
index fcd2944886..0f168404ba 100644
--- a/drivers/event/cnxk/cn20k_worker.h
+++ b/drivers/event/cnxk/cn20k_worker.h
@@ -5,6 +5,7 @@
 #ifndef __CN20K_WORKER_H__
 #define __CN20K_WORKER_H__
 
+#include <rte_event_timer_adapter.h>
 #include <rte_eventdev.h>
 
 #include "cn20k_eventdev.h"
@@ -128,6 +129,11 @@ cn20k_sso_hws_post_process(struct cn20k_sso_hws *ws, uint64_t *u64, const uint32
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]), (void **)&u64[1], 1,
 					  1);
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_TIMER) {
+		struct rte_event_timer *tev = (struct rte_event_timer *)u64[1];
+
+		tev->state = RTE_EVENT_TIMER_NOT_ARMED;
+		u64[1] = tev->ev.u64;
 	}
 }
 
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
index f8753b29ad..a158a4ad58 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.c
+++ b/drivers/event/cnxk/cnxk_tim_evdev.c
@@ -78,9 +78,25 @@ cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
 	return rc;
 }
 
+/* Enable hardware WQE insertion for a TIM ring and record the LMT base used
+ * by the fast-path arm routines. Returns the roc_tim_lf_config_hwwqe() rc.
+ */
+static int
+cnxk_tim_enable_hwwqe(struct cnxk_tim_evdev *dev, struct cnxk_tim_ring *tim_ring)
+{
+	struct roc_tim_hwwqe_cfg hwwqe_cfg;
+
+	memset(&hwwqe_cfg, 0, sizeof(hwwqe_cfg));
+	/* HWWQE on; group and flow-control modes deliberately left disabled. */
+	hwwqe_cfg.hwwqe_ena = 1;
+	hwwqe_cfg.grp_ena = 0;
+	hwwqe_cfg.flw_ctrl_ena = 0;
+	/* Byte offset at which HW reports arm status (polled via impl_opaque
+	 * in the worker paths) - presumably relative to the submitted WQE;
+	 * TODO confirm against the TIM HW spec.
+	 */
+	hwwqe_cfg.result_offset = CNXK_TIM_HWWQE_RES_OFFSET_B;
+
+	tim_ring->lmt_base = dev->tim.roc_sso->lmt_base;
+	return roc_tim_lf_config_hwwqe(&dev->tim, tim_ring->ring_id, &hwwqe_cfg);
+}
+
 static void
 cnxk_tim_set_fp_ops(struct cnxk_tim_ring *tim_ring)
 {
+	struct cnxk_tim_evdev *dev = cnxk_tim_priv_get();
 	uint8_t prod_flag = !tim_ring->prod_type_sp;
 
 	/* [STATS] [DFB/FB] [SP][MP]*/
@@ -98,6 +114,16 @@ cnxk_tim_set_fp_ops(struct cnxk_tim_ring *tim_ring)
 #undef FP
 	};
 
+	if (dev == NULL)
+		return;
+
+	if (dev->tim.feat.hwwqe) {
+		cnxk_tim_ops.arm_burst = cnxk_tim_arm_burst_hwwqe;
+		cnxk_tim_ops.arm_tmo_tick_burst = cnxk_tim_arm_tmo_burst_hwwqe;
+		cnxk_tim_ops.cancel_burst = cnxk_tim_timer_cancel_burst_hwwqe;
+		return;
+	}
+
 	cnxk_tim_ops.arm_burst =
 		arm_burst[tim_ring->enable_stats][tim_ring->ena_dfb][prod_flag];
 	cnxk_tim_ops.arm_tmo_tick_burst =
@@ -224,12 +250,13 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
 		}
 	}
 
-	if (tim_ring->disable_npa) {
+	if (!dev->tim.feat.hwwqe && tim_ring->disable_npa) {
 		tim_ring->nb_chunks =
 			tim_ring->nb_timers /
 			CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
 		tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;
 	} else {
+		tim_ring->disable_npa = 0;
 		tim_ring->nb_chunks = tim_ring->nb_timers;
 	}
 
@@ -255,6 +282,14 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
 		goto tim_chnk_free;
 	}
 
+	if (dev->tim.feat.hwwqe) {
+		rc = cnxk_tim_enable_hwwqe(dev, tim_ring);
+		if (rc < 0) {
+			plt_err("Failed to enable hwwqe");
+			goto tim_chnk_free;
+		}
+	}
+
 	plt_write64((uint64_t)tim_ring->bkt, tim_ring->base + TIM_LF_RING_BASE);
 	plt_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);
 
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
index 9bd36158d8..a36084d714 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.h
+++ b/drivers/event/cnxk/cnxk_tim_evdev.h
@@ -15,6 +15,7 @@
 #include <rte_malloc.h>
 #include <rte_memzone.h>
 #include <rte_reciprocal.h>
+#include <rte_vect.h>
 
 #define NSECPERSEC		 1E9
 #define USECPERSEC		 1E6
@@ -29,6 +30,8 @@
 #define CNXK_TIM_MIN_CHUNK_SLOTS    (0x1)
 #define CNXK_TIM_MAX_CHUNK_SLOTS    (0x1FFE)
 #define CNXK_TIM_MAX_POOL_CACHE_SZ  (16)
+#define CNXK_TIM_HWWQE_RES_OFFSET_B (24)
+#define CNXK_TIM_ENT_PER_LMT	    (7)
 
 #define CN9K_TIM_MIN_TMO_TKS (256)
 
@@ -124,6 +127,7 @@ struct __rte_cache_aligned cnxk_tim_ring {
 	uintptr_t tbase;
 	uint64_t (*tick_fn)(uint64_t tbase);
 	uint64_t ring_start_cyc;
+	uint64_t lmt_base;
 	struct cnxk_tim_bkt *bkt;
 	struct rte_mempool *chunk_pool;
 	struct rte_reciprocal_u64 fast_div;
@@ -310,11 +314,21 @@ TIM_ARM_FASTPATH_MODES
 TIM_ARM_TMO_FASTPATH_MODES
 #undef FP
 
+uint16_t cnxk_tim_arm_burst_hwwqe(const struct rte_event_timer_adapter *adptr,
+				  struct rte_event_timer **tim, const uint16_t nb_timers);
+
+uint16_t cnxk_tim_arm_tmo_burst_hwwqe(const struct rte_event_timer_adapter *adptr,
+				      struct rte_event_timer **tim, const uint64_t timeout_tick,
+				      const uint16_t nb_timers);
+
 uint16_t
 cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 			    struct rte_event_timer **tim,
 			    const uint16_t nb_timers);
 
+uint16_t cnxk_tim_timer_cancel_burst_hwwqe(const struct rte_event_timer_adapter *adptr,
+					   struct rte_event_timer **tim, const uint16_t nb_timers);
+
 int cnxk_tim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
 				 const struct rte_event_timer *evtim, uint64_t *ticks_remaining);
 
diff --git a/drivers/event/cnxk/cnxk_tim_worker.c b/drivers/event/cnxk/cnxk_tim_worker.c
index 5dcf6085dc..16b6b4e33f 100644
--- a/drivers/event/cnxk/cnxk_tim_worker.c
+++ b/drivers/event/cnxk/cnxk_tim_worker.c
@@ -32,15 +32,6 @@ cnxk_tim_arm_checks(const struct cnxk_tim_ring *const tim_ring,
 	return -EINVAL;
 }
 
-static inline void
-cnxk_tim_format_event(const struct rte_event_timer *const tim,
-		      struct cnxk_tim_ent *const entry)
-{
-	entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
-		    (tim->ev.event & 0xFFFFFFFFF);
-	entry->wqe = tim->ev.u64;
-}
-
 static __rte_always_inline uint16_t
 cnxk_tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
 			 struct rte_event_timer **tim, const uint16_t nb_timers,
@@ -72,7 +63,25 @@ cnxk_tim_timer_arm_burst(const struct rte_event_timer_adapter *adptr,
 	}
 
 	if (flags & CNXK_TIM_ENA_STATS)
-		__atomic_fetch_add(&tim_ring->arm_cnt, index, __ATOMIC_RELAXED);
+		__atomic_fetch_add(&tim_ring->arm_cnt, index, rte_memory_order_relaxed);
+
+	return index;
+}
+
+/* HWWQE arm burst: arm timers one at a time, stopping at the first
+ * validation or insertion failure. Returns the number successfully armed.
+ */
+uint16_t
+cnxk_tim_arm_burst_hwwqe(const struct rte_event_timer_adapter *adptr, struct rte_event_timer **tim,
+			 const uint16_t nb_timers)
+{
+	struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
+	uint16_t index;
+
+	for (index = 0; index < nb_timers; index++) {
+		/* cnxk_tim_arm_checks() sets rte_errno/state on bad input. */
+		if (cnxk_tim_arm_checks(tim_ring, tim[index]))
+			break;
+
+		if (cnxk_tim_add_entry_hwwqe(tim_ring, tim[index]))
+			break;
+	}
+
+	return index;
+}
@@ -126,12 +135,34 @@ cnxk_tim_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
 	}
 
 	if (flags & CNXK_TIM_ENA_STATS)
-		__atomic_fetch_add(&tim_ring->arm_cnt, set_timers,
-				   __ATOMIC_RELAXED);
+		__atomic_fetch_add(&tim_ring->arm_cnt, set_timers, rte_memory_order_relaxed);
 
 	return set_timers;
 }
 
+/* HWWQE common-timeout arm burst: every timer in the burst expires after the
+ * same tick count. Validates the tick once up front, then hands the whole
+ * burst to the bulk LMTLINE path. Returns the number successfully armed.
+ */
+uint16_t
+cnxk_tim_arm_tmo_burst_hwwqe(const struct rte_event_timer_adapter *adptr,
+			     struct rte_event_timer **tim, const uint64_t timeout_tick,
+			     const uint16_t nb_timers)
+{
+	struct cnxk_tim_ring *tim_ring = adptr->data->adapter_priv;
+	uint16_t idx;
+
+	/* Zero ticks is too early; beyond the bucket span is too late. */
+	if (unlikely(!timeout_tick || timeout_tick > tim_ring->nb_bkts)) {
+		const enum rte_event_timer_state state = timeout_tick ?
+								 RTE_EVENT_TIMER_ERROR_TOOLATE :
+								 RTE_EVENT_TIMER_ERROR_TOOEARLY;
+		for (idx = 0; idx < nb_timers; idx++)
+			tim[idx]->state = state;
+
+		rte_errno = EINVAL;
+		return 0;
+	}
+
+	/* Convert adapter ticks to TIM interval units. */
+	return cnxk_tim_add_entry_tmo_hwwqe(tim_ring, tim, timeout_tick * tim_ring->tck_int,
+					    nb_timers);
+}
+
 #define FP(_name, _f2, _f1, _flags)                                            \
 	uint16_t __rte_noinline cnxk_tim_arm_tmo_tick_burst_##_name(           \
 		const struct rte_event_timer_adapter *adptr,                   \
@@ -153,7 +184,7 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 	int ret;
 
 	RTE_SET_USED(adptr);
-	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+	rte_atomic_thread_fence(rte_memory_order_acquire);
 	for (index = 0; index < nb_timers; index++) {
 		if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
 			rte_errno = EALREADY;
@@ -174,6 +205,38 @@ cnxk_tim_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
 	return index;
 }
 
+/* HWWQE cancel burst: cancel up to nb_timers armed timers, stopping at the
+ * first timer that is already canceled (EALREADY), not armed (EINVAL), or
+ * already claimed by the expiry path (ENOENT). Returns the number canceled.
+ */
+uint16_t
+cnxk_tim_timer_cancel_burst_hwwqe(const struct rte_event_timer_adapter *adptr,
+				  struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+	uint64_t expected;
+	uint64_t *status;
+	uint16_t i;
+
+	RTE_SET_USED(adptr);
+	for (i = 0; i < nb_timers; i++) {
+		if (tim[i]->state == RTE_EVENT_TIMER_CANCELED) {
+			rte_errno = EALREADY;
+			break;
+		}
+
+		if (tim[i]->state != RTE_EVENT_TIMER_ARMED) {
+			rte_errno = EINVAL;
+			break;
+		}
+
+		/* impl_opaque[1] appears to hold the timer's own pointer while
+		 * armed (TODO confirm against HW/arm path); claim the cancel by
+		 * atomically clearing it. A local 'expected' is required here:
+		 * passing (uint64_t *)&tim[i] directly would let a failed CAS
+		 * overwrite the caller's timer pointer with the current status
+		 * word (__atomic_compare_exchange_n updates *expected on
+		 * failure), corrupting the caller's array.
+		 */
+		status = &tim[i]->impl_opaque[1];
+		expected = (uint64_t)tim[i];
+		if (!__atomic_compare_exchange_n(status, &expected, 0, 0,
+						 rte_memory_order_release,
+						 rte_memory_order_relaxed)) {
+			rte_errno = ENOENT;
+			break;
+		}
+		tim[i]->state = RTE_EVENT_TIMER_CANCELED;
+	}
+
+	return i;
+}
+
 int
 cnxk_tim_remaining_ticks_get(const struct rte_event_timer_adapter *adapter,
 			     const struct rte_event_timer *evtim, uint64_t *ticks_remaining)
diff --git a/drivers/event/cnxk/cnxk_tim_worker.h b/drivers/event/cnxk/cnxk_tim_worker.h
index f530d8c5c4..b9537bdf1c 100644
--- a/drivers/event/cnxk/cnxk_tim_worker.h
+++ b/drivers/event/cnxk/cnxk_tim_worker.h
@@ -132,6 +132,13 @@ cnxk_tim_bkt_fast_mod(uint64_t n, uint64_t d, struct rte_reciprocal_u64 R)
 	return (n - (d * rte_reciprocal_divide_u64(n, &R)));
 }
 
+/* Pack a timer's event into a TIM chunk entry.
+ * w0: bits [47:38] of ev.event shifted down by 6 (landing at [41:32]) ORed
+ * with bits [35:0] kept in place - bits [37:36] are dropped.
+ * wqe: the 64-bit event payload delivered to the SSO on expiry.
+ */
+static inline void
+cnxk_tim_format_event(const struct rte_event_timer *const tim, struct cnxk_tim_ent *const entry)
+{
+	entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 | (tim->ev.event & 0xFFFFFFFFF);
+	entry->wqe = tim->ev.u64;
+}
+
 static __rte_always_inline void
 cnxk_tim_get_target_bucket(struct cnxk_tim_ring *const tim_ring,
 			   const uint32_t rel_bkt, struct cnxk_tim_bkt **bkt,
@@ -574,6 +581,196 @@ cnxk_tim_add_entry_brst(struct cnxk_tim_ring *const tim_ring,
 	return nb_timers;
 }
 
+/* Arm a single timer through the HWWQE LMTST path and busy-poll for the HW
+ * result. Returns 0 on success, 1 on failure (timer state is updated either
+ * way). NOTE(review): rte_errno is not set on failure here - confirm callers
+ * do not rely on it being set.
+ */
+static int
+cnxk_tim_add_entry_hwwqe(struct cnxk_tim_ring *const tim_ring, struct rte_event_timer *const tim)
+{
+	uint64_t wdata, pa;
+	uintptr_t lmt_addr;
+	uint64_t *status;
+	uint16_t lmt_id;
+	uint64_t *lmt;
+	uint64_t rsp;
+	int rc = 0;
+
+	/* Clear both impl_opaque words; the poll below waits for HW to write
+	 * a non-zero result into impl_opaque[0].
+	 */
+	status = &tim->impl_opaque[0];
+	status[0] = 0;
+	status[1] = 0;
+
+	lmt_addr = tim_ring->lmt_base;
+	ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
+	lmt = (uint64_t *)lmt_addr;
+
+	/* LMTLINE layout: [0] expiry interval, [1] relative flag, [2] event
+	 * word compacted exactly as cnxk_tim_format_event() does, [3] WQE
+	 * pointer = the rte_event_timer itself, so the dequeue post-process
+	 * path can update its state on expiry.
+	 */
+	lmt[0] = tim->timeout_ticks * tim_ring->tck_int;
+	lmt[1] = 0x1;
+	lmt[2] = (tim->ev.event & 0xFFC000000000) >> 6 | (tim->ev.event & 0xFFFFFFFFF);
+	lmt[3] = (uint64_t)tim;
+
+	/* One LMT line is used, CNTM1 is 0 and SIZE_VEC is not included. */
+	wdata = lmt_id;
+	/* SIZEM1 is 0 */
+	pa = (tim_ring->tbase & ~0xFF) + TIM_LF_SCHED_TIMER0;
+	pa |= (1UL << 4);
+	roc_lmt_submit_steorl(wdata, pa);
+
+	/* Busy-poll the high nibble of the status word until HW responds. */
+	do {
+		rsp = __atomic_load_n(status, rte_memory_order_relaxed);
+		rsp &= 0xF0UL;
+	} while (!rsp);
+
+	/* Result code in bits [7:4]: 0x1 armed, 0x3 too early, 0x4 too late,
+	 * anything else a generic error.
+	 */
+	rsp >>= 4;
+	switch (rsp) {
+	case 0x3:
+		tim->state = RTE_EVENT_TIMER_ERROR_TOOEARLY;
+		rc = !rc;
+		break;
+	case 0x4:
+		tim->state = RTE_EVENT_TIMER_ERROR_TOOLATE;
+		rc = !rc;
+		break;
+	case 0x1:
+		tim->state = RTE_EVENT_TIMER_ARMED;
+		break;
+	default:
+		tim->state = RTE_EVENT_TIMER_ERROR;
+		rc = !rc;
+		break;
+	}
+
+	return rc;
+}
+
+/* Arm nb_timers timers that share one expiry interval 'intvl' via the HWWQE
+ * LMTST path, batching up to CNXK_TIM_ENT_PER_LMT (7) timers per submission.
+ * Returns the number of timers successfully armed; failed timers have their
+ * state set to the corresponding error value.
+ */
+static int
+cnxk_tim_add_entry_tmo_hwwqe(struct cnxk_tim_ring *const tim_ring,
+			     struct rte_event_timer **const tim, uint64_t intvl, uint16_t nb_timers)
+{
+	uint16_t cnt, rem, base, i, j, done;
+	uint64_t wdata, pa;
+	uintptr_t lmt_addr;
+	uint64_t *status;
+	uint16_t lmt_id;
+	uint64_t *lmt;
+	uint64_t rsp;
+
+	/* We have 32 LMTLINES per core, but use only 1 line as we need to check status */
+	lmt_addr = tim_ring->lmt_base;
+	ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
+
+	done = 0;
+	lmt = (uint64_t *)lmt_addr;
+	/* We can do up to 7 timers per LMTLINE */
+	cnt = nb_timers / CNXK_TIM_ENT_PER_LMT;
+	/* Timers left over after the full LMTLINE bursts; the residual count
+	 * and base index must use cnt * CNXK_TIM_ENT_PER_LMT (not plain cnt),
+	 * otherwise timers are double-armed and more than 7 entries can be
+	 * written into the single LMTLINE when nb_timers >= 7.
+	 */
+	rem = nb_timers - (cnt * CNXK_TIM_ENT_PER_LMT);
+	base = cnt * CNXK_TIM_ENT_PER_LMT;
+
+	lmt[0] = intvl;
+	lmt[1] = 0x1; /* Always relative */
+	/* One LMT line is used, CNTM1 is 0 and SIZE_VEC is not included. */
+	wdata = lmt_id;
+	/* SIZEM1 is 0 */
+	pa = (tim_ring->tbase & ~0xFF) + TIM_LF_SCHED_TIMER0;
+	pa |= (uint64_t)(CNXK_TIM_ENT_PER_LMT << 4);
+	for (i = 0; i < cnt; i++) {
+		status = &tim[i * CNXK_TIM_ENT_PER_LMT]->impl_opaque[0];
+
+		for (j = 0; j < CNXK_TIM_ENT_PER_LMT; j++) {
+			cnxk_tim_format_event(tim[(i * CNXK_TIM_ENT_PER_LMT) + j],
+					      (struct cnxk_tim_ent *)&lmt[(j << 1) + 2]);
+			tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->impl_opaque[0] = 0;
+			tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->impl_opaque[1] = 0;
+			tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->state = RTE_EVENT_TIMER_ARMED;
+		}
+
+		roc_lmt_submit_steorl(wdata, pa);
+		/* NOTE(review): this poll checks the low nibble while the
+		 * per-timer recovery below checks the high nibble (0xF0) and
+		 * the single-arm path polls 0xF0; confirm the result nibble
+		 * layout against the TIM HW spec.
+		 */
+		do {
+			rsp = __atomic_load_n(status, rte_memory_order_relaxed);
+			rsp &= 0xFUL;
+		} while (!rsp);
+
+		done += CNXK_TIM_ENT_PER_LMT;
+		if (rsp != 0x1) {
+			switch (rsp) {
+			case 0x3:
+				for (j = 0; j < CNXK_TIM_ENT_PER_LMT; j++)
+					tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->state =
+						RTE_EVENT_TIMER_ERROR_TOOEARLY;
+				done -= CNXK_TIM_ENT_PER_LMT;
+				break;
+			case 0x4:
+				for (j = 0; j < CNXK_TIM_ENT_PER_LMT; j++)
+					tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->state =
+						RTE_EVENT_TIMER_ERROR_TOOLATE;
+				done -= CNXK_TIM_ENT_PER_LMT;
+				break;
+			case 0x2:
+			default:
+				/* Partial failure: keep only timers whose own
+				 * status shows armed (high nibble == 0x1).
+				 */
+				for (j = 0; j < CNXK_TIM_ENT_PER_LMT; j++) {
+					if ((__atomic_load_n(&tim[(i * CNXK_TIM_ENT_PER_LMT) + j]
+								      ->impl_opaque[0],
+							     rte_memory_order_relaxed) &
+					     0xF0) != 0x10) {
+						tim[(i * CNXK_TIM_ENT_PER_LMT) + j]->state =
+							RTE_EVENT_TIMER_ERROR;
+						done--;
+					}
+				}
+				break;
+			}
+			goto done;
+		}
+	}
+
+	/* SIZEM1 is 0 */
+	pa = (tim_ring->tbase & ~0xFF) + TIM_LF_SCHED_TIMER0;
+	pa |= (uint64_t)(rem << 4);
+	if (rem) {
+		status = &tim[base]->impl_opaque[0];
+
+		for (i = 0; i < rem; i++) {
+			cnxk_tim_format_event(tim[base + i],
+					      (struct cnxk_tim_ent *)&lmt[(i << 1) + 2]);
+			tim[base + i]->impl_opaque[0] = 0;
+			tim[base + i]->impl_opaque[1] = 0;
+			tim[base + i]->state = RTE_EVENT_TIMER_ARMED;
+		}
+
+		roc_lmt_submit_steorl(wdata, pa);
+		do {
+			rsp = __atomic_load_n(status, rte_memory_order_relaxed);
+			rsp &= 0xFUL;
+		} while (!rsp);
+
+		done += rem;
+		if (rsp != 0x1) {
+			switch (rsp) {
+			case 0x3:
+				for (j = 0; j < rem; j++)
+					tim[base + j]->state = RTE_EVENT_TIMER_ERROR_TOOEARLY;
+				done -= rem;
+				break;
+			case 0x4:
+				for (j = 0; j < rem; j++)
+					tim[base + j]->state = RTE_EVENT_TIMER_ERROR_TOOLATE;
+				done -= rem;
+				break;
+			case 0x2:
+			default:
+				for (j = 0; j < rem; j++) {
+					if ((__atomic_load_n(&tim[base + j]->impl_opaque[0],
+							     rte_memory_order_relaxed) &
+					     0xF0) != 0x10) {
+						tim[base + j]->state = RTE_EVENT_TIMER_ERROR;
+						done--;
+					}
+				}
+				break;
+			}
+		}
+	}
+
+done:
+	return done;
+}
+
 static int
 cnxk_tim_rm_entry(struct rte_event_timer *tim)
 {
-- 
2.25.1


      parent reply	other threads:[~2024-10-03 13:24 UTC|newest]

Thread overview: 20+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-10-03 13:22 [PATCH 01/20] common/cnxk: implement SSO HW info pbhagavatula
2024-10-03 13:22 ` [PATCH 02/20] event/cnxk: add CN20K specific device probe pbhagavatula
2024-10-03 13:22 ` [PATCH 03/20] event/cnxk: add CN20K device config pbhagavatula
2024-10-03 13:22 ` [PATCH 04/20] event/cnxk: add CN20k event queue config pbhagavatula
2024-10-03 13:22 ` [PATCH 05/20] event/cnxk: add CN20K event port configuration pbhagavatula
2024-10-03 13:22 ` [PATCH 06/20] event/cnxk: add CN20K SSO enqueue fast path pbhagavatula
2024-10-03 13:22 ` [PATCH 07/20] event/cnxk: add CN20K SSO dequeue " pbhagavatula
2024-10-03 13:22 ` [PATCH 08/20] event/cnxk: add CN20K event port quiesce pbhagavatula
2024-10-03 13:22 ` [PATCH 09/20] event/cnxk: add CN20K event port profile switch pbhagavatula
2024-10-03 13:22 ` [PATCH 10/20] event/cnxk: add CN20K device start pbhagavatula
2024-10-03 13:22 ` [PATCH 11/20] event/cnxk: add CN20K device stop and close pbhagavatula
2024-10-03 13:22 ` [PATCH 12/20] event/cnxk: add CN20K xstats, selftest and dump pbhagavatula
2024-10-03 13:22 ` [PATCH 13/20] event/cnxk: support CN20K Rx adapter pbhagavatula
2024-10-03 13:22 ` [PATCH 14/20] event/cnxk: support CN20K Rx adapter fast path pbhagavatula
2024-10-03 13:22 ` [PATCH 15/20] event/cnxk: support CN20K Tx adapter pbhagavatula
2024-10-03 13:22 ` [PATCH 16/20] event/cnxk: support CN20K Tx adapter fast path pbhagavatula
2024-10-03 13:22 ` [PATCH 17/20] common/cnxk: add SSO event aggregator pbhagavatula
2024-10-03 13:22 ` [PATCH 18/20] event/cnxk: add Rx/Tx event vector support pbhagavatula
2024-10-03 13:22 ` [PATCH 19/20] common/cnxk: update timer base code pbhagavatula
2024-10-03 13:22 ` pbhagavatula [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241003132237.20193-20-pbhagavatula@marvell.com \
    --to=pbhagavatula@marvell.com \
    --cc=dev@dpdk.org \
    --cc=hkalra@marvell.com \
    --cc=jerinj@marvell.com \
    --cc=kirankumark@marvell.com \
    --cc=ndabilpuram@marvell.com \
    --cc=skori@marvell.com \
    --cc=skoteshwar@marvell.com \
    --cc=sthotton@marvell.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).