From: Abdullah Sevincer <abdullah.sevincer@intel.com>
To: dev@dpdk.org
Cc: jerinj@marvell.com, mike.ximing.chen@intel.com,
	tirthendu.sarkar@intel.com, pravin.pathak@intel.com,
	shivani.doneria@intel.com,
	Abdullah Sevincer <abdullah.sevincer@intel.com>
Subject: [PATCH v5 3/5] event/dlb2: enhance DLB credit handling
Date: Wed, 19 Jun 2024 16:01:04 -0500
Message-ID: <20240619210106.253239-4-abdullah.sevincer@intel.com>
In-Reply-To: <20240619210106.253239-1-abdullah.sevincer@intel.com>

This commit improves DLB credit handling in scenarios where ports
hold on to credits but cannot release them because the accumulated
amount is too small (less than 2 * credit quanta).

Worker ports now release all accumulated credits when the
back-to-back zero-poll count reaches a preset threshold.

Producer ports release all accumulated credits when enqueues fail
for a preset number of consecutive retries.

All newly introduced compilation flags guard fast-path code only.
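
As an illustration, the new checks can be compiled out at build time.
The command line below is hypothetical; the option names and the
enable/1/disable/0 values are those defined in the meson.build changes
in this patch:

  meson setup build -Ddlb_pmd_args=hw_credits_checks:disable,sw_credits_checks:0,type_check:disable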

Signed-off-by: Abdullah Sevincer <abdullah.sevincer@intel.com>
---
 drivers/event/dlb2/dlb2.c      | 322 +++++++++++++++++++++++++++------
 drivers/event/dlb2/dlb2_priv.h |   1 +
 drivers/event/dlb2/meson.build |  40 ++++
 meson_options.txt              |   2 +
 4 files changed, 306 insertions(+), 59 deletions(-)

diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 837c0639a3..e20c0173d0 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -43,7 +43,50 @@
  * to DLB can go ahead of relevant application writes like updates to buffers
  * being sent with event
  */
+#ifndef DLB2_BYPASS_FENCE_ON_PP
 #define DLB2_BYPASS_FENCE_ON_PP 0  /* 1 == Bypass fence, 0 == do not bypass */
+#endif
+/*
+ * HW credit checks can only be turned off for a DLB2 device if the
+ * following holds for each eventdev created on it:
+ * LDB credits <= DIR credits + minimum CQ depth
+ * (CQ depth here is the minimum across all ports configured within the
+ * eventdev.)
+ * This must be true for every eventdev created on any DLB2 device
+ * managed by this driver.
+ * DLB2.5 has no such restriction, as it uses a single credit pool.
+ */
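+/*
+ * Illustrative (hypothetical) numbers: with 2048 LDB credits, 1024 DIR
+ * credits and a minimum configured CQ depth of 1024, the condition
+ * 2048 <= 1024 + 1024 holds, so the HW credit checks may be disabled.
+ */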
+#ifndef DLB_HW_CREDITS_CHECKS
+#define DLB_HW_CREDITS_CHECKS 1
+#endif
+
+/*
+ * SW credit checks can only be turned off if the application has a way
+ * to limit input events to the eventdev below the assigned credit limit.
+ */
+#ifndef DLB_SW_CREDITS_CHECKS
+#define DLB_SW_CREDITS_CHECKS 1
+#endif
+
+/*
+ * Once the application is fully validated, the type check can be turned
+ * off. HW will continue checking for the correct type and will generate
+ * an alarm on mismatch.
+ */
+#ifndef DLB_TYPE_CHECK
+#define DLB_TYPE_CHECK 1
+#endif
+#define DLB_TYPE_MACRO 0x010002
+
+/*
+ * To avoid deadlock, ports holding on to credits will release them after
+ * this many consecutive zero dequeues.
+ */
+#define DLB2_ZERO_DEQ_CREDIT_RETURN_THRES 16384
+
+/*
+ * To avoid deadlock, ports holding on to credits will release them after
+ * this many consecutive enqueue failures.
+ */
+#define DLB2_ENQ_FAIL_CREDIT_RETURN_THRES 100
 
 /*
  * Resources exposed to eventdev. Some values overridden at runtime using
@@ -366,6 +409,33 @@ set_max_num_events(const char *key __rte_unused,
 	return 0;
 }
 
+static int
+set_max_num_events_v2_5(const char *key __rte_unused,
+			const char *value,
+			void *opaque)
+{
+	int *max_num_events = opaque;
+	int ret;
+
+	if (value == NULL || opaque == NULL) {
+		DLB2_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb2_string_to_int(max_num_events, value);
+	if (ret < 0)
+		return ret;
+
+	if (*max_num_events < 0 || *max_num_events >
+			DLB2_MAX_NUM_CREDITS(DLB2_HW_V2_5)) {
+		DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
+			     DLB2_MAX_NUM_CREDITS(DLB2_HW_V2_5));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int
 set_num_dir_credits(const char *key __rte_unused,
 		    const char *value,
@@ -966,6 +1036,15 @@ dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
 	dlb2->num_queues = 0;
 	dlb2->num_ldb_queues = 0;
 	dlb2->num_dir_queues = 0;
+	if (dlb2->version == DLB2_HW_V2_5) {
+		dlb2->num_credits = 0;
+		dlb2->max_credits = 0;
+	} else {
+		dlb2->num_ldb_credits = 0;
+		dlb2->num_dir_credits = 0;
+		dlb2->max_ldb_credits = 0;
+		dlb2->max_dir_credits = 0;
+	}
 	dlb2->configured = false;
 }
 
@@ -1074,11 +1153,14 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev)
 	if (dlb2->version == DLB2_HW_V2_5) {
 		dlb2->credit_pool = rsrcs->num_credits;
 		dlb2->max_credits = rsrcs->num_credits;
+		dlb2->num_credits = rsrcs->num_credits;
 	} else {
 		dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
 		dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
+		dlb2->num_ldb_credits = rsrcs->num_ldb_credits;
 		dlb2->dir_credit_pool = rsrcs->num_dir_credits;
 		dlb2->max_dir_credits = rsrcs->num_dir_credits;
+		dlb2->num_dir_credits = rsrcs->num_dir_credits;
 	}
 
 	dlb2->configured = true;
@@ -1679,6 +1761,12 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 
 	qm_port->id = qm_port_id;
 
+	if (dlb2->version == DLB2_HW_V2) {
+		qm_port->cached_ldb_credits = 0;
+		qm_port->cached_dir_credits = 0;
+	} else {
+		qm_port->cached_credits = 0;
+	}
+
 	if (dlb2->version == DLB2_HW_V2_5 && (dlb2->enable_cq_weight == true)) {
 		struct dlb2_enable_cq_weight_args cq_weight_args = { {0} };
 		cq_weight_args.port_id = qm_port->id;
@@ -2047,19 +2135,8 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev,
 	ev_port->credit_update_quanta = sw_credit_quanta;
 	ev_port->qm_port.hw_credit_quanta = hw_credit_quanta;
 
-	/*
-	 * Validate credit config before creating port
-	 */
 
-	if (port_conf->enqueue_depth > sw_credit_quanta ||
-	    port_conf->enqueue_depth > hw_credit_quanta) {
-		DLB2_LOG_ERR("Invalid port config. Enqueue depth %d must be <= credit quanta %d and batch size %d\n",
-			     port_conf->enqueue_depth,
-			     sw_credit_quanta,
-			     hw_credit_quanta);
-		return -EINVAL;
-	}
-	ev_port->enq_retries = port_conf->enqueue_depth / sw_credit_quanta;
+	ev_port->enq_retries = port_conf->enqueue_depth;
 
 	/* Save off port config for reconfig */
 	ev_port->conf = *port_conf;
@@ -2494,6 +2571,61 @@ dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,
 	return ret;
 }
 
+static inline void
+dlb2_port_credits_return(struct dlb2_port *qm_port)
+{
+	/* Return all port credits */
+	if (qm_port->dlb2->version == DLB2_HW_V2_5) {
+		if (qm_port->cached_credits) {
+			rte_atomic_fetch_add_explicit(qm_port->credit_pool[DLB2_COMBINED_POOL],
+					   qm_port->cached_credits, rte_memory_order_seq_cst);
+			qm_port->cached_credits = 0;
+		}
+	} else {
+		if (qm_port->cached_ldb_credits) {
+			rte_atomic_fetch_add_explicit(qm_port->credit_pool[DLB2_LDB_QUEUE],
+					   qm_port->cached_ldb_credits, rte_memory_order_seq_cst);
+			qm_port->cached_ldb_credits = 0;
+		}
+		if (qm_port->cached_dir_credits) {
+			rte_atomic_fetch_add_explicit(qm_port->credit_pool[DLB2_DIR_QUEUE],
+					   qm_port->cached_dir_credits, rte_memory_order_seq_cst);
+			qm_port->cached_dir_credits = 0;
+		}
+	}
+}
+
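+/* Return 'val' software (inflight) credits from the port back to the
+ * eventdev-wide pool.
+ */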
+static inline void
+dlb2_release_sw_credits(struct dlb2_eventdev *dlb2,
+			struct dlb2_eventdev_port *ev_port, uint16_t val)
+{
+	if (ev_port->inflight_credits) {
+		rte_atomic_fetch_sub_explicit(&dlb2->inflights, val, rte_memory_order_seq_cst);
+		ev_port->inflight_credits -= val;
+	}
+}
+
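+/*
+ * Release all cached HW and SW credits once 'cond' has held for more than
+ * 'threshold' consecutive calls; reset the streak counter whenever 'cond'
+ * is false. Callers such as port unlink and dequeue timeout pass a zero
+ * threshold to force an immediate return of credits.
+ */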
+static void dlb2_check_and_return_credits(struct dlb2_eventdev_port *ev_port,
+					  bool cond, uint32_t threshold)
+{
+#if DLB_SW_CREDITS_CHECKS || DLB_HW_CREDITS_CHECKS
+	if (cond) {
+		if (++ev_port->credit_return_count > threshold) {
+#if DLB_SW_CREDITS_CHECKS
+			dlb2_release_sw_credits(ev_port->dlb2, ev_port,
+						ev_port->inflight_credits);
+#endif
+#if DLB_HW_CREDITS_CHECKS
+			dlb2_port_credits_return(&ev_port->qm_port);
+#endif
+			ev_port->credit_return_count = 0;
+		}
+	} else {
+		ev_port->credit_return_count = 0;
+	}
+#endif
+}
+
 static int
 dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
 			  uint8_t queues[], uint16_t nb_unlinks)
@@ -2513,14 +2645,15 @@ dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
 
 	if (queues == NULL || nb_unlinks == 0) {
 		DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0\n");
-		return 0; /* Ignore and return success */
+		nb_unlinks = 0; /* Ignore and return success */
+		goto ret_credits;
 	}
 
 	if (ev_port->qm_port.is_directed) {
 		DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d\n",
 			     ev_port->id);
 		rte_errno = 0;
-		return nb_unlinks; /* as if success */
+		goto ret_credits;
 	}
 
 	dlb2 = ev_port->dlb2;
@@ -2559,6 +2692,10 @@ dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
 		ev_queue->num_links--;
 	}
 
+ret_credits:
+	if (ev_port->inflight_credits)
+		dlb2_check_and_return_credits(ev_port, true, 0);
+
 	return nb_unlinks;
 }
 
@@ -2758,8 +2895,7 @@ dlb2_replenish_sw_credits(struct dlb2_eventdev *dlb2,
 		/* Replenish credits, saving one quanta for enqueues */
 		uint16_t val = ev_port->inflight_credits - quanta;
 
-		rte_atomic_fetch_sub_explicit(&dlb2->inflights, val, rte_memory_order_seq_cst);
-		ev_port->inflight_credits -= val;
+		dlb2_release_sw_credits(dlb2, ev_port, val);
 	}
 }
 
@@ -2789,10 +2925,15 @@ dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
 			rte_errno = -ENOSPC;
 			return 1;
 		}
-
-		rte_atomic_fetch_add_explicit(&dlb2->inflights, credit_update_quanta,
-				   rte_memory_order_seq_cst);
-		ev_port->inflight_credits += (credit_update_quanta);
+		/*
+		 * Take the credits with a CAS so that, under contention, the
+		 * inflight count cannot overshoot the new-event limit checked
+		 * above. The application will retry if this attempt fails.
+		 */
+		if (rte_atomic_compare_exchange_strong_explicit(&dlb2->inflights, &sw_inflights,
+					sw_inflights + credit_update_quanta,
+					rte_memory_order_seq_cst, rte_memory_order_seq_cst)) {
+			ev_port->inflight_credits += credit_update_quanta;
+		} else {
+			rte_errno = -ENOSPC;
+			return 1;
+		}
 
 		if (ev_port->inflight_credits < num) {
 			DLB2_INC_STAT(
@@ -2930,7 +3071,9 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 {
 	struct dlb2_eventdev *dlb2 = ev_port->dlb2;
 	struct dlb2_eventdev_queue *ev_queue;
+#if DLB_HW_CREDITS_CHECKS
 	uint16_t *cached_credits = NULL;
+#endif
 	struct dlb2_queue *qm_queue;
 
 	ev_queue = &dlb2->ev_queues[ev->queue_id];
@@ -2942,6 +3085,7 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 		goto op_check;
 
 	if (!qm_queue->is_directed) {
+#if DLB_HW_CREDITS_CHECKS
 		/* Load balanced destination queue */
 
 		if (dlb2->version == DLB2_HW_V2) {
@@ -2985,9 +3129,20 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 			rte_errno = -EINVAL;
 			return 1;
 		}
+#else
+#if (RTE_SCHED_TYPE_PARALLEL != 2) || (RTE_SCHED_TYPE_ATOMIC != 1)
+#error "ERROR: RTE event schedule type values changed. Needs a code change"
+#endif
+		/* Map RTE eventdev schedule type to DLB HW schedule type */
+		if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED)
+			/* RTE-Parallel -> DLB-UnOrd 2->1, RTE-Atm -> DLB-Atm 1->0 */
+			*sched_type = ev->sched_type - 1;
+		else /* To support CFG_ALL_TYPEs */
+			*sched_type = DLB2_SCHED_ORDERED; /* RTE-Ord -> DLB-Ord 0->2 */
+#endif
 	} else {
 		/* Directed destination queue */
-
+#if DLB_HW_CREDITS_CHECKS
 		if (dlb2->version == DLB2_HW_V2) {
 			if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
 				rte_errno = -ENOSPC;
@@ -3001,6 +3156,7 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 			}
 			cached_credits = &qm_port->cached_credits;
 		}
+#endif
 		DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
 
 		*sched_type = DLB2_SCHED_DIRECTED;
@@ -3009,13 +3165,17 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 op_check:
 	switch (ev->op) {
 	case RTE_EVENT_OP_NEW:
+#if DLB_SW_CREDITS_CHECKS
 		/* Check that a sw credit is available */
 		if (dlb2_check_enqueue_sw_credits(dlb2, ev_port)) {
 			rte_errno = -ENOSPC;
 			return 1;
 		}
 		ev_port->inflight_credits--;
+#endif
+#if DLB_HW_CREDITS_CHECKS
 		(*cached_credits)--;
+#endif
 		break;
 	case RTE_EVENT_OP_FORWARD:
 		/* Check for outstanding_releases underflow. If this occurs,
@@ -3026,10 +3186,14 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 		RTE_ASSERT(ev_port->outstanding_releases > 0);
 		ev_port->outstanding_releases--;
 		qm_port->issued_releases++;
+#if DLB_HW_CREDITS_CHECKS
 		(*cached_credits)--;
+#endif
 		break;
 	case RTE_EVENT_OP_RELEASE:
+#if DLB_SW_CREDITS_CHECKS
 		ev_port->inflight_credits++;
+#endif
 		/* Check for outstanding_releases underflow. If this occurs,
 		 * the application is not using the EVENT_OPs correctly; for
 		 * example, forwarding or releasing events that were not
@@ -3038,9 +3202,28 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 		RTE_ASSERT(ev_port->outstanding_releases > 0);
 		ev_port->outstanding_releases--;
 		qm_port->issued_releases++;
-
+#if DLB_SW_CREDITS_CHECKS
 		/* Replenish s/w credits if enough are cached */
 		dlb2_replenish_sw_credits(dlb2, ev_port);
+#endif
+		break;
+	/* Fragments are not supported in the API. The block below is
+	 * unreachable (it has no case label) and is kept, disabled, only
+	 * for possible future use.
+	 */
+#if 0
+#if DLB_SW_CREDITS_CHECKS
+		/* Check that a sw credit is available */
+		if (dlb2_check_enqueue_sw_credits(dlb2, ev_port)) {
+			rte_errno = -ENOSPC;
+			return 1;
+		}
+		ev_port->inflight_credits--;
+#endif
+#if DLB_HW_CREDITS_CHECKS
+		(*cached_credits)--;
+#endif
+		break;
+#endif
 	}
 
@@ -3151,6 +3334,8 @@ __dlb2_event_enqueue_burst(void *event_port,
 			break;
 	}
 
+	dlb2_check_and_return_credits(ev_port, !i, DLB2_ENQ_FAIL_CREDIT_RETURN_THRES);
+
 	return i;
 }
 
@@ -3289,53 +3474,45 @@ dlb2_event_release(struct dlb2_eventdev *dlb2,
 		return;
 	}
 	ev_port->outstanding_releases -= i;
+#if DLB_SW_CREDITS_CHECKS
 	ev_port->inflight_credits += i;
 
 	/* Replenish s/w credits if enough releases are performed */
 	dlb2_replenish_sw_credits(dlb2, ev_port);
+#endif
 }
 
 static inline void
 dlb2_port_credits_inc(struct dlb2_port *qm_port, int num)
 {
 	uint32_t batch_size = qm_port->hw_credit_quanta;
+	int val;
 
 	/* increment port credits, and return to pool if exceeds threshold */
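+	/* On reaching twice the quanta, return everything above one quanta so
+	 * the port keeps exactly batch_size credits cached.
+	 */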
-	if (!qm_port->is_directed) {
-		if (qm_port->dlb2->version == DLB2_HW_V2) {
-			qm_port->cached_ldb_credits += num;
-			if (qm_port->cached_ldb_credits >= 2 * batch_size) {
-				rte_atomic_fetch_add_explicit(
-					qm_port->credit_pool[DLB2_LDB_QUEUE],
-					batch_size, rte_memory_order_seq_cst);
-				qm_port->cached_ldb_credits -= batch_size;
-			}
-		} else {
-			qm_port->cached_credits += num;
-			if (qm_port->cached_credits >= 2 * batch_size) {
-				rte_atomic_fetch_add_explicit(
-				      qm_port->credit_pool[DLB2_COMBINED_POOL],
-				      batch_size, rte_memory_order_seq_cst);
-				qm_port->cached_credits -= batch_size;
-			}
+	if (qm_port->dlb2->version == DLB2_HW_V2_5) {
+		qm_port->cached_credits += num;
+		if (qm_port->cached_credits >= 2 * batch_size) {
+			val = qm_port->cached_credits - batch_size;
+			rte_atomic_fetch_add_explicit(
+			    qm_port->credit_pool[DLB2_COMBINED_POOL], val,
+			    rte_memory_order_seq_cst);
+			qm_port->cached_credits -= val;
+		}
+	} else if (!qm_port->is_directed) {
+		qm_port->cached_ldb_credits += num;
+		if (qm_port->cached_ldb_credits >= 2 * batch_size) {
+			val = qm_port->cached_ldb_credits - batch_size;
+			rte_atomic_fetch_add_explicit(qm_port->credit_pool[DLB2_LDB_QUEUE],
+					   val, rte_memory_order_seq_cst);
+			qm_port->cached_ldb_credits -= val;
 		}
 	} else {
-		if (qm_port->dlb2->version == DLB2_HW_V2) {
-			qm_port->cached_dir_credits += num;
-			if (qm_port->cached_dir_credits >= 2 * batch_size) {
-				rte_atomic_fetch_add_explicit(
-					qm_port->credit_pool[DLB2_DIR_QUEUE],
-					batch_size, rte_memory_order_seq_cst);
-				qm_port->cached_dir_credits -= batch_size;
-			}
-		} else {
-			qm_port->cached_credits += num;
-			if (qm_port->cached_credits >= 2 * batch_size) {
-				rte_atomic_fetch_add_explicit(
-				      qm_port->credit_pool[DLB2_COMBINED_POOL],
-				      batch_size, rte_memory_order_seq_cst);
-				qm_port->cached_credits -= batch_size;
-			}
+		qm_port->cached_dir_credits += num;
+		if (qm_port->cached_dir_credits >= 2 * batch_size) {
+			val = qm_port->cached_dir_credits - batch_size;
+			rte_atomic_fetch_add_explicit(qm_port->credit_pool[DLB2_DIR_QUEUE],
+					   val, rte_memory_order_seq_cst);
+			qm_port->cached_dir_credits -= val;
 		}
 	}
 }
@@ -3366,6 +3543,16 @@ dlb2_dequeue_wait(struct dlb2_eventdev *dlb2,
 
 	/* Wait/poll time expired */
 	if (elapsed_ticks >= timeout) {
+
+		/* Return all credits before blocking if the credits remaining
+		 * in the system are fewer than one quanta.
+		 */
+		uint32_t sw_inflights = rte_atomic_load_explicit(&dlb2->inflights,
+				rte_memory_order_seq_cst);
+		uint32_t quanta = ev_port->credit_update_quanta;
+
+		if (dlb2->new_event_limit - sw_inflights < quanta)
+			dlb2_check_and_return_credits(ev_port, true, 0);
 		return 1;
 	} else if (dlb2->umwait_allowed) {
 		struct rte_power_monitor_cond pmc;
@@ -4101,7 +4288,9 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
 
 		ev_port->outstanding_releases += num;
 
+#if DLB_HW_CREDITS_CHECKS
 		dlb2_port_credits_inc(qm_port, num);
+#endif
 	}
 
 	return num;
@@ -4228,8 +4417,9 @@ dlb2_hw_dequeue(struct dlb2_eventdev *dlb2,
 			dlb2_consume_qe_immediate(qm_port, num);
 
 		ev_port->outstanding_releases += num;
-
+#if DLB_HW_CREDITS_CHECKS
 		dlb2_port_credits_inc(qm_port, num);
+#endif
 	}
 
 	return num;
@@ -4263,6 +4453,9 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
 	DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
 	DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
 
+	dlb2_check_and_return_credits(ev_port, !cnt,
+				      DLB2_ZERO_DEQ_CREDIT_RETURN_THRES);
+
 	return cnt;
 }
 
@@ -4299,6 +4492,9 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
 
 	DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
 	DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
+
+	dlb2_check_and_return_credits(ev_port, !cnt,
+				      DLB2_ZERO_DEQ_CREDIT_RETURN_THRES);
 	return cnt;
 }
 
@@ -4903,9 +5099,17 @@ dlb2_parse_params(const char *params,
 				return ret;
 			}
 
-			ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
-						 set_max_num_events,
-						 &dlb2_args->max_num_events);
+			if (version == DLB2_HW_V2) {
+				ret = rte_kvargs_process(kvlist,
+						DLB2_MAX_NUM_EVENTS,
+						set_max_num_events,
+						&dlb2_args->max_num_events);
+			} else {
+				ret = rte_kvargs_process(kvlist,
+						DLB2_MAX_NUM_EVENTS,
+						set_max_num_events_v2_5,
+						&dlb2_args->max_num_events);
+			}
 			if (ret != 0) {
 				DLB2_LOG_ERR("%s: Error parsing max_num_events parameter",
 					     name);
diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h
index e7ed27251e..47f76f938f 100644
--- a/drivers/event/dlb2/dlb2_priv.h
+++ b/drivers/event/dlb2/dlb2_priv.h
@@ -527,6 +527,7 @@ struct __rte_cache_aligned dlb2_eventdev_port {
 	struct rte_event_port_conf conf; /* user-supplied configuration */
 	uint16_t inflight_credits; /* num credits this port has right now */
 	uint16_t credit_update_quanta;
+	uint32_t credit_return_count; /* consecutive count for which the credit-return condition has held */
 	struct dlb2_eventdev *dlb2; /* backlink optimization */
 	alignas(RTE_CACHE_LINE_SIZE) struct dlb2_port_stats stats;
 	struct dlb2_event_queue_link link[DLB2_MAX_NUM_QIDS_PER_LDB_CQ];
diff --git a/drivers/event/dlb2/meson.build b/drivers/event/dlb2/meson.build
index 515d1795fe..7760f78f87 100644
--- a/drivers/event/dlb2/meson.build
+++ b/drivers/event/dlb2/meson.build
@@ -68,3 +68,43 @@ endif
 headers = files('rte_pmd_dlb2.h')
 
 deps += ['mbuf', 'mempool', 'ring', 'pci', 'bus_pci']
+
+dlb_pmd_opts = ['bypass_fence', 'hw_credits_checks', 'sw_credits_checks', 'type_check']
+dlb_pmd_defines = ['DLB2_BYPASS_FENCE_ON_PP', 'DLB_HW_CREDITS_CHECKS', 'DLB_SW_CREDITS_CHECKS', 'DLB_TYPE_CHECK']
+dlb_pmd_default = ['0','1','1','1']
+dlb_pmd_args = []
+
+# DLB PMD arguments can be provided as -Ddlb_pmd_args=option1:value1,option2:value2,... on the meson command line.
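+# Example (illustrative): meson setup build -Ddlb_pmd_args=hw_credits_checks:disable,type_check:0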
+arg_str=get_option('dlb_pmd_args').strip()
+if arg_str != ''
+	dlb_pmd_args = arg_str.split(',')
+	foreach arg: dlb_pmd_args
+		opt_args = arg.split(':')
+		if opt_args[0] not in dlb_pmd_opts
+			err_str = 'Unsupported DLB PMD option ' + opt_args[0]
+			err_str += ' Valid options are: bypass_fence, hw_credits_checks, sw_credits_checks, type_check'
+			error(err_str)
+		endif
+	endforeach
+endif
+
+index = 0
+foreach opt: dlb_pmd_opts
+	val = dlb_pmd_default[index]
+	foreach arg: dlb_pmd_args
+		opt_args = arg.split(':')
+		if opt == opt_args[0]
+			if opt_args[1] == 'enable' or opt_args[1] == '1'
+				val = '1'
+			elif opt_args[1] == 'disable' or opt_args[1] == '0'
+				val = '0'
+			else
+				err_str = 'Invalid DLB pmd option value: ' + arg + ' Valid values=enable/1/disable/0'
+				error(err_str)
+			endif
+			break
+		endif
+	endforeach
+	cflags += '-D' + dlb_pmd_defines[index] + '=' + val
+	index = index + 1
+endforeach
diff --git a/meson_options.txt b/meson_options.txt
index e49b2fc089..78b1e849f0 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -12,6 +12,8 @@ option('disable_drivers', type: 'string', value: '', description:
        'Comma-separated list of drivers to explicitly disable.')
 option('disable_libs', type: 'string', value: '', description:
        'Comma-separated list of optional libraries to explicitly disable. [NOTE: mandatory libs cannot be disabled]')
+option('dlb_pmd_args', type: 'string', value: '', description:
+       'Comma-separated list of DLB PMD arguments in option:value format')
 option('drivers_install_subdir', type: 'string', value: 'dpdk/pmds-<VERSION>', description:
        'Subdirectory of libdir where to install PMDs. Defaults to using a versioned subdirectory.')
 option('enable_docs', type: 'boolean', value: false, description:
-- 
2.25.1

