From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 3FEC6A0524;
	Tue, 13 Apr 2021 22:18:19 +0200 (CEST)
Received: from [217.70.189.124] (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id B2728161339;
	Tue, 13 Apr 2021 22:16:37 +0200 (CEST)
Received: from mga03.intel.com (mga03.intel.com [134.134.136.65])
 by mails.dpdk.org (Postfix) with ESMTP id 019461612CE
 for <dev@dpdk.org>; Tue, 13 Apr 2021 22:16:17 +0200 (CEST)
IronPort-SDR: us+MgMebcwxwDSnUMW4R+6NgeZvX8tnOmMdc/PqURwsJg13qyofDjFVvyB5Fapn6hyKHCns9w7
 Tfv9ODDD681w==
X-IronPort-AV: E=McAfee;i="6200,9189,9953"; a="194519719"
X-IronPort-AV: E=Sophos;i="5.82,220,1613462400"; d="scan'208";a="194519719"
Received: from orsmga008.jf.intel.com ([10.7.209.65])
 by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;
 13 Apr 2021 13:16:17 -0700
IronPort-SDR: aUWK/TzpBbAPqXIifdZRMAyWPgIDPftvju0XBV7iK4p1U42yZyyHCFZH/gvFbBPmOhdQuDpZ+6
 js5rJdnl3NYw==
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.82,220,1613462400"; d="scan'208";a="424406543"
Received: from txasoft-yocto.an.intel.com ([10.123.72.192])
 by orsmga008.jf.intel.com with ESMTP; 13 Apr 2021 13:16:16 -0700
From: Timothy McDaniel <timothy.mcdaniel@intel.com>
To: 
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
 harry.van.haaren@intel.com, jerinj@marvell.com, thomas@monjalon.net
Date: Tue, 13 Apr 2021 15:14:44 -0500
Message-Id: <1618344896-2090-15-git-send-email-timothy.mcdaniel@intel.com>
X-Mailer: git-send-email 1.7.10
In-Reply-To: <1618344896-2090-1-git-send-email-timothy.mcdaniel@intel.com>
References: <20210316221857.2254-2-timothy.mcdaniel@intel.com>
 <1618344896-2090-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH v3 14/26] event/dlb2: add v2.5 credit scheme
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

DLB v2.5 uses a different credit scheme than was used in DLB v2.0.
Specifically, there is a single credit pool for both load balanced
and directed traffic, instead of a separate pool for each as is
found with DLB v2.0.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb2/dlb2.c | 311 ++++++++++++++++++++++++++------------
 1 file changed, 212 insertions(+), 99 deletions(-)

diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 0048f6a1b..cc6495b76 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -436,8 +436,13 @@ dlb2_eventdev_info_get(struct rte_eventdev *dev,
 	 */
 	evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
 	evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
-	evdev_dlb2_default_info.max_num_events += dlb2->max_ldb_credits;
-
+	if (dlb2->version == DLB2_HW_V2_5) {
+		evdev_dlb2_default_info.max_num_events +=
+			dlb2->max_credits;
+	} else {
+		evdev_dlb2_default_info.max_num_events +=
+			dlb2->max_ldb_credits;
+	}
 	evdev_dlb2_default_info.max_event_queues =
 		RTE_MIN(evdev_dlb2_default_info.max_event_queues,
 			RTE_EVENT_MAX_QUEUES_PER_DEV);
@@ -451,7 +456,8 @@ dlb2_eventdev_info_get(struct rte_eventdev *dev,
 
 static int
 dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
-			    const struct dlb2_hw_rsrcs *resources_asked)
+			    const struct dlb2_hw_rsrcs *resources_asked,
+			    uint8_t device_version)
 {
 	int ret = 0;
 	struct dlb2_create_sched_domain_args *cfg;
@@ -468,8 +474,10 @@ dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
 	/* DIR ports and queues */
 
 	cfg->num_dir_ports = resources_asked->num_dir_ports;
-
-	cfg->num_dir_credits = resources_asked->num_dir_credits;
+	if (device_version == DLB2_HW_V2_5)
+		cfg->num_credits = resources_asked->num_credits;
+	else
+		cfg->num_dir_credits = resources_asked->num_dir_credits;
 
 	/* LDB queues */
 
@@ -509,8 +517,8 @@ dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
 		break;
 	}
 
-	cfg->num_ldb_credits =
-		resources_asked->num_ldb_credits;
+	if (device_version == DLB2_HW_V2)
+		cfg->num_ldb_credits = resources_asked->num_ldb_credits;
 
 	cfg->num_atomic_inflights =
 		DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
@@ -519,14 +527,24 @@ dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
 	cfg->num_hist_list_entries = resources_asked->num_ldb_ports *
 		DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
 
-	DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
-		     cfg->num_ldb_queues,
-		     resources_asked->num_ldb_ports,
-		     cfg->num_dir_ports,
-		     cfg->num_atomic_inflights,
-		     cfg->num_hist_list_entries,
-		     cfg->num_ldb_credits,
-		     cfg->num_dir_credits);
+	if (device_version == DLB2_HW_V2_5) {
+		DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, credits=%d\n",
+			     cfg->num_ldb_queues,
+			     resources_asked->num_ldb_ports,
+			     cfg->num_dir_ports,
+			     cfg->num_atomic_inflights,
+			     cfg->num_hist_list_entries,
+			     cfg->num_credits);
+	} else {
+		DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
+			     cfg->num_ldb_queues,
+			     resources_asked->num_ldb_ports,
+			     cfg->num_dir_ports,
+			     cfg->num_atomic_inflights,
+			     cfg->num_hist_list_entries,
+			     cfg->num_ldb_credits,
+			     cfg->num_dir_credits);
+	}
 
 	/* Configure the QM */
 
@@ -606,7 +624,6 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev)
 	 */
 	if (dlb2->configured) {
 		dlb2_hw_reset_sched_domain(dev, true);
-
 		ret = dlb2_hw_query_resources(dlb2);
 		if (ret) {
 			DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
@@ -665,20 +682,26 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev)
 	/* 1 dir queue per dir port */
 	rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
 
-	/* Scale down nb_events_limit by 4 for directed credits, since there
-	 * are 4x as many load-balanced credits.
-	 */
-	rsrcs->num_ldb_credits = 0;
-	rsrcs->num_dir_credits = 0;
+	if (dlb2->version == DLB2_HW_V2_5) {
+		rsrcs->num_credits = 0;
+		if (rsrcs->num_ldb_queues || rsrcs->num_dir_ports)
+			rsrcs->num_credits = config->nb_events_limit;
+	} else {
+		/* Scale down nb_events_limit by 4 for directed credits,
+		 * since there are 4x as many load-balanced credits.
+		 */
+		rsrcs->num_ldb_credits = 0;
+		rsrcs->num_dir_credits = 0;
 
-	if (rsrcs->num_ldb_queues)
-		rsrcs->num_ldb_credits = config->nb_events_limit;
-	if (rsrcs->num_dir_ports)
-		rsrcs->num_dir_credits = config->nb_events_limit / 4;
-	if (dlb2->num_dir_credits_override != -1)
-		rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
+		if (rsrcs->num_ldb_queues)
+			rsrcs->num_ldb_credits = config->nb_events_limit;
+		if (rsrcs->num_dir_ports)
+			rsrcs->num_dir_credits = config->nb_events_limit / 4;
+		if (dlb2->num_dir_credits_override != -1)
+			rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
+	}
 
-	if (dlb2_hw_create_sched_domain(handle, rsrcs) < 0) {
+	if (dlb2_hw_create_sched_domain(handle, rsrcs, dlb2->version) < 0) {
 		DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
 		return -ENODEV;
 	}
@@ -693,10 +716,15 @@ dlb2_eventdev_configure(const struct rte_eventdev *dev)
 	dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
 	dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
 	dlb2->num_dir_queues = dlb2->num_dir_ports;
-	dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
-	dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
-	dlb2->dir_credit_pool = rsrcs->num_dir_credits;
-	dlb2->max_dir_credits = rsrcs->num_dir_credits;
+	if (dlb2->version == DLB2_HW_V2_5) {
+		dlb2->credit_pool = rsrcs->num_credits;
+		dlb2->max_credits = rsrcs->num_credits;
+	} else {
+		dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
+		dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
+		dlb2->dir_credit_pool = rsrcs->num_dir_credits;
+		dlb2->max_dir_credits = rsrcs->num_dir_credits;
+	}
 
 	dlb2->configured = true;
 
@@ -1170,8 +1198,9 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 	struct dlb2_port *qm_port = NULL;
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	uint32_t qm_port_id;
-	uint16_t ldb_credit_high_watermark;
-	uint16_t dir_credit_high_watermark;
+	uint16_t ldb_credit_high_watermark = 0;
+	uint16_t dir_credit_high_watermark = 0;
+	uint16_t credit_high_watermark = 0;
 
 	if (handle == NULL)
 		return -EINVAL;
@@ -1206,15 +1235,18 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 	/* User controls the LDB high watermark via enqueue depth. The DIR high
 	 * watermark is equal, unless the directed credit pool is too small.
 	 */
-	ldb_credit_high_watermark = enqueue_depth;
-
-	/* If there are no directed ports, the kernel driver will ignore this
-	 * port's directed credit settings. Don't use enqueue_depth if it would
-	 * require more directed credits than are available.
-	 */
-	dir_credit_high_watermark =
-		RTE_MIN(enqueue_depth,
-			handle->cfg.num_dir_credits / dlb2->num_ports);
+	if (dlb2->version == DLB2_HW_V2) {
+		ldb_credit_high_watermark = enqueue_depth;
+		/* If there are no directed ports, the kernel driver will
+		 * ignore this port's directed credit settings. Don't use
+		 * enqueue_depth if it would require more directed credits
+		 * than are available.
+		 */
+		dir_credit_high_watermark =
+			RTE_MIN(enqueue_depth,
+				handle->cfg.num_dir_credits / dlb2->num_ports);
+	} else
+		credit_high_watermark = enqueue_depth;
 
 	/* Per QM values */
 
@@ -1249,8 +1281,12 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 
 	qm_port->id = qm_port_id;
 
-	qm_port->cached_ldb_credits = 0;
-	qm_port->cached_dir_credits = 0;
+	if (dlb2->version == DLB2_HW_V2) {
+		qm_port->cached_ldb_credits = 0;
+		qm_port->cached_dir_credits = 0;
+	} else
+		qm_port->cached_credits = 0;
+
 	/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
 	 * the effective depth is smaller.
 	 */
@@ -1298,17 +1334,26 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 	qm_port->state = PORT_STARTED; /* enabled at create time */
 	qm_port->config_state = DLB2_CONFIGURED;
 
-	qm_port->dir_credits = dir_credit_high_watermark;
-	qm_port->ldb_credits = ldb_credit_high_watermark;
-	qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
-	qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
-
-	DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
-		     qm_port_id,
-		     dequeue_depth,
-		     qm_port->ldb_credits,
-		     qm_port->dir_credits);
+	if (dlb2->version == DLB2_HW_V2) {
+		qm_port->dir_credits = dir_credit_high_watermark;
+		qm_port->ldb_credits = ldb_credit_high_watermark;
+		qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
+		qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
+
+		DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
+			     qm_port_id,
+			     dequeue_depth,
+			     qm_port->ldb_credits,
+			     qm_port->dir_credits);
+	} else {
+		qm_port->credits = credit_high_watermark;
+		qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;
 
+		DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, credits=%d\n",
+			     qm_port_id,
+			     dequeue_depth,
+			     qm_port->credits);
+	}
 	rte_spinlock_unlock(&handle->resource_lock);
 
 	return 0;
@@ -1356,8 +1401,9 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
 	struct dlb2_port *qm_port = NULL;
 	char mz_name[RTE_MEMZONE_NAMESIZE];
 	uint32_t qm_port_id;
-	uint16_t ldb_credit_high_watermark;
-	uint16_t dir_credit_high_watermark;
+	uint16_t ldb_credit_high_watermark = 0;
+	uint16_t dir_credit_high_watermark = 0;
+	uint16_t credit_high_watermark = 0;
 
 	if (dlb2 == NULL || handle == NULL)
 		return -EINVAL;
@@ -1386,14 +1432,16 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
 	/* User controls the LDB high watermark via enqueue depth. The DIR high
 	 * watermark is equal, unless the directed credit pool is too small.
 	 */
-	ldb_credit_high_watermark = enqueue_depth;
-
-	/* Don't use enqueue_depth if it would require more directed credits
-	 * than are available.
-	 */
-	dir_credit_high_watermark =
-		RTE_MIN(enqueue_depth,
-			handle->cfg.num_dir_credits / dlb2->num_ports);
+	if (dlb2->version == DLB2_HW_V2) {
+		ldb_credit_high_watermark = enqueue_depth;
+		/* Don't use enqueue_depth if it would require more directed
+		 * credits than are available.
+		 */
+		dir_credit_high_watermark =
+			RTE_MIN(enqueue_depth,
+				handle->cfg.num_dir_credits / dlb2->num_ports);
+	} else
+		credit_high_watermark = enqueue_depth;
 
 	/* Per QM values */
 
@@ -1430,8 +1478,12 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
 
 	qm_port->id = qm_port_id;
 
-	qm_port->cached_ldb_credits = 0;
-	qm_port->cached_dir_credits = 0;
+	if (dlb2->version == DLB2_HW_V2) {
+		qm_port->cached_ldb_credits = 0;
+		qm_port->cached_dir_credits = 0;
+	} else
+		qm_port->cached_credits = 0;
+
 	/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
 	 * the effective depth is smaller.
 	 */
@@ -1467,17 +1519,26 @@ dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
 	qm_port->state = PORT_STARTED; /* enabled at create time */
 	qm_port->config_state = DLB2_CONFIGURED;
 
-	qm_port->dir_credits = dir_credit_high_watermark;
-	qm_port->ldb_credits = ldb_credit_high_watermark;
-	qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
-	qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
-
-	DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
-		     qm_port_id,
-		     dequeue_depth,
-		     dir_credit_high_watermark,
-		     ldb_credit_high_watermark);
+	if (dlb2->version == DLB2_HW_V2) {
+		qm_port->dir_credits = dir_credit_high_watermark;
+		qm_port->ldb_credits = ldb_credit_high_watermark;
+		qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
+		qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
+
+		DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
+			     qm_port_id,
+			     dequeue_depth,
+			     dir_credit_high_watermark,
+			     ldb_credit_high_watermark);
+	} else {
+		qm_port->credits = credit_high_watermark;
+		qm_port->credit_pool[DLB2_COMBINED_POOL] = &dlb2->credit_pool;
 
+		DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d\n",
+			     qm_port_id,
+			     dequeue_depth,
+			     credit_high_watermark);
+	}
 	rte_spinlock_unlock(&handle->resource_lock);
 
 	return 0;
@@ -2297,6 +2358,24 @@ dlb2_check_enqueue_hw_dir_credits(struct dlb2_port *qm_port)
 	return 0;
 }
 
+static inline int
+dlb2_check_enqueue_hw_credits(struct dlb2_port *qm_port)
+{
+	if (unlikely(qm_port->cached_credits == 0)) {
+		qm_port->cached_credits =
+			dlb2_port_credits_get(qm_port,
+					      DLB2_COMBINED_POOL);
+		if (unlikely(qm_port->cached_credits == 0)) {
+			DLB2_INC_STAT(
+			qm_port->ev_port->stats.traffic.tx_nospc_hw_credits, 1);
+			DLB2_LOG_DBG("credits exhausted\n");
+			return 1; /* credits exhausted */
+		}
+	}
+
+	return 0;
+}
+
 static __rte_always_inline void
 dlb2_pp_write(struct dlb2_enqueue_qe *qe4,
 	      struct process_local_port_data *port_data)
@@ -2565,12 +2644,19 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 	if (!qm_queue->is_directed) {
 		/* Load balanced destination queue */
 
-		if (dlb2_check_enqueue_hw_ldb_credits(qm_port)) {
-			rte_errno = -ENOSPC;
-			return 1;
+		if (dlb2->version == DLB2_HW_V2) {
+			if (dlb2_check_enqueue_hw_ldb_credits(qm_port)) {
+				rte_errno = -ENOSPC;
+				return 1;
+			}
+			cached_credits = &qm_port->cached_ldb_credits;
+		} else {
+			if (dlb2_check_enqueue_hw_credits(qm_port)) {
+				rte_errno = -ENOSPC;
+				return 1;
+			}
+			cached_credits = &qm_port->cached_credits;
 		}
-		cached_credits = &qm_port->cached_ldb_credits;
-
 		switch (ev->sched_type) {
 		case RTE_SCHED_TYPE_ORDERED:
 			DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED\n");
@@ -2602,12 +2688,19 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 	} else {
 		/* Directed destination queue */
 
-		if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
-			rte_errno = -ENOSPC;
-			return 1;
+		if (dlb2->version == DLB2_HW_V2) {
+			if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
+				rte_errno = -ENOSPC;
+				return 1;
+			}
+			cached_credits = &qm_port->cached_dir_credits;
+		} else {
+			if (dlb2_check_enqueue_hw_credits(qm_port)) {
+				rte_errno = -ENOSPC;
+				return 1;
+			}
+			cached_credits = &qm_port->cached_credits;
 		}
-		cached_credits = &qm_port->cached_dir_credits;
-
 		DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
 
 		*sched_type = DLB2_SCHED_DIRECTED;
@@ -2891,20 +2984,40 @@ dlb2_port_credits_inc(struct dlb2_port *qm_port, int num)
 
 	/* increment port credits, and return to pool if exceeds threshold */
 	if (!qm_port->is_directed) {
-		qm_port->cached_ldb_credits += num;
-		if (qm_port->cached_ldb_credits >= 2 * batch_size) {
-			__atomic_fetch_add(
-				qm_port->credit_pool[DLB2_LDB_QUEUE],
-				batch_size, __ATOMIC_SEQ_CST);
-			qm_port->cached_ldb_credits -= batch_size;
+		if (qm_port->dlb2->version == DLB2_HW_V2) {
+			qm_port->cached_ldb_credits += num;
+			if (qm_port->cached_ldb_credits >= 2 * batch_size) {
+				__atomic_fetch_add(
+					qm_port->credit_pool[DLB2_LDB_QUEUE],
+					batch_size, __ATOMIC_SEQ_CST);
+				qm_port->cached_ldb_credits -= batch_size;
+			}
+		} else {
+			qm_port->cached_credits += num;
+			if (qm_port->cached_credits >= 2 * batch_size) {
+				__atomic_fetch_add(
+				      qm_port->credit_pool[DLB2_COMBINED_POOL],
+				      batch_size, __ATOMIC_SEQ_CST);
+				qm_port->cached_credits -= batch_size;
+			}
 		}
 	} else {
-		qm_port->cached_dir_credits += num;
-		if (qm_port->cached_dir_credits >= 2 * batch_size) {
-			__atomic_fetch_add(
-				qm_port->credit_pool[DLB2_DIR_QUEUE],
-				batch_size, __ATOMIC_SEQ_CST);
-			qm_port->cached_dir_credits -= batch_size;
+		if (qm_port->dlb2->version == DLB2_HW_V2) {
+			qm_port->cached_dir_credits += num;
+			if (qm_port->cached_dir_credits >= 2 * batch_size) {
+				__atomic_fetch_add(
+					qm_port->credit_pool[DLB2_DIR_QUEUE],
+					batch_size, __ATOMIC_SEQ_CST);
+				qm_port->cached_dir_credits -= batch_size;
+			}
+		} else {
+			qm_port->cached_credits += num;
+			if (qm_port->cached_credits >= 2 * batch_size) {
+				__atomic_fetch_add(
+				      qm_port->credit_pool[DLB2_COMBINED_POOL],
+				      batch_size, __ATOMIC_SEQ_CST);
+				qm_port->cached_credits -= batch_size;
+			}
 		}
 	}
 }
-- 
2.23.0