From: Timothy McDaniel <timothy.mcdaniel@intel.com>
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
	harry.van.haaren@intel.com, jerinj@marvell.com,
	thomas@monjalon.net
Subject: [dpdk-dev] [PATCH v3 06/26] event/dlb2: add v2.5 domain reset
Date: Tue, 13 Apr 2021 15:14:36 -0500
Message-ID: <1618344896-2090-7-git-send-email-timothy.mcdaniel@intel.com>
In-Reply-To: <1618344896-2090-1-git-send-email-timothy.mcdaniel@intel.com>

Convert the domain reset code to the new register map and the new
register access macros.
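
The shape of the conversion can be seen by comparing
dlb2_dir_port_cq_disable() in the old and new files: bitfield unions
become flat u32 values manipulated with the DLB2_BIT_SET() macro, and
register offsets are parameterized by the hardware version. Old:

	union dlb2_lsp_cq_dir_dsbl reg;

	reg.field.disabled = 1;
	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);

New:

	u32 reg = 0;

	DLB2_BIT_SET(reg, DLB2_LSP_CQ_DIR_DSBL_DISABLED);
	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);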

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 .../event/dlb2/pf/base/dlb2_hw_types_new.h    |    1 +
 drivers/event/dlb2/pf/base/dlb2_resource.c    | 1494 ----------
 .../event/dlb2/pf/base/dlb2_resource_new.c    | 2562 +++++++++++++++++
 3 files changed, 2563 insertions(+), 1494 deletions(-)
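
Note: register reads follow the same conversion, as the hunks below
show. The old dlb2_dir_cq_token_count() read through a bitfield union:

	union dlb2_lsp_cq_dir_tkn_cnt r0;

	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id));

	return r0.field.count - port->init_tkn_cnt;

while the new version extracts the field with DLB2_BITS_GET():

	u32 cnt;

	cnt = DLB2_CSR_RD(hw,
			  DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id));

	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_DIR_TKN_CNT_COUNT) -
	       port->init_tkn_cnt;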

diff --git a/drivers/event/dlb2/pf/base/dlb2_hw_types_new.h b/drivers/event/dlb2/pf/base/dlb2_hw_types_new.h
index 4a4185acd..4a6037775 100644
--- a/drivers/event/dlb2/pf/base/dlb2_hw_types_new.h
+++ b/drivers/event/dlb2/pf/base/dlb2_hw_types_new.h
@@ -181,6 +181,7 @@ struct dlb2_ldb_port {
 	u32 hist_list_entry_base;
 	u32 hist_list_entry_limit;
 	u32 ref_cnt;
+	u8 cq_depth;
 	u8 init_tkn_cnt;
 	u8 num_pending_removals;
 	u8 num_mappings;
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c
index 99c3d031d..041aeaeee 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c
@@ -65,69 +65,6 @@ static inline void dlb2_flush_csr(struct dlb2_hw *hw)
 	DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS);
 }
 
-static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
-				     struct dlb2_dir_pq_pair *port)
-{
-	union dlb2_lsp_cq_dir_dsbl reg;
-
-	reg.field.disabled = 1;
-
-	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);
-
-	dlb2_flush_csr(hw);
-}
-
-static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
-				   struct dlb2_dir_pq_pair *port)
-{
-	union dlb2_lsp_cq_dir_tkn_cnt r0;
-
-	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id));
-
-	/*
-	 * Account for the initial token count, which is used in order to
-	 * provide a CQ with depth less than 8.
-	 */
-
-	return r0.field.count - port->init_tkn_cnt;
-}
-
-static int dlb2_drain_dir_cq(struct dlb2_hw *hw,
-			     struct dlb2_dir_pq_pair *port)
-{
-	unsigned int port_id = port->id.phys_id;
-	u32 cnt;
-
-	/* Return any outstanding tokens */
-	cnt = dlb2_dir_cq_token_count(hw, port);
-
-	if (cnt != 0) {
-		struct dlb2_hcw hcw_mem[8], *hcw;
-		void  *pp_addr;
-
-		pp_addr = os_map_producer_port(hw, port_id, false);
-
-		/* Point hcw to a 64B-aligned location */
-		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
-
-		/*
-		 * Program the first HCW for a batch token return and
-		 * the rest as NOOPS
-		 */
-		memset(hcw, 0, 4 * sizeof(*hcw));
-		hcw->cq_token = 1;
-		hcw->lock_id = cnt - 1;
-
-		dlb2_movdir64b(pp_addr, hcw);
-
-		os_fence_hcw(hw, pp_addr);
-
-		os_unmap_producer_port(hw, pp_addr);
-	}
-
-	return 0;
-}
-
 static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
 				    struct dlb2_dir_pq_pair *port)
 {
@@ -140,37 +77,6 @@ static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
 	dlb2_flush_csr(hw);
 }
 
-static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
-				     struct dlb2_hw_domain *domain,
-				     bool toggle_port)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_dir_pq_pair *port;
-	int ret;
-	RTE_SET_USED(iter);
-
-	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
-		/*
-		 * Can't drain a port if it's not configured, and there's
-		 * nothing to drain if its queue is unconfigured.
-		 */
-		if (!port->port_configured || !port->queue_configured)
-			continue;
-
-		if (toggle_port)
-			dlb2_dir_port_cq_disable(hw, port);
-
-		ret = dlb2_drain_dir_cq(hw, port);
-		if (ret < 0)
-			return ret;
-
-		if (toggle_port)
-			dlb2_dir_port_cq_enable(hw, port);
-	}
-
-	return 0;
-}
-
 static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
 				struct dlb2_dir_pq_pair *queue)
 {
@@ -182,63 +88,6 @@ static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
 	return r0.field.count;
 }
 
-static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
-				    struct dlb2_dir_pq_pair *queue)
-{
-	return dlb2_dir_queue_depth(hw, queue) == 0;
-}
-
-static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
-					 struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_dir_pq_pair *queue;
-	RTE_SET_USED(iter);
-
-	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
-		if (!dlb2_dir_queue_is_empty(hw, queue))
-			return false;
-	}
-
-	return true;
-}
-
-static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
-					struct dlb2_hw_domain *domain)
-{
-	int i, ret;
-
-	/* If the domain hasn't been started, there's no traffic to drain */
-	if (!domain->started)
-		return 0;
-
-	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
-		ret = dlb2_domain_drain_dir_cqs(hw, domain, true);
-		if (ret < 0)
-			return ret;
-
-		if (dlb2_domain_dir_queues_empty(hw, domain))
-			break;
-	}
-
-	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
-		DLB2_HW_ERR(hw,
-			    "[%s()] Internal error: failed to empty queues\n",
-			    __func__);
-		return -EFAULT;
-	}
-
-	/*
-	 * Drain the CQs one more time. For the queues to go empty, they would
-	 * have scheduled one or more QEs.
-	 */
-	ret = dlb2_domain_drain_dir_cqs(hw, domain, true);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
 static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
 				    struct dlb2_ldb_port *port)
 {
@@ -271,105 +120,6 @@ static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
 	dlb2_flush_csr(hw);
 }
 
-static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
-				      struct dlb2_ldb_port *port)
-{
-	union dlb2_lsp_cq_ldb_infl_cnt r0;
-
-	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
-
-	return r0.field.count;
-}
-
-static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
-				   struct dlb2_ldb_port *port)
-{
-	union dlb2_lsp_cq_ldb_tkn_cnt r0;
-
-	r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id));
-
-	/*
-	 * Account for the initial token count, which is used in order to
-	 * provide a CQ with depth less than 8.
-	 */
-
-	return r0.field.token_count - port->init_tkn_cnt;
-}
-
-static int dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
-{
-	u32 infl_cnt, tkn_cnt;
-	unsigned int i;
-
-	infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
-	tkn_cnt = dlb2_ldb_cq_token_count(hw, port);
-
-	if (infl_cnt || tkn_cnt) {
-		struct dlb2_hcw hcw_mem[8], *hcw;
-		void  *pp_addr;
-
-		pp_addr = os_map_producer_port(hw, port->id.phys_id, true);
-
-		/* Point hcw to a 64B-aligned location */
-		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
-
-		/*
-		 * Program the first HCW for a completion and token return and
-		 * the other HCWs as NOOPS
-		 */
-
-		memset(hcw, 0, 4 * sizeof(*hcw));
-		hcw->qe_comp = (infl_cnt > 0);
-		hcw->cq_token = (tkn_cnt > 0);
-		hcw->lock_id = tkn_cnt - 1;
-
-		/* Return tokens in the first HCW */
-		dlb2_movdir64b(pp_addr, hcw);
-
-		hcw->cq_token = 0;
-
-		/* Issue remaining completions (if any) */
-		for (i = 1; i < infl_cnt; i++)
-			dlb2_movdir64b(pp_addr, hcw);
-
-		os_fence_hcw(hw, pp_addr);
-
-		os_unmap_producer_port(hw, pp_addr);
-	}
-
-	return 0;
-}
-
-static int dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
-				     struct dlb2_hw_domain *domain,
-				     bool toggle_port)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_port *port;
-	int ret, i;
-	RTE_SET_USED(iter);
-
-	/* If the domain hasn't been started, there's no traffic to drain */
-	if (!domain->started)
-		return 0;
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
-			if (toggle_port)
-				dlb2_ldb_port_cq_disable(hw, port);
-
-			ret = dlb2_drain_ldb_cq(hw, port);
-			if (ret < 0)
-				return ret;
-
-			if (toggle_port)
-				dlb2_ldb_port_cq_enable(hw, port);
-		}
-	}
-
-	return 0;
-}
-
 static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
 				struct dlb2_ldb_queue *queue)
 {
@@ -388,90 +138,6 @@ static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
 	return r0.field.count + r1.field.count + r2.field.count;
 }
 
-static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
-				    struct dlb2_ldb_queue *queue)
-{
-	return dlb2_ldb_queue_depth(hw, queue) == 0;
-}
-
-static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
-					    struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_queue *queue;
-	RTE_SET_USED(iter);
-
-	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
-		if (queue->num_mappings == 0)
-			continue;
-
-		if (!dlb2_ldb_queue_is_empty(hw, queue))
-			return false;
-	}
-
-	return true;
-}
-
-static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
-					   struct dlb2_hw_domain *domain)
-{
-	int i, ret;
-
-	/* If the domain hasn't been started, there's no traffic to drain */
-	if (!domain->started)
-		return 0;
-
-	if (domain->num_pending_removals > 0) {
-		DLB2_HW_ERR(hw,
-			    "[%s()] Internal error: failed to unmap domain queues\n",
-			    __func__);
-		return -EFAULT;
-	}
-
-	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
-		ret = dlb2_domain_drain_ldb_cqs(hw, domain, true);
-		if (ret < 0)
-			return ret;
-
-		if (dlb2_domain_mapped_queues_empty(hw, domain))
-			break;
-	}
-
-	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
-		DLB2_HW_ERR(hw,
-			    "[%s()] Internal error: failed to empty queues\n",
-			    __func__);
-		return -EFAULT;
-	}
-
-	/*
-	 * Drain the CQs one more time. For the queues to go empty, they would
-	 * have scheduled one or more QEs.
-	 */
-	ret = dlb2_domain_drain_ldb_cqs(hw, domain, true);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
-				       struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_port *port;
-	int i;
-	RTE_SET_USED(iter);
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
-			port->enabled = true;
-
-			dlb2_ldb_port_cq_enable(hw, port);
-		}
-	}
-}
-
 static struct dlb2_ldb_queue *
 dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
 			   u32 id,
@@ -1455,1166 +1121,6 @@ dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
 	return domain->num_pending_removals;
 }
 
-static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
-					struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_port *port;
-	int i;
-	RTE_SET_USED(iter);
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
-			port->enabled = false;
-
-			dlb2_ldb_port_cq_disable(hw, port);
-		}
-	}
-}
-
-static void dlb2_log_reset_domain(struct dlb2_hw *hw,
-				  u32 domain_id,
-				  bool vdev_req,
-				  unsigned int vdev_id)
-{
-	DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
-	if (vdev_req)
-		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
-	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
-}
-
-static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
-					 struct dlb2_hw_domain *domain,
-					 unsigned int vdev_id)
-{
-	struct dlb2_list_entry *iter;
-	union dlb2_sys_vf_dir_vpp_v r1;
-	struct dlb2_dir_pq_pair *port;
-	RTE_SET_USED(iter);
-
-	r1.field.vpp_v = 0;
-
-	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
-		unsigned int offs;
-		u32 virt_id;
-
-		if (hw->virt_mode == DLB2_VIRT_SRIOV)
-			virt_id = port->id.virt_id;
-		else
-			virt_id = port->id.phys_id;
-
-		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
-
-		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r1.val);
-	}
-}
-
-static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
-					 struct dlb2_hw_domain *domain,
-					 unsigned int vdev_id)
-{
-	struct dlb2_list_entry *iter;
-	union dlb2_sys_vf_ldb_vpp_v r1;
-	struct dlb2_ldb_port *port;
-	int i;
-	RTE_SET_USED(iter);
-
-	r1.field.vpp_v = 0;
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
-			unsigned int offs;
-			u32 virt_id;
-
-			if (hw->virt_mode == DLB2_VIRT_SRIOV)
-				virt_id = port->id.virt_id;
-			else
-				virt_id = port->id.phys_id;
-
-			offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
-
-			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), r1.val);
-		}
-	}
-}
-
-static void
-dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
-					struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	union dlb2_chp_ldb_cq_int_enb r0 = { {0} };
-	union dlb2_chp_ldb_cq_wd_enb r1 = { {0} };
-	struct dlb2_ldb_port *port;
-	int i;
-	RTE_SET_USED(iter);
-
-	r0.field.en_tim = 0;
-	r0.field.en_depth = 0;
-
-	r1.field.wd_enable = 0;
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
-			DLB2_CSR_WR(hw,
-				    DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),
-				    r0.val);
-
-			DLB2_CSR_WR(hw,
-				    DLB2_CHP_LDB_CQ_WD_ENB(port->id.phys_id),
-				    r1.val);
-		}
-	}
-}
-
-static void
-dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
-					struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	union dlb2_chp_dir_cq_int_enb r0 = { {0} };
-	union dlb2_chp_dir_cq_wd_enb r1 = { {0} };
-	struct dlb2_dir_pq_pair *port;
-	RTE_SET_USED(iter);
-
-	r0.field.en_tim = 0;
-	r0.field.en_depth = 0;
-
-	r1.field.wd_enable = 0;
-
-	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
-		DLB2_CSR_WR(hw,
-			    DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),
-			    r0.val);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_CHP_DIR_CQ_WD_ENB(port->id.phys_id),
-			    r1.val);
-	}
-}
-
-static void
-dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
-					  struct dlb2_hw_domain *domain)
-{
-	int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_queue *queue;
-	RTE_SET_USED(iter);
-
-	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
-		union dlb2_sys_ldb_vasqid_v r0 = { {0} };
-		union dlb2_sys_ldb_qid2vqid r1 = { {0} };
-		union dlb2_sys_vf_ldb_vqid_v r2 = { {0} };
-		union dlb2_sys_vf_ldb_vqid2qid r3 = { {0} };
-		int idx;
-
-		idx = domain_offset + queue->id.phys_id;
-
-		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), r0.val);
-
-		if (queue->id.vdev_owned) {
-			DLB2_CSR_WR(hw,
-				    DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
-				    r1.val);
-
-			idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
-				queue->id.virt_id;
-
-			DLB2_CSR_WR(hw,
-				    DLB2_SYS_VF_LDB_VQID_V(idx),
-				    r2.val);
-
-			DLB2_CSR_WR(hw,
-				    DLB2_SYS_VF_LDB_VQID2QID(idx),
-				    r3.val);
-		}
-	}
-}
-
-static void
-dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
-					  struct dlb2_hw_domain *domain)
-{
-	int domain_offset = domain->id.phys_id *
-		DLB2_MAX_NUM_DIR_PORTS(hw->ver);
-	struct dlb2_list_entry *iter;
-	struct dlb2_dir_pq_pair *queue;
-	RTE_SET_USED(iter);
-
-	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
-		union dlb2_sys_dir_vasqid_v r0 = { {0} };
-		union dlb2_sys_vf_dir_vqid_v r1 = { {0} };
-		union dlb2_sys_vf_dir_vqid2qid r2 = { {0} };
-		int idx;
-
-		idx = domain_offset + queue->id.phys_id;
-
-		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), r0.val);
-
-		if (queue->id.vdev_owned) {
-			idx = queue->id.vdev_id *
-				DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
-				queue->id.virt_id;
-
-			DLB2_CSR_WR(hw,
-				    DLB2_SYS_VF_DIR_VQID_V(idx),
-				    r1.val);
-
-			DLB2_CSR_WR(hw,
-				    DLB2_SYS_VF_DIR_VQID2QID(idx),
-				    r2.val);
-		}
-	}
-}
-
-static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
-					       struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	union dlb2_chp_sn_chk_enbl r1;
-	struct dlb2_ldb_port *port;
-	int i;
-	RTE_SET_USED(iter);
-
-	r1.field.en = 0;
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
-			DLB2_CSR_WR(hw,
-				    DLB2_CHP_SN_CHK_ENBL(port->id.phys_id),
-				    r1.val);
-	}
-}
-
-static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
-						 struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_port *port;
-	int i;
-	RTE_SET_USED(iter);
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
-			int i;
-
-			for (i = 0; i < DLB2_MAX_CQ_COMP_CHECK_LOOPS; i++) {
-				if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
-					break;
-			}
-
-			if (i == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
-				DLB2_HW_ERR(hw,
-					    "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
-					    __func__, port->id.phys_id);
-				return -EFAULT;
-			}
-		}
-	}
-
-	return 0;
-}
-
-static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
-					struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_dir_pq_pair *port;
-	RTE_SET_USED(iter);
-
-	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
-		port->enabled = false;
-
-		dlb2_dir_port_cq_disable(hw, port);
-	}
-}
-
-static void
-dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
-				       struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_dir_pq_pair *port;
-	union dlb2_sys_dir_pp_v r1;
-	RTE_SET_USED(iter);
-
-	r1.field.pp_v = 0;
-
-	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
-		DLB2_CSR_WR(hw,
-			    DLB2_SYS_DIR_PP_V(port->id.phys_id),
-			    r1.val);
-}
-
-static void
-dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
-				       struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	union dlb2_sys_ldb_pp_v r1;
-	struct dlb2_ldb_port *port;
-	int i;
-	RTE_SET_USED(iter);
-
-	r1.field.pp_v = 0;
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
-			DLB2_CSR_WR(hw,
-				    DLB2_SYS_LDB_PP_V(port->id.phys_id),
-				    r1.val);
-	}
-}
-
-static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
-					    struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_dir_pq_pair *dir_port;
-	struct dlb2_ldb_port *ldb_port;
-	struct dlb2_ldb_queue *queue;
-	int i;
-	RTE_SET_USED(iter);
-
-	/*
-	 * Confirm that all the domain's queue's inflight counts and AQED
-	 * active counts are 0.
-	 */
-	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
-		if (!dlb2_ldb_queue_is_empty(hw, queue)) {
-			DLB2_HW_ERR(hw,
-				    "[%s()] Internal error: failed to empty ldb queue %d\n",
-				    __func__, queue->id.phys_id);
-			return -EFAULT;
-		}
-	}
-
-	/* Confirm that all the domain's CQs inflight and token counts are 0. */
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
-			if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
-			    dlb2_ldb_cq_token_count(hw, ldb_port)) {
-				DLB2_HW_ERR(hw,
-					    "[%s()] Internal error: failed to empty ldb port %d\n",
-					    __func__, ldb_port->id.phys_id);
-				return -EFAULT;
-			}
-		}
-	}
-
-	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
-		if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
-			DLB2_HW_ERR(hw,
-				    "[%s()] Internal error: failed to empty dir queue %d\n",
-				    __func__, dir_port->id.phys_id);
-			return -EFAULT;
-		}
-
-		if (dlb2_dir_cq_token_count(hw, dir_port)) {
-			DLB2_HW_ERR(hw,
-				    "[%s()] Internal error: failed to empty dir port %d\n",
-				    __func__, dir_port->id.phys_id);
-			return -EFAULT;
-		}
-	}
-
-	return 0;
-}
-
-static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
-						   struct dlb2_ldb_port *port)
-{
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
-		    DLB2_SYS_LDB_PP2VAS_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_LDB_CQ2VAS(port->id.phys_id),
-		    DLB2_CHP_LDB_CQ2VAS_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
-		    DLB2_SYS_LDB_PP2VDEV_RST);
-
-	if (port->id.vdev_owned) {
-		unsigned int offs;
-		u32 virt_id;
-
-		/*
-		 * DLB uses producer port address bits 17:12 to determine the
-		 * producer port ID. In Scalable IOV mode, PP accesses come
-		 * through the PF MMIO window for the physical producer port,
-		 * so for translation purposes the virtual and physical port
-		 * IDs are equal.
-		 */
-		if (hw->virt_mode == DLB2_VIRT_SRIOV)
-			virt_id = port->id.virt_id;
-		else
-			virt_id = port->id.phys_id;
-
-		offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
-
-		DLB2_CSR_WR(hw,
-			    DLB2_SYS_VF_LDB_VPP2PP(offs),
-			    DLB2_SYS_VF_LDB_VPP2PP_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_SYS_VF_LDB_VPP_V(offs),
-			    DLB2_SYS_VF_LDB_VPP_V_RST);
-	}
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_LDB_PP_V(port->id.phys_id),
-		    DLB2_SYS_LDB_PP_V_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id),
-		    DLB2_LSP_CQ_LDB_DSBL_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_LDB_CQ_DEPTH(port->id.phys_id),
-		    DLB2_CHP_LDB_CQ_DEPTH_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ_LDB_INFL_LIM(port->id.phys_id),
-		    DLB2_LSP_CQ_LDB_INFL_LIM_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_HIST_LIST_LIM(port->id.phys_id),
-		    DLB2_CHP_HIST_LIST_LIM_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_HIST_LIST_BASE(port->id.phys_id),
-		    DLB2_CHP_HIST_LIST_BASE_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_HIST_LIST_POP_PTR(port->id.phys_id),
-		    DLB2_CHP_HIST_LIST_POP_PTR_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id),
-		    DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id.phys_id),
-		    DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_LDB_CQ_TMR_THRSH(port->id.phys_id),
-		    DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),
-		    DLB2_CHP_LDB_CQ_INT_ENB_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
-		    DLB2_SYS_LDB_CQ_ISR_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id),
-		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id),
-		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_LDB_CQ_WPTR(port->id.phys_id),
-		    DLB2_CHP_LDB_CQ_WPTR_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
-		    DLB2_LSP_CQ_LDB_TKN_CNT_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
-		    DLB2_SYS_LDB_CQ_ADDR_L_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
-		    DLB2_SYS_LDB_CQ_ADDR_U_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
-		    DLB2_SYS_LDB_CQ_AT_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_LDB_CQ_PASID(port->id.phys_id),
-		    DLB2_SYS_LDB_CQ_PASID_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
-		    DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(port->id.phys_id),
-		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(port->id.phys_id),
-		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ2QID0(port->id.phys_id),
-		    DLB2_LSP_CQ2QID0_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ2QID1(port->id.phys_id),
-		    DLB2_LSP_CQ2QID1_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ2PRIOV(port->id.phys_id),
-		    DLB2_LSP_CQ2PRIOV_RST);
-}
-
-static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
-						 struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_port *port;
-	int i;
-	RTE_SET_USED(iter);
-
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
-			__dlb2_domain_reset_ldb_port_registers(hw, port);
-	}
-}
-
-static void
-__dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
-				       struct dlb2_dir_pq_pair *port)
-{
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),
-		    DLB2_CHP_DIR_CQ2VAS_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id),
-		    DLB2_LSP_CQ_DIR_DSBL_RST);
-
-	DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_DIR_CQ_DEPTH(port->id.phys_id),
-		    DLB2_CHP_DIR_CQ_DEPTH_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id.phys_id),
-		    DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_DIR_CQ_TMR_THRSH(port->id.phys_id),
-		    DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),
-		    DLB2_CHP_DIR_CQ_INT_ENB_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
-		    DLB2_SYS_DIR_CQ_ISR_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id),
-		    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id),
-		    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_DIR_CQ_WPTR(port->id.phys_id),
-		    DLB2_CHP_DIR_CQ_WPTR_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
-		    DLB2_LSP_CQ_DIR_TKN_CNT_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
-		    DLB2_SYS_DIR_CQ_ADDR_L_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
-		    DLB2_SYS_DIR_CQ_ADDR_U_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
-		    DLB2_SYS_DIR_CQ_AT_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_DIR_CQ_PASID(port->id.phys_id),
-		    DLB2_SYS_DIR_CQ_PASID_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
-		    DLB2_SYS_DIR_CQ_FMT_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
-		    DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(port->id.phys_id),
-		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(port->id.phys_id),
-		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
-		    DLB2_SYS_DIR_PP2VAS_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),
-		    DLB2_CHP_DIR_CQ2VAS_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
-		    DLB2_SYS_DIR_PP2VDEV_RST);
-
-	if (port->id.vdev_owned) {
-		unsigned int offs;
-		u32 virt_id;
-
-		/*
-		 * DLB uses producer port address bits 17:12 to determine the
-		 * producer port ID. In Scalable IOV mode, PP accesses come
-		 * through the PF MMIO window for the physical producer port,
-		 * so for translation purposes the virtual and physical port
-		 * IDs are equal.
-		 */
-		if (hw->virt_mode == DLB2_VIRT_SRIOV)
-			virt_id = port->id.virt_id;
-		else
-			virt_id = port->id.phys_id;
-
-		offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver)
-			+ virt_id;
-
-		DLB2_CSR_WR(hw,
-			    DLB2_SYS_VF_DIR_VPP2PP(offs),
-			    DLB2_SYS_VF_DIR_VPP2PP_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_SYS_VF_DIR_VPP_V(offs),
-			    DLB2_SYS_VF_DIR_VPP_V_RST);
-	}
-
-	DLB2_CSR_WR(hw,
-		    DLB2_SYS_DIR_PP_V(port->id.phys_id),
-		    DLB2_SYS_DIR_PP_V_RST);
-}
-
-static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
-						 struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_dir_pq_pair *port;
-	RTE_SET_USED(iter);
-
-	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
-		__dlb2_domain_reset_dir_port_registers(hw, port);
-}
-
-static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
-						  struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_queue *queue;
-	RTE_SET_USED(iter);
-
-	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
-		unsigned int queue_id = queue->id.phys_id;
-		int i;
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(queue_id),
-			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(queue_id),
-			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(queue_id),
-			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(queue_id),
-			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_NALDB_MAX_DEPTH(queue_id),
-			    DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_LDB_INFL_LIM(queue_id),
-			    DLB2_LSP_QID_LDB_INFL_LIM_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_AQED_ACTIVE_LIM(queue_id),
-			    DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_ATM_DEPTH_THRSH(queue_id),
-			    DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_NALDB_DEPTH_THRSH(queue_id),
-			    DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_SYS_LDB_QID_ITS(queue_id),
-			    DLB2_SYS_LDB_QID_ITS_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_CHP_ORD_QID_SN(queue_id),
-			    DLB2_CHP_ORD_QID_SN_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_CHP_ORD_QID_SN_MAP(queue_id),
-			    DLB2_CHP_ORD_QID_SN_MAP_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_SYS_LDB_QID_V(queue_id),
-			    DLB2_SYS_LDB_QID_V_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_SYS_LDB_QID_CFG_V(queue_id),
-			    DLB2_SYS_LDB_QID_CFG_V_RST);
-
-		if (queue->sn_cfg_valid) {
-			u32 offs[2];
-
-			offs[0] = DLB2_RO_PIPE_GRP_0_SLT_SHFT(queue->sn_slot);
-			offs[1] = DLB2_RO_PIPE_GRP_1_SLT_SHFT(queue->sn_slot);
-
-			DLB2_CSR_WR(hw,
-				    offs[queue->sn_group],
-				    DLB2_RO_PIPE_GRP_0_SLT_SHFT_RST);
-		}
-
-		for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
-			DLB2_CSR_WR(hw,
-				    DLB2_LSP_QID2CQIDIX(queue_id, i),
-				    DLB2_LSP_QID2CQIDIX_00_RST);
-
-			DLB2_CSR_WR(hw,
-				    DLB2_LSP_QID2CQIDIX2(queue_id, i),
-				    DLB2_LSP_QID2CQIDIX2_00_RST);
-
-			DLB2_CSR_WR(hw,
-				    DLB2_ATM_QID2CQIDIX(queue_id, i),
-				    DLB2_ATM_QID2CQIDIX_00_RST);
-		}
-	}
-}
-
-static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
-						  struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_dir_pq_pair *queue;
-	RTE_SET_USED(iter);
-
-	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_DIR_MAX_DEPTH(queue->id.phys_id),
-			    DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(queue->id.phys_id),
-			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(queue->id.phys_id),
-			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_LSP_QID_DIR_DEPTH_THRSH(queue->id.phys_id),
-			    DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
-			    DLB2_SYS_DIR_QID_ITS_RST);
-
-		DLB2_CSR_WR(hw,
-			    DLB2_SYS_DIR_QID_V(queue->id.phys_id),
-			    DLB2_SYS_DIR_QID_V_RST);
-	}
-}
-
-static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
-					struct dlb2_hw_domain *domain)
-{
-	dlb2_domain_reset_ldb_port_registers(hw, domain);
-
-	dlb2_domain_reset_dir_port_registers(hw, domain);
-
-	dlb2_domain_reset_ldb_queue_registers(hw, domain);
-
-	dlb2_domain_reset_dir_queue_registers(hw, domain);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
-		    DLB2_CHP_CFG_LDB_VAS_CRD_RST);
-
-	DLB2_CSR_WR(hw,
-		    DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
-		    DLB2_CHP_CFG_DIR_VAS_CRD_RST);
-}
-
-static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
-					    struct dlb2_hw_domain *domain)
-{
-	struct dlb2_dir_pq_pair *tmp_dir_port;
-	struct dlb2_ldb_queue *tmp_ldb_queue;
-	struct dlb2_ldb_port *tmp_ldb_port;
-	struct dlb2_list_entry *iter1;
-	struct dlb2_list_entry *iter2;
-	struct dlb2_function_resources *rsrcs;
-	struct dlb2_dir_pq_pair *dir_port;
-	struct dlb2_ldb_queue *ldb_queue;
-	struct dlb2_ldb_port *ldb_port;
-	struct dlb2_list_head *list;
-	int ret, i;
-	RTE_SET_USED(tmp_dir_port);
-	RTE_SET_USED(tmp_ldb_queue);
-	RTE_SET_USED(tmp_ldb_port);
-	RTE_SET_USED(iter1);
-	RTE_SET_USED(iter2);
-
-	rsrcs = domain->parent_func;
-
-	/* Move the domain's ldb queues to the function's avail list */
-	list = &domain->used_ldb_queues;
-	DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
-		if (ldb_queue->sn_cfg_valid) {
-			struct dlb2_sn_group *grp;
-
-			grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
-
-			dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
-			ldb_queue->sn_cfg_valid = false;
-		}
-
-		ldb_queue->owned = false;
-		ldb_queue->num_mappings = 0;
-		ldb_queue->num_pending_additions = 0;
-
-		dlb2_list_del(&domain->used_ldb_queues,
-			      &ldb_queue->domain_list);
-		dlb2_list_add(&rsrcs->avail_ldb_queues,
-			      &ldb_queue->func_list);
-		rsrcs->num_avail_ldb_queues++;
-	}
-
-	list = &domain->avail_ldb_queues;
-	DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
-		ldb_queue->owned = false;
-
-		dlb2_list_del(&domain->avail_ldb_queues,
-			      &ldb_queue->domain_list);
-		dlb2_list_add(&rsrcs->avail_ldb_queues,
-			      &ldb_queue->func_list);
-		rsrcs->num_avail_ldb_queues++;
-	}
-
-	/* Move the domain's ldb ports to the function's avail list */
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		list = &domain->used_ldb_ports[i];
-		DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
-				       iter1, iter2) {
-			int j;
-
-			ldb_port->owned = false;
-			ldb_port->configured = false;
-			ldb_port->num_pending_removals = 0;
-			ldb_port->num_mappings = 0;
-			ldb_port->init_tkn_cnt = 0;
-			for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
-				ldb_port->qid_map[j].state =
-					DLB2_QUEUE_UNMAPPED;
-
-			dlb2_list_del(&domain->used_ldb_ports[i],
-				      &ldb_port->domain_list);
-			dlb2_list_add(&rsrcs->avail_ldb_ports[i],
-				      &ldb_port->func_list);
-			rsrcs->num_avail_ldb_ports[i]++;
-		}
-
-		list = &domain->avail_ldb_ports[i];
-		DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
-				       iter1, iter2) {
-			ldb_port->owned = false;
-
-			dlb2_list_del(&domain->avail_ldb_ports[i],
-				      &ldb_port->domain_list);
-			dlb2_list_add(&rsrcs->avail_ldb_ports[i],
-				      &ldb_port->func_list);
-			rsrcs->num_avail_ldb_ports[i]++;
-		}
-	}
-
-	/* Move the domain's dir ports to the function's avail list */
-	list = &domain->used_dir_pq_pairs;
-	DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
-		dir_port->owned = false;
-		dir_port->port_configured = false;
-		dir_port->init_tkn_cnt = 0;
-
-		dlb2_list_del(&domain->used_dir_pq_pairs,
-			      &dir_port->domain_list);
-
-		dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
-			      &dir_port->func_list);
-		rsrcs->num_avail_dir_pq_pairs++;
-	}
-
-	list = &domain->avail_dir_pq_pairs;
-	DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
-		dir_port->owned = false;
-
-		dlb2_list_del(&domain->avail_dir_pq_pairs,
-			      &dir_port->domain_list);
-
-		dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
-			      &dir_port->func_list);
-		rsrcs->num_avail_dir_pq_pairs++;
-	}
-
-	/* Return hist list entries to the function */
-	ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
-				    domain->hist_list_entry_base,
-				    domain->total_hist_list_entries);
-	if (ret) {
-		DLB2_HW_ERR(hw,
-			    "[%s()] Internal error: domain hist list base doesn't match the function's bitmap.\n",
-			    __func__);
-		return ret;
-	}
-
-	domain->total_hist_list_entries = 0;
-	domain->avail_hist_list_entries = 0;
-	domain->hist_list_entry_base = 0;
-	domain->hist_list_entry_offset = 0;
-
-	rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
-	domain->num_ldb_credits = 0;
-
-	rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
-	domain->num_dir_credits = 0;
-
-	rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
-	rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
-	domain->num_avail_aqed_entries = 0;
-	domain->num_used_aqed_entries = 0;
-
-	domain->num_pending_removals = 0;
-	domain->num_pending_additions = 0;
-	domain->configured = false;
-	domain->started = false;
-
-	/*
-	 * Move the domain out of the used_domains list and back to the
-	 * function's avail_domains list.
-	 */
-	dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
-	dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
-	rsrcs->num_avail_domains++;
-
-	return 0;
-}
-
-static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
-					    struct dlb2_hw_domain *domain,
-					    struct dlb2_ldb_queue *queue)
-{
-	struct dlb2_ldb_port *port;
-	int ret, i;
-
-	/* If a domain has LDB queues, it must have LDB ports */
-	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
-		if (!dlb2_list_empty(&domain->used_ldb_ports[i]))
-			break;
-	}
-
-	if (i == DLB2_NUM_COS_DOMAINS) {
-		DLB2_HW_ERR(hw,
-			    "[%s()] Internal error: No configured LDB ports\n",
-			    __func__);
-		return -EFAULT;
-	}
-
-	port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i], typeof(*port));
-
-	/* If necessary, free up a QID slot in this CQ */
-	if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
-		struct dlb2_ldb_queue *mapped_queue;
-
-		mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
-
-		ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
-		if (ret)
-			return ret;
-	}
-
-	ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
-	if (ret)
-		return ret;
-
-	return dlb2_domain_drain_mapped_queues(hw, domain);
-}
-
-static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
-					     struct dlb2_hw_domain *domain)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_ldb_queue *queue;
-	int ret;
-	RTE_SET_USED(iter);
-
-	/* If the domain hasn't been started, there's no traffic to drain */
-	if (!domain->started)
-		return 0;
-
-	/*
-	 * Pre-condition: the unattached queue must not have any outstanding
-	 * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
-	 * prior to this in dlb2_domain_drain_mapped_queues().
-	 */
-	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
-		if (queue->num_mappings != 0 ||
-		    dlb2_ldb_queue_is_empty(hw, queue))
-			continue;
-
-		ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-/**
- * dlb2_reset_domain() - Reset a DLB scheduling domain and its associated
- *	hardware resources.
- * @hw:	Contains the current state of the DLB2 hardware.
- * @domain_id: Domain ID
- * @vdev_req: Request came from a virtual device.
- * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
- *
- * Note: User software *must* stop sending to this domain's producer ports
- * before invoking this function, otherwise undefined behavior will result.
- *
- * Return: returns < 0 on error, 0 otherwise.
- */
-int dlb2_reset_domain(struct dlb2_hw *hw,
-		      u32 domain_id,
-		      bool vdev_req,
-		      unsigned int vdev_id)
-{
-	struct dlb2_hw_domain *domain;
-	int ret;
-
-	dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
-
-	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
-
-	if (domain  == NULL || !domain->configured)
-		return -EINVAL;
-
-	/* Disable VPPs */
-	if (vdev_req) {
-		dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
-
-		dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
-	}
-
-	/* Disable CQ interrupts */
-	dlb2_domain_disable_dir_port_interrupts(hw, domain);
-
-	dlb2_domain_disable_ldb_port_interrupts(hw, domain);
-
-	/*
-	 * For each queue owned by this domain, disable its write permissions to
-	 * cause any traffic sent to it to be dropped. Well-behaved software
-	 * should not be sending QEs at this point.
-	 */
-	dlb2_domain_disable_dir_queue_write_perms(hw, domain);
-
-	dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
-
-	/* Turn off completion tracking on all the domain's PPs. */
-	dlb2_domain_disable_ldb_seq_checks(hw, domain);
-
-	/*
-	 * Disable the LDB CQs and drain them in order to complete the map and
-	 * unmap procedures, which require zero CQ inflights and zero QID
-	 * inflights respectively.
-	 */
-	dlb2_domain_disable_ldb_cqs(hw, domain);
-
-	ret = dlb2_domain_drain_ldb_cqs(hw, domain, false);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
-	if (ret < 0)
-		return ret;
-
-	/* Re-enable the CQs in order to drain the mapped queues. */
-	dlb2_domain_enable_ldb_cqs(hw, domain);
-
-	ret = dlb2_domain_drain_mapped_queues(hw, domain);
-	if (ret < 0)
-		return ret;
-
-	ret = dlb2_domain_drain_unmapped_queues(hw, domain);
-	if (ret < 0)
-		return ret;
-
-	/* Done draining LDB QEs, so disable the CQs. */
-	dlb2_domain_disable_ldb_cqs(hw, domain);
-
-	dlb2_domain_drain_dir_queues(hw, domain);
-
-	/* Done draining DIR QEs, so disable the CQs. */
-	dlb2_domain_disable_dir_cqs(hw, domain);
-
-	/* Disable PPs */
-	dlb2_domain_disable_dir_producer_ports(hw, domain);
-
-	dlb2_domain_disable_ldb_producer_ports(hw, domain);
-
-	ret = dlb2_domain_verify_reset_success(hw, domain);
-	if (ret)
-		return ret;
-
-	/* Reset the QID and port state. */
-	dlb2_domain_reset_registers(hw, domain);
-
-	/* Hardware reset complete. Reset the domain's software state */
-	ret = dlb2_domain_reset_software_state(hw, domain);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
 unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
 {
 	int i, num = 0;
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource_new.c b/drivers/event/dlb2/pf/base/dlb2_resource_new.c
index 8f97dd865..641812412 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource_new.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource_new.c
@@ -34,6 +34,17 @@
 #define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
 	DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
 
+/*
+ * The PF driver cannot assume that a register write will affect subsequent HCW
+ * writes. To ensure a write completes, the driver must read back a CSR. This
+ * function need only be called for configuration that can occur after the
+ * domain has started; prior to starting, applications can't send HCWs.
+ */
+static inline void dlb2_flush_csr(struct dlb2_hw *hw)
+{
+	DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS(hw->ver));
+}
+
 static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
 {
 	int i;
@@ -1019,3 +1030,2554 @@ int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
 
 	return 0;
 }
+
+static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
+				     struct dlb2_dir_pq_pair *port)
+{
+	u32 reg = 0;
+
+	DLB2_BIT_SET(reg, DLB2_LSP_CQ_DIR_DSBL_DISABLED);
+	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);
+
+	dlb2_flush_csr(hw);
+}
+
+static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
+				   struct dlb2_dir_pq_pair *port)
+{
+	u32 cnt;
+
+	cnt = DLB2_CSR_RD(hw,
+			  DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id));
+
+	/*
+	 * Account for the initial token count, which is used in order to
+	 * provide a CQ with depth less than 8.
+	 */
+
+	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_DIR_TKN_CNT_COUNT) -
+	       port->init_tkn_cnt;
+}
+
+static void dlb2_drain_dir_cq(struct dlb2_hw *hw,
+			      struct dlb2_dir_pq_pair *port)
+{
+	unsigned int port_id = port->id.phys_id;
+	u32 cnt;
+
+	/* Return any outstanding tokens */
+	cnt = dlb2_dir_cq_token_count(hw, port);
+
+	if (cnt != 0) {
+		struct dlb2_hcw hcw_mem[8], *hcw;
+		void __iomem *pp_addr;
+
+		pp_addr = os_map_producer_port(hw, port_id, false);
+
+		/* Point hcw to a 64B-aligned location */
+		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
+
+		/*
+		 * Program the first HCW for a batch token return and
+		 * the rest as NOOPS
+		 */
+		memset(hcw, 0, 4 * sizeof(*hcw));
+		hcw->cq_token = 1;
+		hcw->lock_id = cnt - 1;
+
+		dlb2_movdir64b(pp_addr, hcw);
+
+		os_fence_hcw(hw, pp_addr);
+
+		os_unmap_producer_port(hw, pp_addr);
+	}
+}
+
+static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
+				    struct dlb2_dir_pq_pair *port)
+{
+	u32 reg = 0;
+
+	DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id), reg);
+
+	dlb2_flush_csr(hw);
+}
+
+static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
+				     struct dlb2_hw_domain *domain,
+				     bool toggle_port)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_dir_pq_pair *port;
+	RTE_SET_USED(iter);
+
+	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
+		/*
+		 * Can't drain a port if it's not configured, and there's
+		 * nothing to drain if its queue is unconfigured.
+		 */
+		if (!port->port_configured || !port->queue_configured)
+			continue;
+
+		if (toggle_port)
+			dlb2_dir_port_cq_disable(hw, port);
+
+		dlb2_drain_dir_cq(hw, port);
+
+		if (toggle_port)
+			dlb2_dir_port_cq_enable(hw, port);
+	}
+
+	return 0;
+}
+
+static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
+				struct dlb2_dir_pq_pair *queue)
+{
+	u32 cnt;
+
+	cnt = DLB2_CSR_RD(hw, DLB2_LSP_QID_DIR_ENQUEUE_CNT(hw->ver,
+						      queue->id.phys_id));
+
+	return DLB2_BITS_GET(cnt, DLB2_LSP_QID_DIR_ENQUEUE_CNT_COUNT);
+}
+
+static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
+				    struct dlb2_dir_pq_pair *queue)
+{
+	return dlb2_dir_queue_depth(hw, queue) == 0;
+}
+
+static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
+					 struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_dir_pq_pair *queue;
+	RTE_SET_USED(iter);
+
+	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
+		if (!dlb2_dir_queue_is_empty(hw, queue))
+			return false;
+	}
+
+	return true;
+}
+
+static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
+					struct dlb2_hw_domain *domain)
+{
+	int i;
+
+	/* If the domain hasn't been started, there's no traffic to drain */
+	if (!domain->started)
+		return 0;
+
+	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
+		dlb2_domain_drain_dir_cqs(hw, domain, true);
+
+		if (dlb2_domain_dir_queues_empty(hw, domain))
+			break;
+	}
+
+	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
+		DLB2_HW_ERR(hw,
+			    "[%s()] Internal error: failed to empty queues\n",
+			    __func__);
+		return -EFAULT;
+	}
+
+	/*
+	 * Drain the CQs one more time. For the queues to go empty, they would
+	 * have scheduled one or more QEs.
+	 */
+	dlb2_domain_drain_dir_cqs(hw, domain, true);
+
+	return 0;
+}
+
+static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
+				    struct dlb2_ldb_port *port)
+{
+	u32 reg = 0;
+
+	/*
+	 * Don't re-enable the port if a removal is pending. The caller should
+	 * mark this port as enabled (if it isn't already), and when the
+	 * removal completes the port will be enabled.
+	 */
+	if (port->num_pending_removals)
+		return;
+
+	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);
+
+	dlb2_flush_csr(hw);
+}
+
+static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
+				     struct dlb2_ldb_port *port)
+{
+	u32 reg = 0;
+
+	DLB2_BIT_SET(reg, DLB2_LSP_CQ_LDB_DSBL_DISABLED);
+	DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id), reg);
+
+	dlb2_flush_csr(hw);
+}
+
+static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
+				      struct dlb2_ldb_port *port)
+{
+	u32 cnt;
+
+	cnt = DLB2_CSR_RD(hw,
+			  DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver, port->id.phys_id));
+
+	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT);
+}
+
+static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
+				   struct dlb2_ldb_port *port)
+{
+	u32 cnt;
+
+	cnt = DLB2_CSR_RD(hw,
+			  DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id));
+
+	/*
+	 * Account for the initial token count, which is used in order to
+	 * provide a CQ with depth less than 8.
+	 */
+
+	return DLB2_BITS_GET(cnt, DLB2_LSP_CQ_LDB_TKN_CNT_TOKEN_COUNT) -
+		port->init_tkn_cnt;
+}
+
+static void dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
+{
+	u32 infl_cnt, tkn_cnt;
+	unsigned int i;
+
+	infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
+	tkn_cnt = dlb2_ldb_cq_token_count(hw, port);
+
+	if (infl_cnt || tkn_cnt) {
+		struct dlb2_hcw hcw_mem[8], *hcw;
+		void __iomem *pp_addr;
+
+		pp_addr = os_map_producer_port(hw, port->id.phys_id, true);
+
+		/* Point hcw to a 64B-aligned location */
+		hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
+
+		/*
+		 * Program the first HCW for a completion and token return and
+		 * the other HCWs as NOOPS
+		 */
+
+		memset(hcw, 0, 4 * sizeof(*hcw));
+		hcw->qe_comp = (infl_cnt > 0);
+		hcw->cq_token = (tkn_cnt > 0);
+		hcw->lock_id = tkn_cnt - 1;
+
+		/* Return tokens in the first HCW */
+		dlb2_movdir64b(pp_addr, hcw);
+
+		hcw->cq_token = 0;
+
+		/* Issue remaining completions (if any) */
+		for (i = 1; i < infl_cnt; i++)
+			dlb2_movdir64b(pp_addr, hcw);
+
+		os_fence_hcw(hw, pp_addr);
+
+		os_unmap_producer_port(hw, pp_addr);
+	}
+}
+
+static void dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
+				      struct dlb2_hw_domain *domain,
+				      bool toggle_port)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	int i;
+	RTE_SET_USED(iter);
+
+	/* If the domain hasn't been started, there's no traffic to drain */
+	if (!domain->started)
+		return;
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+			if (toggle_port)
+				dlb2_ldb_port_cq_disable(hw, port);
+
+			dlb2_drain_ldb_cq(hw, port);
+
+			if (toggle_port)
+				dlb2_ldb_port_cq_enable(hw, port);
+		}
+	}
+}
+
+static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
+				struct dlb2_ldb_queue *queue)
+{
+	u32 aqed, ldb, atm;
+
+	aqed = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
+						       queue->id.phys_id));
+	ldb = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
+						      queue->id.phys_id));
+	atm = DLB2_CSR_RD(hw,
+			  DLB2_LSP_QID_ATM_ACTIVE(hw->ver, queue->id.phys_id));
+
+	return DLB2_BITS_GET(aqed, DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT)
+	       + DLB2_BITS_GET(ldb, DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT)
+	       + DLB2_BITS_GET(atm, DLB2_LSP_QID_ATM_ACTIVE_COUNT);
+}
+
+static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
+				    struct dlb2_ldb_queue *queue)
+{
+	return dlb2_ldb_queue_depth(hw, queue) == 0;
+}
+
+static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
+					    struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_queue *queue;
+	RTE_SET_USED(iter);
+
+	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
+		if (queue->num_mappings == 0)
+			continue;
+
+		if (!dlb2_ldb_queue_is_empty(hw, queue))
+			return false;
+	}
+
+	return true;
+}
+
+static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
+					   struct dlb2_hw_domain *domain)
+{
+	int i;
+
+	/* If the domain hasn't been started, there's no traffic to drain */
+	if (!domain->started)
+		return 0;
+
+	if (domain->num_pending_removals > 0) {
+		DLB2_HW_ERR(hw,
+			    "[%s()] Internal error: failed to unmap domain queues\n",
+			    __func__);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
+		dlb2_domain_drain_ldb_cqs(hw, domain, true);
+
+		if (dlb2_domain_mapped_queues_empty(hw, domain))
+			break;
+	}
+
+	if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
+		DLB2_HW_ERR(hw,
+			    "[%s()] Internal error: failed to empty queues\n",
+			    __func__);
+		return -EFAULT;
+	}
+
+	/*
+	 * Drain the CQs one more time. For the queues to go empty, they would
+	 * have scheduled one or more QEs.
+	 */
+	dlb2_domain_drain_ldb_cqs(hw, domain, true);
+
+	return 0;
+}
+
+static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
+				       struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	int i;
+	RTE_SET_USED(iter);
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+			port->enabled = true;
+
+			dlb2_ldb_port_cq_enable(hw, port);
+		}
+	}
+}
+
+static struct dlb2_ldb_queue *
+dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
+			   u32 id,
+			   bool vdev_req,
+			   unsigned int vdev_id)
+{
+	struct dlb2_list_entry *iter1;
+	struct dlb2_list_entry *iter2;
+	struct dlb2_function_resources *rsrcs;
+	struct dlb2_hw_domain *domain;
+	struct dlb2_ldb_queue *queue;
+	RTE_SET_USED(iter1);
+	RTE_SET_USED(iter2);
+
+	if (id >= DLB2_MAX_NUM_LDB_QUEUES)
+		return NULL;
+
+	rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
+
+	if (!vdev_req)
+		return &hw->rsrcs.ldb_queues[id];
+
+	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2) {
+			if (queue->id.virt_id == id)
+				return queue;
+		}
+	}
+
+	DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1) {
+		if (queue->id.virt_id == id)
+			return queue;
+	}
+
+	return NULL;
+}
+
+static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
+						      u32 id,
+						      bool vdev_req,
+						      unsigned int vdev_id)
+{
+	struct dlb2_list_entry *iteration;
+	struct dlb2_function_resources *rsrcs;
+	struct dlb2_hw_domain *domain;
+	RTE_SET_USED(iteration);
+
+	if (id >= DLB2_MAX_NUM_DOMAINS)
+		return NULL;
+
+	if (!vdev_req)
+		return &hw->domains[id];
+
+	rsrcs = &hw->vdev[vdev_id];
+
+	DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration) {
+		if (domain->id.virt_id == id)
+			return domain;
+	}
+
+	return NULL;
+}
+
+static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
+					   struct dlb2_ldb_port *port,
+					   struct dlb2_ldb_queue *queue,
+					   int slot,
+					   enum dlb2_qid_map_state new_state)
+{
+	enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
+	struct dlb2_hw_domain *domain;
+	int domain_id;
+
+	domain_id = port->domain_id.phys_id;
+
+	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
+	if (domain == NULL) {
+		DLB2_HW_ERR(hw,
+			    "[%s()] Internal error: unable to find domain %d\n",
+			    __func__, domain_id);
+		return -EINVAL;
+	}
+
+	switch (curr_state) {
+	case DLB2_QUEUE_UNMAPPED:
+		switch (new_state) {
+		case DLB2_QUEUE_MAPPED:
+			queue->num_mappings++;
+			port->num_mappings++;
+			break;
+		case DLB2_QUEUE_MAP_IN_PROG:
+			queue->num_pending_additions++;
+			domain->num_pending_additions++;
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case DLB2_QUEUE_MAPPED:
+		switch (new_state) {
+		case DLB2_QUEUE_UNMAPPED:
+			queue->num_mappings--;
+			port->num_mappings--;
+			break;
+		case DLB2_QUEUE_UNMAP_IN_PROG:
+			port->num_pending_removals++;
+			domain->num_pending_removals++;
+			break;
+		case DLB2_QUEUE_MAPPED:
+			/* Priority change, nothing to update */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case DLB2_QUEUE_MAP_IN_PROG:
+		switch (new_state) {
+		case DLB2_QUEUE_UNMAPPED:
+			queue->num_pending_additions--;
+			domain->num_pending_additions--;
+			break;
+		case DLB2_QUEUE_MAPPED:
+			queue->num_mappings++;
+			port->num_mappings++;
+			queue->num_pending_additions--;
+			domain->num_pending_additions--;
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case DLB2_QUEUE_UNMAP_IN_PROG:
+		switch (new_state) {
+		case DLB2_QUEUE_UNMAPPED:
+			port->num_pending_removals--;
+			domain->num_pending_removals--;
+			queue->num_mappings--;
+			port->num_mappings--;
+			break;
+		case DLB2_QUEUE_MAPPED:
+			port->num_pending_removals--;
+			domain->num_pending_removals--;
+			break;
+		case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
+			/* Nothing to update */
+			break;
+		default:
+			goto error;
+		}
+		break;
+	case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
+		switch (new_state) {
+		case DLB2_QUEUE_UNMAP_IN_PROG:
+			/* Nothing to update */
+			break;
+		case DLB2_QUEUE_UNMAPPED:
+			/*
+			 * An UNMAP_IN_PROG_PENDING_MAP slot briefly
+			 * becomes UNMAPPED before it transitions to
+			 * MAP_IN_PROG.
+			 */
+			queue->num_mappings--;
+			port->num_mappings--;
+			port->num_pending_removals--;
+			domain->num_pending_removals--;
+			break;
+		default:
+			goto error;
+		}
+		break;
+	default:
+		goto error;
+	}
+
+	port->qid_map[slot].state = new_state;
+
+	DLB2_HW_DBG(hw,
+		    "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
+		    __func__, queue->id.phys_id, port->id.phys_id,
+		    curr_state, new_state);
+	return 0;
+
+error:
+	DLB2_HW_ERR(hw,
+		    "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
+		    __func__, queue->id.phys_id, port->id.phys_id,
+		    curr_state, new_state);
+	return -EFAULT;
+}
+
+static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
+				enum dlb2_qid_map_state state,
+				int *slot)
+{
+	int i;
+
+	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+		if (port->qid_map[i].state == state)
+			break;
+	}
+
+	*slot = i;
+
+	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
+}
+
+static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
+				      enum dlb2_qid_map_state state,
+				      struct dlb2_ldb_queue *queue,
+				      int *slot)
+{
+	int i;
+
+	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+		if (port->qid_map[i].state == state &&
+		    port->qid_map[i].qid == queue->id.phys_id)
+			break;
+	}
+
+	*slot = i;
+
+	return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
+}
+
+/*
+ * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
+ * their function names imply, and should only be called by the dynamic CQ
+ * mapping code.
+ */
+static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
+					      struct dlb2_hw_domain *domain,
+					      struct dlb2_ldb_queue *queue)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	int slot, i;
+	RTE_SET_USED(iter);
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
+
+			if (!dlb2_port_find_slot_queue(port, state,
+						       queue, &slot))
+				continue;
+
+			if (port->enabled)
+				dlb2_ldb_port_cq_disable(hw, port);
+		}
+	}
+}
+
+static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
+					     struct dlb2_hw_domain *domain,
+					     struct dlb2_ldb_queue *queue)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	int slot, i;
+	RTE_SET_USED(iter);
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+			enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
+
+			if (!dlb2_port_find_slot_queue(port, state,
+						       queue, &slot))
+				continue;
+
+			if (port->enabled)
+				dlb2_ldb_port_cq_enable(hw, port);
+		}
+	}
+}
+
+static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
+						struct dlb2_ldb_port *port,
+						int slot)
+{
+	u32 ctrl = 0;
+
+	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
+	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
+	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);
+
+	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
+
+	dlb2_flush_csr(hw);
+}
+
+static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
+					      struct dlb2_ldb_port *port,
+					      int slot)
+{
+	u32 ctrl = 0;
+
+	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
+	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
+	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
+	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V);
+
+	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
+
+	dlb2_flush_csr(hw);
+}
+
+static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
+					struct dlb2_ldb_port *p,
+					struct dlb2_ldb_queue *q,
+					u8 priority)
+{
+	enum dlb2_qid_map_state state;
+	u32 lsp_qid2cq2;
+	u32 lsp_qid2cq;
+	u32 atm_qid2cq;
+	u32 cq2priov;
+	u32 cq2qid;
+	int i;
+
+	/* Look for a pending or already mapped slot, else an unused slot */
+	if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
+	    !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
+	    !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
+		DLB2_HW_ERR(hw,
+			    "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
+			    __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	/* Read-modify-write the priority and valid bit register */
+	cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id));
+
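+	/*
+	 * Each QID slot has a valid bit and a 3-bit priority field in this
+	 * register; set both for slot i.
+	 */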
+	cq2priov |= (1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC)) & DLB2_LSP_CQ2PRIOV_V;
+	cq2priov |= ((priority & 0x7) << (i + DLB2_LSP_CQ2PRIOV_PRIO_LOC) * 3)
+		    & DLB2_LSP_CQ2PRIOV_PRIO;
+
+	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, p->id.phys_id), cq2priov);
+
+	/* Read-modify-write the QID map register */
+	if (i < 4)
+		cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(hw->ver,
+							  p->id.phys_id));
+	else
+		cq2qid = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(hw->ver,
+							  p->id.phys_id));
+
+	if (i == 0 || i == 4)
+		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P0);
+	if (i == 1 || i == 5)
+		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P1);
+	if (i == 2 || i == 6)
+		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P2);
+	if (i == 3 || i == 7)
+		DLB2_BITS_SET(cq2qid, q->id.phys_id, DLB2_LSP_CQ2QID0_QID_P3);
+
+	if (i < 4)
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_CQ2QID0(hw->ver, p->id.phys_id), cq2qid);
+	else
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_CQ2QID1(hw->ver, p->id.phys_id), cq2qid);
+
+	atm_qid2cq = DLB2_CSR_RD(hw,
+				 DLB2_ATM_QID2CQIDIX(q->id.phys_id,
+						p->id.phys_id / 4));
+
+	lsp_qid2cq = DLB2_CSR_RD(hw,
+				 DLB2_LSP_QID2CQIDIX(hw->ver, q->id.phys_id,
+						p->id.phys_id / 4));
+
+	lsp_qid2cq2 = DLB2_CSR_RD(hw,
+				  DLB2_LSP_QID2CQIDIX2(hw->ver, q->id.phys_id,
+						  p->id.phys_id / 4));
+
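+	/*
+	 * Each of these QID-to-CQ registers covers four ports, so the bit
+	 * position within the register depends on the port ID mod 4.
+	 */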
+	switch (p->id.phys_id % 4) {
+	case 0:
+		DLB2_BIT_SET(atm_qid2cq,
+			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
+		DLB2_BIT_SET(lsp_qid2cq,
+			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
+		DLB2_BIT_SET(lsp_qid2cq2,
+			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
+		break;
+
+	case 1:
+		DLB2_BIT_SET(atm_qid2cq,
+			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
+		DLB2_BIT_SET(lsp_qid2cq,
+			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
+		DLB2_BIT_SET(lsp_qid2cq2,
+			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
+		break;
+
+	case 2:
+		DLB2_BIT_SET(atm_qid2cq,
+			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
+		DLB2_BIT_SET(lsp_qid2cq,
+			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
+		DLB2_BIT_SET(lsp_qid2cq2,
+			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
+		break;
+
+	case 3:
+		DLB2_BIT_SET(atm_qid2cq,
+			     1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
+		DLB2_BIT_SET(lsp_qid2cq,
+			     1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
+		DLB2_BIT_SET(lsp_qid2cq2,
+			     1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
+		break;
+	}
+
+	DLB2_CSR_WR(hw,
+		    DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
+		    atm_qid2cq);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_QID2CQIDIX(hw->ver,
+					q->id.phys_id, p->id.phys_id / 4),
+		    lsp_qid2cq);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_QID2CQIDIX2(hw->ver,
+					 q->id.phys_id, p->id.phys_id / 4),
+		    lsp_qid2cq2);
+
+	dlb2_flush_csr(hw);
+
+	p->qid_map[i].qid = q->id.phys_id;
+	p->qid_map[i].priority = priority;
+
+	state = DLB2_QUEUE_MAPPED;
+
+	return dlb2_port_slot_state_transition(hw, p, q, i, state);
+}
+
+static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
+					   struct dlb2_ldb_port *port,
+					   struct dlb2_ldb_queue *queue,
+					   int slot)
+{
+	u32 ctrl = 0;
+	u32 active;
+	u32 enq;
+
+	/* Set the atomic scheduling haswork bit */
+	active = DLB2_CSR_RD(hw, DLB2_LSP_QID_AQED_ACTIVE_CNT(hw->ver,
+							 queue->id.phys_id));
+
+	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
+	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
+	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
+	DLB2_BITS_SET(ctrl,
+		      DLB2_BITS_GET(active,
+				    DLB2_LSP_QID_AQED_ACTIVE_CNT_COUNT) > 0,
+		      DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
+
+	/* Set the non-atomic scheduling haswork bit */
+	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
+
+	enq = DLB2_CSR_RD(hw,
+			  DLB2_LSP_QID_LDB_ENQUEUE_CNT(hw->ver,
+						       queue->id.phys_id));
+
+	ctrl = 0;
+
+	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
+	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
+	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_VALUE);
+	DLB2_BITS_SET(ctrl,
+		      DLB2_BITS_GET(enq,
+				    DLB2_LSP_QID_LDB_ENQUEUE_CNT_COUNT) > 0,
+		      DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
+
+	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
+
+	dlb2_flush_csr(hw);
+
+	return 0;
+}
+
+static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
+					      struct dlb2_ldb_port *port,
+					      u8 slot)
+{
+	u32 ctrl = 0;
+
+	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
+	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
+	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
+
+	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
+
+	ctrl = 0;
+
+	DLB2_BITS_SET(ctrl, port->id.phys_id, DLB2_LSP_LDB_SCHED_CTRL_CQ);
+	DLB2_BITS_SET(ctrl, slot, DLB2_LSP_LDB_SCHED_CTRL_QIDIX);
+	DLB2_BIT_SET(ctrl, DLB2_LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
+
+	DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL(hw->ver), ctrl);
+
+	dlb2_flush_csr(hw);
+}
+
+static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
+					      struct dlb2_ldb_queue *queue)
+{
+	u32 infl_lim = 0;
+
+	DLB2_BITS_SET(infl_lim, queue->num_qid_inflights,
+		      DLB2_LSP_QID_LDB_INFL_LIM_LIMIT);
+
+	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
+		    infl_lim);
+}
+
+static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
+						struct dlb2_ldb_queue *queue)
+{
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue->id.phys_id),
+		    DLB2_LSP_QID_LDB_INFL_LIM_RST);
+}
+
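+/*
+ * Complete an in-progress dynamic map once the queue's inflight count has
+ * reached zero: perform the static map, restore the has-work and inflight
+ * state, and re-enable the CQs that service this queue.
+ */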
+static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
+						struct dlb2_hw_domain *domain,
+						struct dlb2_ldb_port *port,
+						struct dlb2_ldb_queue *queue)
+{
+	struct dlb2_list_entry *iter;
+	enum dlb2_qid_map_state state;
+	int slot, ret, i;
+	u32 infl_cnt;
+	u8 prio;
+	RTE_SET_USED(iter);
+
+	infl_cnt = DLB2_CSR_RD(hw,
+			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
+						    queue->id.phys_id));
+
+	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
+		DLB2_HW_ERR(hw,
+			    "[%s()] Internal error: non-zero QID inflight count\n",
+			    __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Statically map the port and set its corresponding has_work bits.
+	 */
+	state = DLB2_QUEUE_MAP_IN_PROG;
+	if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
+		return -EINVAL;
+
+	prio = port->qid_map[slot].priority;
+
+	/*
+	 * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
+	 * the port's qid_map state.
+	 */
+	ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
+	if (ret)
+		return ret;
+
+	ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
+	if (ret)
+		return ret;
+
+	/*
+	 * Ensure IF_status(cq, qid) is 0 before enabling the port to
+	 * prevent spurious schedules from causing the queue's inflight
+	 * count to increase.
+	 */
+	dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
+
+	/* Reset the queue's inflight status */
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+			state = DLB2_QUEUE_MAPPED;
+			if (!dlb2_port_find_slot_queue(port, state,
+						       queue, &slot))
+				continue;
+
+			dlb2_ldb_port_set_queue_if_status(hw, port, slot);
+		}
+	}
+
+	dlb2_ldb_queue_set_inflight_limit(hw, queue);
+
+	/* Re-enable CQs mapped to this queue */
+	dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
+
+	/* If this queue has other mappings pending, clear its inflight limit */
+	if (queue->num_pending_additions > 0)
+		dlb2_ldb_queue_clear_inflight_limit(hw, queue);
+
+	return 0;
+}
+
+/**
+ * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
+ * @hw: dlb2_hw handle for a particular device.
+ * @port: load-balanced port
+ * @queue: load-balanced queue
+ * @priority: queue servicing priority
+ *
+ * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
+ * at a later point, and <0 if an error occurred.
+ */
+static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
+					 struct dlb2_ldb_port *port,
+					 struct dlb2_ldb_queue *queue,
+					 u8 priority)
+{
+	enum dlb2_qid_map_state state;
+	struct dlb2_hw_domain *domain;
+	int domain_id, slot, ret;
+	u32 infl_cnt;
+
+	domain_id = port->domain_id.phys_id;
+
+	domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
+	if (domain == NULL) {
+		DLB2_HW_ERR(hw,
+			    "[%s()] Internal error: unable to find domain %d\n",
+			    __func__, port->domain_id.phys_id);
+		return -EINVAL;
+	}
+
+	/*
+	 * Set the QID inflight limit to 0 to prevent further scheduling of the
+	 * queue.
+	 */
+	DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(hw->ver,
+						  queue->id.phys_id), 0);
+
+	if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
+		DLB2_HW_ERR(hw,
+			    "Internal error: No available unmapped slots\n");
+		return -EFAULT;
+	}
+
+	port->qid_map[slot].qid = queue->id.phys_id;
+	port->qid_map[slot].priority = priority;
+
+	state = DLB2_QUEUE_MAP_IN_PROG;
+	ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
+	if (ret)
+		return ret;
+
+	infl_cnt = DLB2_CSR_RD(hw,
+			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
+						    queue->id.phys_id));
+
+	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
+		/*
+		 * The queue is owed completions so it's not safe to map it
+		 * yet. Schedule a kernel thread to complete the mapping later,
+		 * once software has completed all the queue's inflight events.
+		 */
+		if (!os_worker_active(hw))
+			os_schedule_work(hw);
+
+		return 1;
+	}
+
+	/*
+	 * Disable the affected CQ, and the CQs already mapped to the QID,
+	 * before reading the QID's inflight count a second time. There is an
+	 * unlikely race in which the QID may schedule one more QE after we
+	 * read an inflight count of 0, and disabling the CQs guarantees that
+	 * the race will not occur after a re-read of the inflight count
+	 * register.
+	 */
+	if (port->enabled)
+		dlb2_ldb_port_cq_disable(hw, port);
+
+	dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
+
+	infl_cnt = DLB2_CSR_RD(hw,
+			       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver,
+						    queue->id.phys_id));
+
+	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
+		if (port->enabled)
+			dlb2_ldb_port_cq_enable(hw, port);
+
+		dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
+
+		/*
+		 * The queue is owed completions so it's not safe to map it
+		 * yet. Schedule a kernel thread to complete the mapping later,
+		 * once software has completed all the queue's inflight events.
+		 */
+		if (!os_worker_active(hw))
+			os_schedule_work(hw);
+
+		return 1;
+	}
+
+	return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
+}
+
+static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
+					struct dlb2_hw_domain *domain,
+					struct dlb2_ldb_port *port)
+{
+	int i;
+
+	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+		u32 infl_cnt;
+		struct dlb2_ldb_queue *queue;
+		int qid;
+
+		if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
+			continue;
+
+		qid = port->qid_map[i].qid;
+
+		queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
+
+		if (queue == NULL) {
+			DLB2_HW_ERR(hw,
+				    "[%s()] Internal error: unable to find queue %d\n",
+				    __func__, qid);
+			continue;
+		}
+
+		infl_cnt = DLB2_CSR_RD(hw,
+				       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));
+
+		if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT))
+			continue;
+
+		/*
+		 * Disable the affected CQ, and the CQs already mapped to the
+		 * QID, before reading the QID's inflight count a second time.
+		 * There is an unlikely race in which the QID may schedule one
+		 * more QE after we read an inflight count of 0, and disabling
+		 * the CQs guarantees that the race will not occur after a
+		 * re-read of the inflight count register.
+		 */
+		if (port->enabled)
+			dlb2_ldb_port_cq_disable(hw, port);
+
+		dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
+
+		infl_cnt = DLB2_CSR_RD(hw,
+				       DLB2_LSP_QID_LDB_INFL_CNT(hw->ver, qid));
+
+		if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_QID_LDB_INFL_CNT_COUNT)) {
+			if (port->enabled)
+				dlb2_ldb_port_cq_enable(hw, port);
+
+			dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
+
+			continue;
+		}
+
+		dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
+	}
+}
+
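+/*
+ * Attempt to complete all in-progress map operations on the domain's LDB
+ * ports. Returns the number of map operations still pending.
+ */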
+static unsigned int
+dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
+				      struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	int i;
+	RTE_SET_USED(iter);
+
+	if (!domain->configured || domain->num_pending_additions == 0)
+		return 0;
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
+			dlb2_domain_finish_map_port(hw, domain, port);
+	}
+
+	return domain->num_pending_additions;
+}
+
+static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
+				   struct dlb2_ldb_port *port,
+				   struct dlb2_ldb_queue *queue)
+{
+	enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
+	u32 lsp_qid2cq2;
+	u32 lsp_qid2cq;
+	u32 atm_qid2cq;
+	u32 cq2priov;
+	u32 queue_id;
+	u32 port_id;
+	int i;
+
+	/* Find the queue's slot */
+	mapped = DLB2_QUEUE_MAPPED;
+	in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
+	pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
+
+	if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
+	    !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
+	    !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
+		DLB2_HW_ERR(hw,
+			    "[%s():%d] Internal error: QID %d isn't mapped\n",
+			    __func__, __LINE__, queue->id.phys_id);
+		return -EFAULT;
+	}
+
+	port_id = port->id.phys_id;
+	queue_id = queue->id.phys_id;
+
+	/* Read-modify-write the priority and valid bit register */
+	cq2priov = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id));
+
+	cq2priov &= ~(1 << (i + DLB2_LSP_CQ2PRIOV_V_LOC));
+
+	DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(hw->ver, port_id), cq2priov);
+
+	atm_qid2cq = DLB2_CSR_RD(hw, DLB2_ATM_QID2CQIDIX(queue_id,
+							 port_id / 4));
+
+	lsp_qid2cq = DLB2_CSR_RD(hw,
+				 DLB2_LSP_QID2CQIDIX(hw->ver,
+						queue_id, port_id / 4));
+
+	lsp_qid2cq2 = DLB2_CSR_RD(hw,
+				  DLB2_LSP_QID2CQIDIX2(hw->ver,
+						  queue_id, port_id / 4));
+
+	switch (port_id % 4) {
+	case 0:
+		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P0_LOC));
+		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P0_LOC));
+		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P0_LOC));
+		break;
+
+	case 1:
+		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P1_LOC));
+		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P1_LOC));
+		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P1_LOC));
+		break;
+
+	case 2:
+		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P2_LOC));
+		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P2_LOC));
+		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P2_LOC));
+		break;
+
+	case 3:
+		atm_qid2cq &= ~(1 << (i + DLB2_ATM_QID2CQIDIX_00_CQ_P3_LOC));
+		lsp_qid2cq &= ~(1 << (i + DLB2_LSP_QID2CQIDIX_00_CQ_P3_LOC));
+		lsp_qid2cq2 &= ~(1 << (i + DLB2_LSP_QID2CQIDIX2_00_CQ_P3_LOC));
+		break;
+	}
+
+	DLB2_CSR_WR(hw, DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4), atm_qid2cq);
+
+	DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, port_id / 4),
+		    lsp_qid2cq);
+
+	DLB2_CSR_WR(hw, DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, port_id / 4),
+		    lsp_qid2cq2);
+
+	dlb2_flush_csr(hw);
+
+	unmapped = DLB2_QUEUE_UNMAPPED;
+
+	return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
+}
+
+static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
+				 struct dlb2_hw_domain *domain,
+				 struct dlb2_ldb_port *port,
+				 struct dlb2_ldb_queue *queue,
+				 u8 prio)
+{
+	if (domain->started)
+		return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
+	else
+		return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
+}
+
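+/*
+ * Complete the unmap of a single {CQ, slot}: update the mapping registers,
+ * restore the slot's scheduling state, and start any map request that was
+ * waiting on this slot's removal.
+ */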
+static void
+dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
+				   struct dlb2_hw_domain *domain,
+				   struct dlb2_ldb_port *port,
+				   int slot)
+{
+	enum dlb2_qid_map_state state;
+	struct dlb2_ldb_queue *queue;
+
+	queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
+
+	state = port->qid_map[slot].state;
+
+	/* Update the QID2CQIDX and CQ2QID vectors */
+	dlb2_ldb_port_unmap_qid(hw, port, queue);
+
+	/*
+	 * Ensure the QID will not be serviced by this {CQ, slot} by clearing
+	 * the has_work bits
+	 */
+	dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
+
+	/* Reset the {CQ, slot} to its default state */
+	dlb2_ldb_port_set_queue_if_status(hw, port, slot);
+
+	/* Re-enable the CQ if it was not manually disabled by the user */
+	if (port->enabled)
+		dlb2_ldb_port_cq_enable(hw, port);
+
+	/*
+	 * If there is a mapping that is pending this slot's removal, perform
+	 * the mapping now.
+	 */
+	if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
+		struct dlb2_ldb_port_qid_map *map;
+		struct dlb2_ldb_queue *map_queue;
+		u8 prio;
+
+		map = &port->qid_map[slot];
+
+		map->qid = map->pending_qid;
+		map->priority = map->pending_priority;
+
+		map_queue = &hw->rsrcs.ldb_queues[map->qid];
+		prio = map->priority;
+
+		dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
+	}
+}
+
+static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
+					  struct dlb2_hw_domain *domain,
+					  struct dlb2_ldb_port *port)
+{
+	u32 infl_cnt;
+	int i;
+
+	if (port->num_pending_removals == 0)
+		return false;
+
+	/*
+	 * The unmap requires all the CQ's outstanding inflights to be
+	 * completed.
+	 */
+	infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
+						       port->id.phys_id));
+	if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) > 0)
+		return false;
+
+	for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+		struct dlb2_ldb_port_qid_map *map;
+
+		map = &port->qid_map[i];
+
+		if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
+		    map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
+			continue;
+
+		dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
+	}
+
+	return true;
+}
+
+static unsigned int
+dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
+					struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	int i;
+	RTE_SET_USED(iter);
+
+	if (!domain->configured || domain->num_pending_removals == 0)
+		return 0;
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
+			dlb2_domain_finish_unmap_port(hw, domain, port);
+	}
+
+	return domain->num_pending_removals;
+}
+
+static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
+					struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	int i;
+	RTE_SET_USED(iter);
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+			port->enabled = false;
+
+			dlb2_ldb_port_cq_disable(hw, port);
+		}
+	}
+}
+
+static void dlb2_log_reset_domain(struct dlb2_hw *hw,
+				  u32 domain_id,
+				  bool vdev_req,
+				  unsigned int vdev_id)
+{
+	DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
+	if (vdev_req)
+		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
+	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
+}
+
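+/*
+ * Clearing a VPP's valid bit revokes the vdev's access to the corresponding
+ * physical producer port.
+ */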
+static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
+					 struct dlb2_hw_domain *domain,
+					 unsigned int vdev_id)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_dir_pq_pair *port;
+	u32 vpp_v = 0;
+	RTE_SET_USED(iter);
+
+	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
+		unsigned int offs;
+		u32 virt_id;
+
+		if (hw->virt_mode == DLB2_VIRT_SRIOV)
+			virt_id = port->id.virt_id;
+		else
+			virt_id = port->id.phys_id;
+
+		offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id;
+
+		DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), vpp_v);
+	}
+}
+
+static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
+					 struct dlb2_hw_domain *domain,
+					 unsigned int vdev_id)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	u32 vpp_v = 0;
+	int i;
+	RTE_SET_USED(iter);
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+			unsigned int offs;
+			u32 virt_id;
+
+			if (hw->virt_mode == DLB2_VIRT_SRIOV)
+				virt_id = port->id.virt_id;
+			else
+				virt_id = port->id.phys_id;
+
+			offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
+
+			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), vpp_v);
+		}
+	}
+}
+
+static void
+dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
+					struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	u32 int_en = 0;
+	u32 wd_en = 0;
+	int i;
+	RTE_SET_USED(iter);
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+			DLB2_CSR_WR(hw,
+				    DLB2_CHP_LDB_CQ_INT_ENB(hw->ver,
+						       port->id.phys_id),
+				    int_en);
+
+			DLB2_CSR_WR(hw,
+				    DLB2_CHP_LDB_CQ_WD_ENB(hw->ver,
+						      port->id.phys_id),
+				    wd_en);
+		}
+	}
+}
+
+static void
+dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
+					struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_dir_pq_pair *port;
+	u32 int_en = 0;
+	u32 wd_en = 0;
+	RTE_SET_USED(iter);
+
+	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
+		DLB2_CSR_WR(hw,
+			    DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
+			    int_en);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_CHP_DIR_CQ_WD_ENB(hw->ver, port->id.phys_id),
+			    wd_en);
+	}
+}
+
+static void
+dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
+					  struct dlb2_hw_domain *domain)
+{
+	int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_queue *queue;
+	RTE_SET_USED(iter);
+
+	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
+		int idx = domain_offset + queue->id.phys_id;
+
+		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), 0);
+
+		if (queue->id.vdev_owned) {
+			DLB2_CSR_WR(hw,
+				    DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
+				    0);
+
+			idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
+				queue->id.virt_id;
+
+			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID_V(idx), 0);
+
+			DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VQID2QID(idx), 0);
+		}
+	}
+}
+
+static void
+dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
+					  struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_dir_pq_pair *queue;
+	unsigned long max_ports;
+	int domain_offset;
+	RTE_SET_USED(iter);
+
+	max_ports = DLB2_MAX_NUM_DIR_PORTS(hw->ver);
+
+	domain_offset = domain->id.phys_id * max_ports;
+
+	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
+		int idx = domain_offset + queue->id.phys_id;
+
+		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), 0);
+
+		if (queue->id.vdev_owned) {
+			idx = queue->id.vdev_id * max_ports + queue->id.virt_id;
+
+			DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID_V(idx), 0);
+
+			DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VQID2QID(idx), 0);
+		}
+	}
+}
+
+static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
+					       struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	u32 chk_en = 0;
+	int i;
+	RTE_SET_USED(iter);
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+			DLB2_CSR_WR(hw,
+				    DLB2_CHP_SN_CHK_ENBL(hw->ver,
+							 port->id.phys_id),
+				    chk_en);
+		}
+	}
+}
+
+static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
+						 struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	int i;
+	RTE_SET_USED(iter);
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+			int j;
+
+			for (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {
+				if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
+					break;
+			}
+
+			if (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
+				DLB2_HW_ERR(hw,
+					    "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
+					    __func__, port->id.phys_id);
+				return -EFAULT;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
+					struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_dir_pq_pair *port;
+	RTE_SET_USED(iter);
+
+	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
+		port->enabled = false;
+
+		dlb2_dir_port_cq_disable(hw, port);
+	}
+}
+
+static void
+dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
+				       struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_dir_pq_pair *port;
+	u32 pp_v = 0;
+	RTE_SET_USED(iter);
+
+	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
+		DLB2_CSR_WR(hw,
+			    DLB2_SYS_DIR_PP_V(port->id.phys_id),
+			    pp_v);
+	}
+}
+
+static void
+dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
+				       struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	u32 pp_v = 0;
+	int i;
+	RTE_SET_USED(iter);
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+			DLB2_CSR_WR(hw,
+				    DLB2_SYS_LDB_PP_V(port->id.phys_id),
+				    pp_v);
+		}
+	}
+}
+
+static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
+					    struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_dir_pq_pair *dir_port;
+	struct dlb2_ldb_port *ldb_port;
+	struct dlb2_ldb_queue *queue;
+	int i;
+	RTE_SET_USED(iter);
+
+	/*
+	 * Confirm that all the domain's queues' inflight counts and AQED
+	 * active counts are 0.
+	 */
+	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
+		if (!dlb2_ldb_queue_is_empty(hw, queue)) {
+			DLB2_HW_ERR(hw,
+				    "[%s()] Internal error: failed to empty ldb queue %d\n",
+				    __func__, queue->id.phys_id);
+			return -EFAULT;
+		}
+	}
+
+	/* Confirm that all the domain's CQs' inflight and token counts are 0. */
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
+			if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
+			    dlb2_ldb_cq_token_count(hw, ldb_port)) {
+				DLB2_HW_ERR(hw,
+					    "[%s()] Internal error: failed to empty ldb port %d\n",
+					    __func__, ldb_port->id.phys_id);
+				return -EFAULT;
+			}
+		}
+	}
+
+	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
+		if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
+			DLB2_HW_ERR(hw,
+				    "[%s()] Internal error: failed to empty dir queue %d\n",
+				    __func__, dir_port->id.phys_id);
+			return -EFAULT;
+		}
+
+		if (dlb2_dir_cq_token_count(hw, dir_port)) {
+			DLB2_HW_ERR(hw,
+				    "[%s()] Internal error: failed to empty dir port %d\n",
+				    __func__, dir_port->id.phys_id);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
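+/*
+ * Restore each of the port's configuration and state CSRs to its reset
+ * default value.
+ */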
+static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
+						   struct dlb2_ldb_port *port)
+{
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
+		    DLB2_SYS_LDB_PP2VAS_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_LDB_CQ2VAS(hw->ver, port->id.phys_id),
+		    DLB2_CHP_LDB_CQ2VAS_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
+		    DLB2_SYS_LDB_PP2VDEV_RST);
+
+	if (port->id.vdev_owned) {
+		unsigned int offs;
+		u32 virt_id;
+
+		/*
+		 * DLB uses producer port address bits 17:12 to determine the
+		 * producer port ID. In Scalable IOV mode, PP accesses come
+		 * through the PF MMIO window for the physical producer port,
+		 * so for translation purposes the virtual and physical port
+		 * IDs are equal.
+		 */
+		if (hw->virt_mode == DLB2_VIRT_SRIOV)
+			virt_id = port->id.virt_id;
+		else
+			virt_id = port->id.phys_id;
+
+		offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
+
+		DLB2_CSR_WR(hw,
+			    DLB2_SYS_VF_LDB_VPP2PP(offs),
+			    DLB2_SYS_VF_LDB_VPP2PP_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_SYS_VF_LDB_VPP_V(offs),
+			    DLB2_SYS_VF_LDB_VPP_V_RST);
+	}
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_LDB_PP_V(port->id.phys_id),
+		    DLB2_SYS_LDB_PP_V_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ_LDB_DSBL(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ_LDB_DSBL_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_LDB_CQ_DEPTH(hw->ver, port->id.phys_id),
+		    DLB2_CHP_LDB_CQ_DEPTH_RST);
+
+	if (hw->ver != DLB2_HW_V2)
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_CFG_CQ_LDB_WU_LIMIT(port->id.phys_id),
+			    DLB2_LSP_CFG_CQ_LDB_WU_LIMIT_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ_LDB_INFL_LIM(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ_LDB_INFL_LIM_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_HIST_LIST_LIM(hw->ver, port->id.phys_id),
+		    DLB2_CHP_HIST_LIST_LIM_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_HIST_LIST_BASE(hw->ver, port->id.phys_id),
+		    DLB2_CHP_HIST_LIST_BASE_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_HIST_LIST_POP_PTR(hw->ver, port->id.phys_id),
+		    DLB2_CHP_HIST_LIST_POP_PTR_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_HIST_LIST_PUSH_PTR(hw->ver, port->id.phys_id),
+		    DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
+		    DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_LDB_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
+		    DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_LDB_CQ_INT_ENB(hw->ver, port->id.phys_id),
+		    DLB2_CHP_LDB_CQ_INT_ENB_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
+		    DLB2_SYS_LDB_CQ_ISR_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
+		    DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_LDB_CQ_WPTR(hw->ver, port->id.phys_id),
+		    DLB2_CHP_LDB_CQ_WPTR_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ_LDB_TKN_CNT(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ_LDB_TKN_CNT_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
+		    DLB2_SYS_LDB_CQ_ADDR_L_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
+		    DLB2_SYS_LDB_CQ_ADDR_U_RST);
+
+	if (hw->ver == DLB2_HW_V2)
+		DLB2_CSR_WR(hw,
+			    DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
+			    DLB2_SYS_LDB_CQ_AT_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_LDB_CQ_PASID(hw->ver, port->id.phys_id),
+		    DLB2_SYS_LDB_CQ_PASID_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
+		    DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ2QID0(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ2QID0_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ2QID1(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ2QID1_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ2PRIOV(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ2PRIOV_RST);
+}
+
+static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
+						 struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_port *port;
+	int i;
+	RTE_SET_USED(iter);
+
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
+			__dlb2_domain_reset_ldb_port_registers(hw, port);
+	}
+}
+
+static void
+__dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
+				       struct dlb2_dir_pq_pair *port)
+{
+	u32 reg = 0;
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_DIR_CQ2VAS(hw->ver, port->id.phys_id),
+		    DLB2_CHP_DIR_CQ2VAS_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ_DIR_DSBL(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ_DIR_DSBL_RST);
+
+	DLB2_BIT_SET(reg, DLB2_SYS_WB_DIR_CQ_STATE_CQ_OPT_CLR);
+
+	if (hw->ver == DLB2_HW_V2)
+		DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
+	else
+		DLB2_CSR_WR(hw,
+			    DLB2_SYS_WB_DIR_CQ_STATE(port->id.phys_id), reg);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_DIR_CQ_DEPTH(hw->ver, port->id.phys_id),
+		    DLB2_CHP_DIR_CQ_DEPTH_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(hw->ver, port->id.phys_id),
+		    DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_DIR_CQ_TMR_THRSH(hw->ver, port->id.phys_id),
+		    DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_DIR_CQ_INT_ENB(hw->ver, port->id.phys_id),
+		    DLB2_CHP_DIR_CQ_INT_ENB_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
+		    DLB2_SYS_DIR_CQ_ISR_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(hw->ver,
+						      port->id.phys_id),
+		    DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(hw->ver, port->id.phys_id),
+		    DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_CHP_DIR_CQ_WPTR(hw->ver, port->id.phys_id),
+		    DLB2_CHP_DIR_CQ_WPTR_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ_DIR_TKN_CNT(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ_DIR_TKN_CNT_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
+		    DLB2_SYS_DIR_CQ_ADDR_L_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
+		    DLB2_SYS_DIR_CQ_ADDR_U_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
+		    DLB2_SYS_DIR_CQ_AT_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_DIR_CQ_PASID(hw->ver, port->id.phys_id),
+		    DLB2_SYS_DIR_CQ_PASID_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
+		    DLB2_SYS_DIR_CQ_FMT_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
+		    DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(hw->ver, port->id.phys_id),
+		    DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
+		    DLB2_SYS_DIR_PP2VAS_RST);
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
+		    DLB2_SYS_DIR_PP2VDEV_RST);
+
+	if (port->id.vdev_owned) {
+		unsigned int offs;
+		u32 virt_id;
+
+		/*
+		 * DLB uses producer port address bits 17:12 to determine the
+		 * producer port ID. In Scalable IOV mode, PP accesses come
+		 * through the PF MMIO window for the physical producer port,
+		 * so for translation purposes the virtual and physical port
+		 * IDs are equal.
+		 */
+		if (hw->virt_mode == DLB2_VIRT_SRIOV)
+			virt_id = port->id.virt_id;
+		else
+			virt_id = port->id.phys_id;
+
+		offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
+			virt_id;
+
+		DLB2_CSR_WR(hw,
+			    DLB2_SYS_VF_DIR_VPP2PP(offs),
+			    DLB2_SYS_VF_DIR_VPP2PP_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_SYS_VF_DIR_VPP_V(offs),
+			    DLB2_SYS_VF_DIR_VPP_V_RST);
+	}
+
+	DLB2_CSR_WR(hw,
+		    DLB2_SYS_DIR_PP_V(port->id.phys_id),
+		    DLB2_SYS_DIR_PP_V_RST);
+}
+
+static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
+						 struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_dir_pq_pair *port;
+	RTE_SET_USED(iter);
+
+	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
+		__dlb2_domain_reset_dir_port_registers(hw, port);
+}
+
+static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
+						  struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_queue *queue;
+	RTE_SET_USED(iter);
+
+	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
+		unsigned int queue_id = queue->id.phys_id;
+		int i;
+
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(hw->ver, queue_id),
+			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(hw->ver, queue_id),
+			    DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(hw->ver, queue_id),
+			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(hw->ver, queue_id),
+			    DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_NALDB_MAX_DEPTH(hw->ver, queue_id),
+			    DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_LDB_INFL_LIM(hw->ver, queue_id),
+			    DLB2_LSP_QID_LDB_INFL_LIM_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_AQED_ACTIVE_LIM(hw->ver, queue_id),
+			    DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_ATM_DEPTH_THRSH(hw->ver, queue_id),
+			    DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_NALDB_DEPTH_THRSH(hw->ver, queue_id),
+			    DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_SYS_LDB_QID_ITS(queue_id),
+			    DLB2_SYS_LDB_QID_ITS_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_CHP_ORD_QID_SN(hw->ver, queue_id),
+			    DLB2_CHP_ORD_QID_SN_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_CHP_ORD_QID_SN_MAP(hw->ver, queue_id),
+			    DLB2_CHP_ORD_QID_SN_MAP_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_SYS_LDB_QID_V(queue_id),
+			    DLB2_SYS_LDB_QID_V_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_SYS_LDB_QID_CFG_V(queue_id),
+			    DLB2_SYS_LDB_QID_CFG_V_RST);
+
+		if (queue->sn_cfg_valid) {
+			u32 offs[2];
+
+			offs[0] = DLB2_RO_GRP_0_SLT_SHFT(hw->ver,
+							 queue->sn_slot);
+			offs[1] = DLB2_RO_GRP_1_SLT_SHFT(hw->ver,
+							 queue->sn_slot);
+
+			DLB2_CSR_WR(hw,
+				    offs[queue->sn_group],
+				    DLB2_RO_GRP_0_SLT_SHFT_RST);
+		}
+
+		for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
+			DLB2_CSR_WR(hw,
+				    DLB2_LSP_QID2CQIDIX(hw->ver, queue_id, i),
+				    DLB2_LSP_QID2CQIDIX_00_RST);
+
+			DLB2_CSR_WR(hw,
+				    DLB2_LSP_QID2CQIDIX2(hw->ver, queue_id, i),
+				    DLB2_LSP_QID2CQIDIX2_00_RST);
+
+			DLB2_CSR_WR(hw,
+				    DLB2_ATM_QID2CQIDIX(queue_id, i),
+				    DLB2_ATM_QID2CQIDIX_00_RST);
+		}
+	}
+}
+
+static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
+						  struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_dir_pq_pair *queue;
+	RTE_SET_USED(iter);
+
+	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_DIR_MAX_DEPTH(hw->ver,
+						       queue->id.phys_id),
+			    DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(hw->ver,
+							  queue->id.phys_id),
+			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(hw->ver,
+							  queue->id.phys_id),
+			    DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_LSP_QID_DIR_DEPTH_THRSH(hw->ver,
+							 queue->id.phys_id),
+			    DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
+			    DLB2_SYS_DIR_QID_ITS_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_SYS_DIR_QID_V(queue->id.phys_id),
+			    DLB2_SYS_DIR_QID_V_RST);
+	}
+}
+
+static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
+					struct dlb2_hw_domain *domain)
+{
+	dlb2_domain_reset_ldb_port_registers(hw, domain);
+
+	dlb2_domain_reset_dir_port_registers(hw, domain);
+
+	dlb2_domain_reset_ldb_queue_registers(hw, domain);
+
+	dlb2_domain_reset_dir_queue_registers(hw, domain);
+
+	if (hw->ver == DLB2_HW_V2) {
+		DLB2_CSR_WR(hw,
+			    DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
+			    DLB2_CHP_CFG_LDB_VAS_CRD_RST);
+
+		DLB2_CSR_WR(hw,
+			    DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
+			    DLB2_CHP_CFG_DIR_VAS_CRD_RST);
+	} else {
+		DLB2_CSR_WR(hw,
+			    DLB2_CHP_CFG_VAS_CRD(domain->id.phys_id),
+			    DLB2_CHP_CFG_VAS_CRD_RST);
+	}
+}
+
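+/*
+ * Return all of the domain's resources (queues, ports, history list entries,
+ * and credits) to its parent function's available lists, and mark the domain
+ * unconfigured.
+ */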
+static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
+					    struct dlb2_hw_domain *domain)
+{
+	struct dlb2_dir_pq_pair *tmp_dir_port;
+	struct dlb2_ldb_queue *tmp_ldb_queue;
+	struct dlb2_ldb_port *tmp_ldb_port;
+	struct dlb2_list_entry *iter1;
+	struct dlb2_list_entry *iter2;
+	struct dlb2_function_resources *rsrcs;
+	struct dlb2_dir_pq_pair *dir_port;
+	struct dlb2_ldb_queue *ldb_queue;
+	struct dlb2_ldb_port *ldb_port;
+	struct dlb2_list_head *list;
+	int ret, i;
+	RTE_SET_USED(tmp_dir_port);
+	RTE_SET_USED(tmp_ldb_queue);
+	RTE_SET_USED(tmp_ldb_port);
+	RTE_SET_USED(iter1);
+	RTE_SET_USED(iter2);
+
+	rsrcs = domain->parent_func;
+
+	/* Move the domain's ldb queues to the function's avail list */
+	list = &domain->used_ldb_queues;
+	DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
+		if (ldb_queue->sn_cfg_valid) {
+			struct dlb2_sn_group *grp;
+
+			grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
+
+			dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
+			ldb_queue->sn_cfg_valid = false;
+		}
+
+		ldb_queue->owned = false;
+		ldb_queue->num_mappings = 0;
+		ldb_queue->num_pending_additions = 0;
+
+		dlb2_list_del(&domain->used_ldb_queues,
+			      &ldb_queue->domain_list);
+		dlb2_list_add(&rsrcs->avail_ldb_queues,
+			      &ldb_queue->func_list);
+		rsrcs->num_avail_ldb_queues++;
+	}
+
+	list = &domain->avail_ldb_queues;
+	DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
+		ldb_queue->owned = false;
+
+		dlb2_list_del(&domain->avail_ldb_queues,
+			      &ldb_queue->domain_list);
+		dlb2_list_add(&rsrcs->avail_ldb_queues,
+			      &ldb_queue->func_list);
+		rsrcs->num_avail_ldb_queues++;
+	}
+
+	/* Move the domain's ldb ports to the function's avail list */
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		list = &domain->used_ldb_ports[i];
+		DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
+				       iter1, iter2) {
+			int j;
+
+			ldb_port->owned = false;
+			ldb_port->configured = false;
+			ldb_port->num_pending_removals = 0;
+			ldb_port->num_mappings = 0;
+			ldb_port->init_tkn_cnt = 0;
+			ldb_port->cq_depth = 0;
+			for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+				ldb_port->qid_map[j].state =
+					DLB2_QUEUE_UNMAPPED;
+
+			dlb2_list_del(&domain->used_ldb_ports[i],
+				      &ldb_port->domain_list);
+			dlb2_list_add(&rsrcs->avail_ldb_ports[i],
+				      &ldb_port->func_list);
+			rsrcs->num_avail_ldb_ports[i]++;
+		}
+
+		list = &domain->avail_ldb_ports[i];
+		DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
+				       iter1, iter2) {
+			ldb_port->owned = false;
+
+			dlb2_list_del(&domain->avail_ldb_ports[i],
+				      &ldb_port->domain_list);
+			dlb2_list_add(&rsrcs->avail_ldb_ports[i],
+				      &ldb_port->func_list);
+			rsrcs->num_avail_ldb_ports[i]++;
+		}
+	}
+
+	/* Move the domain's dir ports to the function's avail list */
+	list = &domain->used_dir_pq_pairs;
+	DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
+		dir_port->owned = false;
+		dir_port->port_configured = false;
+		dir_port->init_tkn_cnt = 0;
+
+		dlb2_list_del(&domain->used_dir_pq_pairs,
+			      &dir_port->domain_list);
+
+		dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
+			      &dir_port->func_list);
+		rsrcs->num_avail_dir_pq_pairs++;
+	}
+
+	list = &domain->avail_dir_pq_pairs;
+	DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
+		dir_port->owned = false;
+
+		dlb2_list_del(&domain->avail_dir_pq_pairs,
+			      &dir_port->domain_list);
+
+		dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
+			      &dir_port->func_list);
+		rsrcs->num_avail_dir_pq_pairs++;
+	}
+
+	/* Return hist list entries to the function */
+	ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
+				    domain->hist_list_entry_base,
+				    domain->total_hist_list_entries);
+	if (ret) {
+		DLB2_HW_ERR(hw,
+			    "[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
+			    __func__);
+		return ret;
+	}
+
+	domain->total_hist_list_entries = 0;
+	domain->avail_hist_list_entries = 0;
+	domain->hist_list_entry_base = 0;
+	domain->hist_list_entry_offset = 0;
+
+	if (hw->ver == DLB2_HW_V2_5) {
+		rsrcs->num_avail_entries += domain->num_credits;
+		domain->num_credits = 0;
+	} else {
+		rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
+		domain->num_ldb_credits = 0;
+
+		rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
+		domain->num_dir_credits = 0;
+	}
+	rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
+	rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
+	domain->num_avail_aqed_entries = 0;
+	domain->num_used_aqed_entries = 0;
+
+	domain->num_pending_removals = 0;
+	domain->num_pending_additions = 0;
+	domain->configured = false;
+	domain->started = false;
+
+	/*
+	 * Move the domain out of the used_domains list and back to the
+	 * function's avail_domains list.
+	 */
+	dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
+	dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
+	rsrcs->num_avail_domains++;
+
+	return 0;
+}
+
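+/*
+ * An unmapped queue cannot be drained directly, so temporarily map it to an
+ * arbitrary LDB port (dynamically, since the domain has already started) and
+ * drain it through the mapped-queue path.
+ */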
+static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
+					    struct dlb2_hw_domain *domain,
+					    struct dlb2_ldb_queue *queue)
+{
+	struct dlb2_ldb_port *port = NULL;
+	int ret, i;
+
+	/* If a domain has LDB queues, it must have LDB ports */
+	for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+		port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i],
+					  typeof(*port));
+		if (port)
+			break;
+	}
+
+	if (port == NULL) {
+		DLB2_HW_ERR(hw,
+			    "[%s()] Internal error: No configured LDB ports\n",
+			    __func__);
+		return -EFAULT;
+	}
+
+	/* If necessary, free up a QID slot in this CQ */
+	if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+		struct dlb2_ldb_queue *mapped_queue;
+
+		mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
+
+		ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
+		if (ret)
+			return ret;
+	}
+
+	ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
+	if (ret)
+		return ret;
+
+	return dlb2_domain_drain_mapped_queues(hw, domain);
+}
+
+static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
+					     struct dlb2_hw_domain *domain)
+{
+	struct dlb2_list_entry *iter;
+	struct dlb2_ldb_queue *queue;
+	int ret;
+	RTE_SET_USED(iter);
+
+	/* If the domain hasn't been started, there's no traffic to drain */
+	if (!domain->started)
+		return 0;
+
+	/*
+	 * Pre-condition: the unattached queue must not have any outstanding
+	 * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
+	 * prior to this in dlb2_domain_drain_mapped_queues().
+	 */
+	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
+		if (queue->num_mappings != 0 ||
+		    dlb2_ldb_queue_is_empty(hw, queue))
+			continue;
+
+		ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * dlb2_reset_domain() - reset a scheduling domain
+ * @hw: dlb2_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @vdev_req: indicates whether this request came from a vdev.
+ * @vdev_id: If vdev_req is true, this contains the vdev's ID.
+ *
+ * This function resets and frees a DLB 2.0 scheduling domain and its associated
+ * resources.
+ *
+ * Pre-condition: the driver must ensure software has stopped sending QEs
+ * through this domain's producer ports before invoking this function, or
+ * undefined behavior will result.
+ *
+ * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
+ * device.
+ *
+ * Return:
+ * Returns 0 upon success, a negative error code otherwise.
+ *
+ * EINVAL - Invalid domain ID, or the domain is not configured.
+ * EFAULT - Internal error. (Possibly caused if the pre-condition is not met.)
+ * ETIMEDOUT - Hardware component didn't reset in the expected time.
+ */
+int dlb2_reset_domain(struct dlb2_hw *hw,
+		      u32 domain_id,
+		      bool vdev_req,
+		      unsigned int vdev_id)
+{
+	struct dlb2_hw_domain *domain;
+	int ret;
+
+	dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
+
+	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+
+	if (domain == NULL || !domain->configured)
+		return -EINVAL;
+
+	/* Disable VPPs */
+	if (vdev_req) {
+		dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
+
+		dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
+	}
+
+	/* Disable CQ interrupts */
+	dlb2_domain_disable_dir_port_interrupts(hw, domain);
+
+	dlb2_domain_disable_ldb_port_interrupts(hw, domain);
+
+	/*
+	 * For each queue owned by this domain, disable its write permissions so
+	 * that any traffic sent to it is dropped. Well-behaved software
+	 * should not be sending QEs at this point.
+	 */
+	dlb2_domain_disable_dir_queue_write_perms(hw, domain);
+
+	dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
+
+	/* Turn off completion tracking on all the domain's PPs. */
+	dlb2_domain_disable_ldb_seq_checks(hw, domain);
+
+	/*
+	 * Disable the LDB CQs and drain them in order to complete the map and
+	 * unmap procedures, which require zero CQ inflights and zero QID
+	 * inflights respectively.
+	 */
+	dlb2_domain_disable_ldb_cqs(hw, domain);
+
+	dlb2_domain_drain_ldb_cqs(hw, domain, false);
+
+	ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
+	if (ret)
+		return ret;
+
+	ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
+	if (ret)
+		return ret;
+
+	ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
+	if (ret)
+		return ret;
+
+	/* Re-enable the CQs in order to drain the mapped queues. */
+	dlb2_domain_enable_ldb_cqs(hw, domain);
+
+	ret = dlb2_domain_drain_mapped_queues(hw, domain);
+	if (ret)
+		return ret;
+
+	ret = dlb2_domain_drain_unmapped_queues(hw, domain);
+	if (ret)
+		return ret;
+
+	/* Done draining LDB QEs, so disable the CQs. */
+	dlb2_domain_disable_ldb_cqs(hw, domain);
+
+	dlb2_domain_drain_dir_queues(hw, domain);
+
+	/* Done draining DIR QEs, so disable the CQs. */
+	dlb2_domain_disable_dir_cqs(hw, domain);
+
+	/* Disable PPs */
+	dlb2_domain_disable_dir_producer_ports(hw, domain);
+
+	dlb2_domain_disable_ldb_producer_ports(hw, domain);
+
+	ret = dlb2_domain_verify_reset_success(hw, domain);
+	if (ret)
+		return ret;
+
+	/* Reset the QID and port state. */
+	dlb2_domain_reset_registers(hw, domain);
+
+	/* Hardware reset complete. Reset the domain's software state */
+	return dlb2_domain_reset_software_state(hw, domain);
+}
-- 
2.23.0

