DPDK patches and discussions
From: Timothy McDaniel <timothy.mcdaniel@intel.com>
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
	harry.van.haaren@intel.com, jerinj@marvell.com
Subject: [dpdk-dev] [PATCH v5 11/22] event/dlb: add port setup
Date: Sat, 17 Oct 2020 14:04:05 -0500
Message-ID: <1602961456-17392-12-git-send-email-timothy.mcdaniel@intel.com>
In-Reply-To: <1602961456-17392-1-git-send-email-timothy.mcdaniel@intel.com>

Configure the load-balanced (ldb) or directed (dir) port.
The consumer queue (CQ) and producer port (PP) are also
set up here.
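
For reference, a minimal sketch of how an application exercises this new
entry point through the standard eventdev API (illustrative only, not part
of this patch; the depths and threshold below are placeholder values):

    #include <rte_eventdev.h>

    static int
    setup_one_port(uint8_t dev_id, uint8_t port_id)
    {
    	struct rte_event_port_conf conf;

    	/* Start from the PMD defaults reported via port_def_conf */
    	if (rte_event_port_default_conf_get(dev_id, port_id, &conf))
    		return -1;

    	conf.new_event_threshold = 1024; /* max outstanding NEW events */
    	conf.dequeue_depth = 32;         /* dequeue burst / CQ depth */
    	conf.enqueue_depth = 32;         /* enqueue burst depth */

    	/* Dispatches to dlb_eventdev_port_setup() for the DLB PMD */
    	return rte_event_port_setup(dev_id, port_id, &conf);
    }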

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/dlb.c                  |  516 ++++++++++-
 drivers/event/dlb/dlb_iface.c            |   11 +
 drivers/event/dlb/dlb_iface.h            |   14 +
 drivers/event/dlb/pf/base/dlb_resource.c | 1436 +++++++++++++++++++++++++++++-
 drivers/event/dlb/pf/dlb_pf.c            |  206 +++++
 5 files changed, 2179 insertions(+), 4 deletions(-)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index 0d420f0..704c9d3 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -152,6 +152,69 @@ dlb_free_qe_mem(struct dlb_port *qm_port)
 	qm_port->consume_qe = NULL;
 }
 
+static int
+dlb_init_consume_qe(struct dlb_port *qm_port, char *mz_name)
+{
+	struct dlb_cq_pop_qe *qe;
+
+	qe = rte_zmalloc(mz_name,
+			DLB_NUM_QES_PER_CACHE_LINE *
+				sizeof(struct dlb_cq_pop_qe),
+			RTE_CACHE_LINE_SIZE);
+
+	if (qe == NULL) {
+		DLB_LOG_ERR("dlb: no memory for consume_qe\n");
+		return -ENOMEM;
+	}
+
+	qm_port->consume_qe = qe;
+
+	qe->qe_valid = 0;
+	qe->qe_frag = 0;
+	qe->qe_comp = 0;
+	qe->cq_token = 1;
+	/* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
+	 * and so on.
+	 */
+	qe->tokens = 0;	/* set at run time */
+	qe->meas_lat = 0;
+	qe->no_dec = 0;
+	/* Completion IDs are disabled */
+	qe->cmp_id = 0;
+
+	return 0;
+}
+
+static int
+dlb_init_qe_mem(struct dlb_port *qm_port, char *mz_name)
+{
+	int ret, sz;
+
+	sz = DLB_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb_enqueue_qe);
+
+	qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
+
+	if (qm_port->qe4 == NULL) {
+		DLB_LOG_ERR("dlb: no qe4 memory\n");
+		ret = -ENOMEM;
+		goto error_exit;
+	}
+
+	ret = dlb_init_consume_qe(qm_port, mz_name);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: dlb_init_consume_qe ret=%d\n", ret);
+		goto error_exit;
+	}
+
+	return 0;
+
+error_exit:
+
+	dlb_free_qe_mem(qm_port);
+
+	return ret;
+}
+
 /* Wrapper for string to int conversion. Substituted for atoi(...), which is
  * unsafe.
  */
@@ -657,6 +720,329 @@ dlb_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
 	queue_conf->priority = 0;
 }
 
+static int
+dlb_hw_create_ldb_port(struct dlb_eventdev *dlb,
+		       struct dlb_eventdev_port *ev_port,
+		       uint32_t dequeue_depth,
+		       uint32_t cq_depth,
+		       uint32_t enqueue_depth,
+		       uint16_t rsvd_tokens,
+		       bool use_rsvd_token_scheme)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_create_ldb_port_args cfg = {0};
+	struct dlb_cmd_response response = {0};
+	int ret;
+	struct dlb_port *qm_port = NULL;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	uint32_t qm_port_id;
+
+	if (handle == NULL)
+		return -EINVAL;
+
+	if (cq_depth < DLB_MIN_LDB_CQ_DEPTH) {
+		DLB_LOG_ERR("dlb: invalid cq_depth, must be %d-%d\n",
+			DLB_MIN_LDB_CQ_DEPTH, DLB_MAX_INPUT_QUEUE_DEPTH);
+		return -EINVAL;
+	}
+
+	if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
+		DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
+			    DLB_MIN_ENQUEUE_DEPTH);
+		return -EINVAL;
+	}
+
+	rte_spinlock_lock(&handle->resource_lock);
+
+	cfg.response = (uintptr_t)&response;
+
+	/* We round up to the next power of 2 if necessary */
+	cfg.cq_depth = rte_align32pow2(cq_depth);
+	cfg.cq_depth_threshold = rsvd_tokens;
+
+	cfg.cq_history_list_size = DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
+
+	/* User controls the LDB high watermark via enqueue depth. The DIR high
+	 * watermark is equal, unless the directed credit pool is too small.
+	 */
+	cfg.ldb_credit_high_watermark = enqueue_depth;
+
+	/* If there are no directed ports, the kernel driver will ignore this
+	 * port's directed credit settings. Don't use enqueue_depth if it would
+	 * require more directed credits than are available.
+	 */
+	cfg.dir_credit_high_watermark =
+		RTE_MIN(enqueue_depth,
+			handle->cfg.num_dir_credits / dlb->num_ports);
+
+	cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
+	cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);
+
+	cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
+	cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
+
+	/* Per QM values */
+
+	cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
+	cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;
+
+	ret = dlb_iface_ldb_port_create(handle, &cfg, dlb->poll_mode);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: dlb_ldb_port_create error, ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		goto error_exit;
+	}
+
+	qm_port_id = response.id;
+
+	DLB_LOG_DBG("dlb: ev_port %d uses qm LB port %d <<<<<\n",
+		    ev_port->id, qm_port_id);
+
+	qm_port = &ev_port->qm_port;
+	qm_port->ev_port = ev_port; /* back ptr */
+	qm_port->dlb = dlb; /* back ptr */
+
+	/*
+	 * Allocate and init local qe struct(s).
+	 * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
+	 */
+
+	snprintf(mz_name, sizeof(mz_name), "ldb_port%d",
+		 ev_port->id);
+
+	ret = dlb_init_qe_mem(qm_port, mz_name);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
+		goto error_exit;
+	}
+
+	qm_port->pp_mmio_base = DLB_LDB_PP_BASE + PAGE_SIZE * qm_port_id;
+	qm_port->id = qm_port_id;
+
+	/* The credit window is one high water mark of QEs */
+	qm_port->ldb_pushcount_at_credit_expiry = 0;
+	qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
+	/* The credit window is one high water mark of QEs */
+	qm_port->dir_pushcount_at_credit_expiry = 0;
+	qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
+	qm_port->cq_depth = cfg.cq_depth;
+	/* CQs with depth < 8 use an 8-entry queue, but withhold credits so
+	 * the effective depth is smaller.
+	 */
+	qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
+	qm_port->cq_idx = 0;
+	qm_port->cq_idx_unmasked = 0;
+	if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
+		qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
+	else
+		qm_port->cq_depth_mask = qm_port->cq_depth - 1;
+
+	qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
+	/* starting value of gen bit - it toggles at wrap time */
+	qm_port->gen_bit = 1;
+
+	qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
+	qm_port->cq_rsvd_token_deficit = rsvd_tokens;
+	qm_port->int_armed = false;
+
+	/* Save off for later use in info and lookup APIs. */
+	qm_port->qid_mappings = &dlb->qm_ldb_to_ev_queue_id[0];
+
+	qm_port->dequeue_depth = dequeue_depth;
+
+	qm_port->owed_tokens = 0;
+	qm_port->issued_releases = 0;
+
+	/* update state */
+	qm_port->state = PORT_STARTED; /* enabled at create time */
+	qm_port->config_state = DLB_CONFIGURED;
+
+	qm_port->dir_credits = cfg.dir_credit_high_watermark;
+	qm_port->ldb_credits = cfg.ldb_credit_high_watermark;
+
+	DLB_LOG_DBG("dlb: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
+		    qm_port_id,
+		    cq_depth,
+		    qm_port->ldb_credits,
+		    qm_port->dir_credits);
+
+	rte_spinlock_unlock(&handle->resource_lock);
+
+	return 0;
+
+error_exit:
+	if (qm_port) {
+		dlb_free_qe_mem(qm_port);
+		qm_port->pp_mmio_base = 0;
+	}
+
+	rte_spinlock_unlock(&handle->resource_lock);
+
+	DLB_LOG_ERR("dlb: create ldb port failed!\n");
+
+	return ret;
+}
+
+static int
+dlb_hw_create_dir_port(struct dlb_eventdev *dlb,
+		       struct dlb_eventdev_port *ev_port,
+		       uint32_t dequeue_depth,
+		       uint32_t cq_depth,
+		       uint32_t enqueue_depth,
+		       uint16_t rsvd_tokens,
+		       bool use_rsvd_token_scheme)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_create_dir_port_args cfg = {0};
+	struct dlb_cmd_response response = {0};
+	int ret;
+	struct dlb_port *qm_port = NULL;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	uint32_t qm_port_id;
+
+	if (dlb == NULL || handle == NULL)
+		return -EINVAL;
+
+	if (cq_depth < DLB_MIN_DIR_CQ_DEPTH) {
+		DLB_LOG_ERR("dlb: invalid cq_depth, must be at least %d\n",
+			    DLB_MIN_DIR_CQ_DEPTH);
+		return -EINVAL;
+	}
+
+	if (enqueue_depth < DLB_MIN_ENQUEUE_DEPTH) {
+		DLB_LOG_ERR("dlb: invalid enqueue_depth, must be at least %d\n",
+			    DLB_MIN_ENQUEUE_DEPTH);
+		return -EINVAL;
+	}
+
+	rte_spinlock_lock(&handle->resource_lock);
+
+	/* Directed queues are configured at link time. */
+	cfg.queue_id = -1;
+
+	cfg.response = (uintptr_t)&response;
+
+	/* We round up to the next power of 2 if necessary */
+	cfg.cq_depth = rte_align32pow2(cq_depth);
+	cfg.cq_depth_threshold = rsvd_tokens;
+
+	/* User controls the LDB high watermark via enqueue depth. The DIR high
+	 * watermark is equal, unless the directed credit pool is too small.
+	 */
+	cfg.ldb_credit_high_watermark = enqueue_depth;
+
+	/* Don't use enqueue_depth if it would require more directed credits
+	 * than are available.
+	 */
+	cfg.dir_credit_high_watermark =
+		RTE_MIN(enqueue_depth,
+			handle->cfg.num_dir_credits / dlb->num_ports);
+
+	cfg.ldb_credit_quantum = cfg.ldb_credit_high_watermark / 2;
+	cfg.ldb_credit_low_watermark = RTE_MIN(16, cfg.ldb_credit_quantum);
+
+	cfg.dir_credit_quantum = cfg.dir_credit_high_watermark / 2;
+	cfg.dir_credit_low_watermark = RTE_MIN(16, cfg.dir_credit_quantum);
+
+	/* Per QM values */
+
+	cfg.ldb_credit_pool_id = handle->cfg.ldb_credit_pool_id;
+	cfg.dir_credit_pool_id = handle->cfg.dir_credit_pool_id;
+
+	ret = dlb_iface_dir_port_create(handle, &cfg, dlb->poll_mode);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: dlb_dir_port_create error, ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		goto error_exit;
+	}
+
+	qm_port_id = response.id;
+
+	DLB_LOG_DBG("dlb: ev_port %d uses qm DIR port %d <<<<<\n",
+		    ev_port->id, qm_port_id);
+
+	qm_port = &ev_port->qm_port;
+	qm_port->ev_port = ev_port; /* back ptr */
+	qm_port->dlb = dlb;  /* back ptr */
+
+	/*
+	 * Init local qe struct(s).
+	 * Note: MOVDIR64 requires the enqueue QE to be aligned
+	 */
+
+	snprintf(mz_name, sizeof(mz_name), "dir_port%d",
+		 ev_port->id);
+
+	ret = dlb_init_qe_mem(qm_port, mz_name);
+
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: init_qe_mem failed, ret=%d\n", ret);
+		goto error_exit;
+	}
+
+	qm_port->pp_mmio_base = DLB_DIR_PP_BASE + PAGE_SIZE * qm_port_id;
+	qm_port->id = qm_port_id;
+
+	/* The credit window is one high water mark of QEs */
+	qm_port->ldb_pushcount_at_credit_expiry = 0;
+	qm_port->cached_ldb_credits = cfg.ldb_credit_high_watermark;
+	/* The credit window is one high water mark of QEs */
+	qm_port->dir_pushcount_at_credit_expiry = 0;
+	qm_port->cached_dir_credits = cfg.dir_credit_high_watermark;
+	qm_port->cq_depth = cfg.cq_depth;
+	qm_port->cq_idx = 0;
+	qm_port->cq_idx_unmasked = 0;
+	if (dlb->poll_mode == DLB_CQ_POLL_MODE_SPARSE)
+		qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
+	else
+		qm_port->cq_depth_mask = cfg.cq_depth - 1;
+
+	qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
+	/* starting value of gen bit - it toggles at wrap time */
+	qm_port->gen_bit = 1;
+
+	qm_port->use_rsvd_token_scheme = use_rsvd_token_scheme;
+	qm_port->cq_rsvd_token_deficit = rsvd_tokens;
+	qm_port->int_armed = false;
+
+	/* Save off for later use in info and lookup APIs. */
+	qm_port->qid_mappings = &dlb->qm_dir_to_ev_queue_id[0];
+
+	qm_port->dequeue_depth = dequeue_depth;
+
+	qm_port->owed_tokens = 0;
+	qm_port->issued_releases = 0;
+
+	/* update state */
+	qm_port->state = PORT_STARTED; /* enabled at create time */
+	qm_port->config_state = DLB_CONFIGURED;
+
+	qm_port->dir_credits = cfg.dir_credit_high_watermark;
+	qm_port->ldb_credits = cfg.ldb_credit_high_watermark;
+
+	DLB_LOG_DBG("dlb: created dir port %d, depth = %d cr=%d,%d\n",
+		    qm_port_id,
+		    cq_depth,
+		    cfg.dir_credit_high_watermark,
+		    cfg.ldb_credit_high_watermark);
+
+	rte_spinlock_unlock(&handle->resource_lock);
+
+	return 0;
+
+error_exit:
+	if (qm_port) {
+		qm_port->pp_mmio_base = 0;
+		dlb_free_qe_mem(qm_port);
+	}
+
+	rte_spinlock_unlock(&handle->resource_lock);
+
+	DLB_LOG_ERR("dlb: create dir port failed!\n");
+
+	return ret;
+}
+
 static int32_t
 dlb_hw_create_ldb_queue(struct dlb_eventdev *dlb,
 			struct dlb_queue *queue,
@@ -909,7 +1295,7 @@ dlb_eventdev_queue_setup(struct rte_eventdev *dev,
 	struct dlb_eventdev_queue *ev_queue;
 	int ret;
 
-	if (!queue_conf)
+	if (queue_conf == NULL)
 		return -EINVAL;
 
 	if (ev_qid >= dlb->num_queues)
@@ -949,6 +1335,133 @@ dlb_eventdev_queue_setup(struct rte_eventdev *dev,
 	return ret;
 }
 
+static void
+dlb_port_link_teardown(struct dlb_eventdev *dlb,
+		       struct dlb_eventdev_port *ev_port)
+{
+	struct dlb_eventdev_queue *ev_queue;
+	int i;
+
+	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+		if (!ev_port->link[i].valid)
+			continue;
+
+		ev_queue = &dlb->ev_queues[ev_port->link[i].queue_id];
+
+		ev_port->link[i].valid = false;
+		ev_port->num_links--;
+		ev_queue->num_links--;
+	}
+}
+
+static int
+dlb_eventdev_port_setup(struct rte_eventdev *dev,
+			uint8_t ev_port_id,
+			const struct rte_event_port_conf *port_conf)
+{
+	struct dlb_eventdev *dlb;
+	struct dlb_eventdev_port *ev_port;
+	bool use_rsvd_token_scheme;
+	uint32_t adj_cq_depth;
+	uint16_t rsvd_tokens;
+	int ret;
+
+	if (dev == NULL || port_conf == NULL) {
+		DLB_LOG_ERR("Null parameter\n");
+		return -EINVAL;
+	}
+
+	dlb = dlb_pmd_priv(dev);
+
+	if (ev_port_id >= DLB_MAX_NUM_PORTS)
+		return -EINVAL;
+
+	if (port_conf->dequeue_depth >
+		evdev_dlb_default_info.max_event_port_dequeue_depth ||
+	    port_conf->enqueue_depth >
+		evdev_dlb_default_info.max_event_port_enqueue_depth)
+		return -EINVAL;
+
+	ev_port = &dlb->ev_ports[ev_port_id];
+	/* configured? */
+	if (ev_port->setup_done) {
+		DLB_LOG_ERR("evport %d is already configured\n", ev_port_id);
+		return -EINVAL;
+	}
+
+	/* The reserved token interrupt arming scheme requires that one or more
+	 * CQ tokens be reserved by the PMD. This limits the amount of CQ space
+	 * usable by the DLB, so in order to give an *effective* CQ depth equal
+	 * to the user-requested value, we double CQ depth and reserve half of
+	 * its tokens. If the user requests the max CQ depth (256) then we
+	 * cannot double it, so we reserve one token and give an effective
+	 * depth of 255 entries.
+	 */
+	use_rsvd_token_scheme = true;
+	rsvd_tokens = 1;
+	adj_cq_depth = port_conf->dequeue_depth;
+
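+	/* Example: a requested dequeue depth of 128 yields a 256-entry CQ
+	 * with 128 reserved tokens, so the effective depth seen by the
+	 * application is still 128.
+	 */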
+	if (use_rsvd_token_scheme && adj_cq_depth < 256) {
+		rsvd_tokens = adj_cq_depth;
+		adj_cq_depth *= 2;
+	}
+
+	ev_port->qm_port.is_directed = port_conf->event_port_cfg &
+		RTE_EVENT_PORT_CFG_SINGLE_LINK;
+
+	if (!ev_port->qm_port.is_directed) {
+		ret = dlb_hw_create_ldb_port(dlb,
+					     ev_port,
+					     port_conf->dequeue_depth,
+					     adj_cq_depth,
+					     port_conf->enqueue_depth,
+					     rsvd_tokens,
+					     use_rsvd_token_scheme);
+		if (ret < 0) {
+			DLB_LOG_ERR("Failed to create the lB port ve portId=%d\n",
+				    ev_port_id);
+			return ret;
+		}
+	} else {
+		ret = dlb_hw_create_dir_port(dlb,
+					     ev_port,
+					     port_conf->dequeue_depth,
+					     adj_cq_depth,
+					     port_conf->enqueue_depth,
+					     rsvd_tokens,
+					     use_rsvd_token_scheme);
+		if (ret < 0) {
+			DLB_LOG_ERR("Failed to create the DIR port\n");
+			return ret;
+		}
+	}
+
+	/* Save off port config for reconfig */
+	dlb->ev_ports[ev_port_id].conf = *port_conf;
+
+	dlb->ev_ports[ev_port_id].id = ev_port_id;
+	dlb->ev_ports[ev_port_id].enq_configured = true;
+	dlb->ev_ports[ev_port_id].setup_done = true;
+	dlb->ev_ports[ev_port_id].inflight_max =
+		port_conf->new_event_threshold;
+	dlb->ev_ports[ev_port_id].implicit_release =
+		!(port_conf->event_port_cfg &
+		  RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
+	dlb->ev_ports[ev_port_id].outstanding_releases = 0;
+	dlb->ev_ports[ev_port_id].inflight_credits = 0;
+	dlb->ev_ports[ev_port_id].credit_update_quanta =
+		RTE_LIBRTE_PMD_DLB_SW_CREDIT_QUANTA;
+	dlb->ev_ports[ev_port_id].dlb = dlb; /* reverse link */
+
+	/* Tear down pre-existing port->queue links */
+	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
+		dlb_port_link_teardown(dlb, &dlb->ev_ports[ev_port_id]);
+
+	dev->data->ports[ev_port_id] = &dlb->ev_ports[ev_port_id];
+
+	return 0;
+}
+
 static int
 set_dev_id(const char *key __rte_unused,
 	   const char *value,
@@ -1028,6 +1541,7 @@ dlb_entry_points_init(struct rte_eventdev *dev)
 		.queue_def_conf   = dlb_eventdev_queue_default_conf_get,
 		.port_def_conf    = dlb_eventdev_port_default_conf_get,
 		.queue_setup      = dlb_eventdev_queue_setup,
+		.port_setup       = dlb_eventdev_port_setup,
 		.dump             = dlb_eventdev_dump,
 		.xstats_get       = dlb_eventdev_xstats_get,
 		.xstats_get_names = dlb_eventdev_xstats_get_names,
diff --git a/drivers/event/dlb/dlb_iface.c b/drivers/event/dlb/dlb_iface.c
index 219f79e..fbbf9d7 100644
--- a/drivers/event/dlb/dlb_iface.c
+++ b/drivers/event/dlb/dlb_iface.c
@@ -33,9 +33,20 @@ int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,
 int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,
 					struct dlb_create_dir_pool_args *cfg);
 
+int (*dlb_iface_dir_queue_create)(struct dlb_hw_dev *handle,
+				  struct dlb_create_dir_queue_args *cfg);
+
 int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
 				  struct dlb_create_ldb_queue_args *cfg);
 
+int (*dlb_iface_ldb_port_create)(struct dlb_hw_dev *handle,
+				 struct dlb_create_ldb_port_args *cfg,
+				 enum dlb_cq_poll_modes poll_mode);
+
+int (*dlb_iface_dir_port_create)(struct dlb_hw_dev *handle,
+				 struct dlb_create_dir_port_args *cfg,
+				 enum dlb_cq_poll_modes poll_mode);
+
 int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
 				  enum dlb_cq_poll_modes *mode);
 
diff --git a/drivers/event/dlb/dlb_iface.h b/drivers/event/dlb/dlb_iface.h
index af1416d..d578185 100644
--- a/drivers/event/dlb/dlb_iface.h
+++ b/drivers/event/dlb/dlb_iface.h
@@ -35,6 +35,20 @@ extern int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,
 extern int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
 				  struct dlb_create_ldb_queue_args *cfg);
 
+extern int (*dlb_iface_dir_queue_create)(struct dlb_hw_dev *handle,
+				  struct dlb_create_dir_queue_args *cfg);
+
+extern int (*dlb_iface_ldb_port_create)(struct dlb_hw_dev *handle,
+					struct dlb_create_ldb_port_args *cfg,
+					enum dlb_cq_poll_modes poll_mode);
+
+extern int (*dlb_iface_dir_port_create)(struct dlb_hw_dev *handle,
+					struct dlb_create_dir_port_args *cfg,
+					enum dlb_cq_poll_modes poll_mode);
+
+extern int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
+				  struct dlb_create_ldb_queue_args *cfg);
+
 extern int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
 					 enum dlb_cq_poll_modes *mode);
 
diff --git a/drivers/event/dlb/pf/base/dlb_resource.c b/drivers/event/dlb/pf/base/dlb_resource.c
index ec567f6..c5e7a20 100644
--- a/drivers/event/dlb/pf/base/dlb_resource.c
+++ b/drivers/event/dlb/pf/base/dlb_resource.c
@@ -4453,7 +4453,7 @@ dlb_verify_create_ldb_queue_args(struct dlb_hw *hw,
 
 	domain = dlb_get_domain_from_id(hw, domain_id);
 
-	if (!domain) {
+	if (domain == NULL) {
 		resp->status = DLB_ST_INVALID_DOMAIN_ID;
 		return -1;
 	}
@@ -4555,7 +4555,7 @@ int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
 		return -EINVAL;
 
 	domain = dlb_get_domain_from_id(hw, domain_id);
-	if (!domain) {
+	if (domain == NULL) {
 		DLB_HW_ERR(hw,
 			   "[%s():%d] Internal error: domain not found\n",
 			   __func__, __LINE__);
@@ -4565,7 +4565,7 @@ int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
 	queue = DLB_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
 
 	/* Verification should catch this. */
-	if (!queue) {
+	if (queue == NULL) {
 		DLB_HW_ERR(hw,
 			   "[%s():%d] Internal error: no available ldb queues\n",
 			   __func__, __LINE__);
@@ -4598,3 +4598,1433 @@ int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
 
 	return 0;
 }
+
+
+static void
+dlb_log_create_dir_queue_args(struct dlb_hw *hw,
+			      u32 domain_id,
+			      struct dlb_create_dir_queue_args *args)
+{
+	DLB_HW_INFO(hw, "DLB create directed queue arguments:\n");
+	DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
+	DLB_HW_INFO(hw, "\tPort ID:   %d\n", args->port_id);
+}
+
+static struct dlb_dir_pq_pair *
+dlb_get_domain_used_dir_pq(u32 id, struct dlb_domain *domain)
+{
+	struct dlb_list_entry *iter;
+	struct dlb_dir_pq_pair *port;
+	RTE_SET_USED(iter);
+
+	if (id >= DLB_MAX_NUM_DIR_PORTS)
+		return NULL;
+
+	DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
+		if (port->id == id)
+			return port;
+
+	return NULL;
+}
+
+static int
+dlb_verify_create_dir_queue_args(struct dlb_hw *hw,
+				 u32 domain_id,
+				 struct dlb_create_dir_queue_args *args,
+				 struct dlb_cmd_response *resp)
+{
+	struct dlb_domain *domain;
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+
+	if (domain == NULL) {
+		resp->status = DLB_ST_INVALID_DOMAIN_ID;
+		return -1;
+	}
+
+	if (!domain->configured) {
+		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
+		return -1;
+	}
+
+	if (domain->started) {
+		resp->status = DLB_ST_DOMAIN_STARTED;
+		return -1;
+	}
+
+	/* If the user claims the port is already configured, validate the port
+	 * ID, its domain, and whether the port is configured.
+	 */
+	if (args->port_id != -1) {
+		struct dlb_dir_pq_pair *port;
+
+		port = dlb_get_domain_used_dir_pq(args->port_id, domain);
+
+		if (port == NULL || port->domain_id != domain->id ||
+		    !port->port_configured) {
+			resp->status = DLB_ST_INVALID_PORT_ID;
+			return -1;
+		}
+	}
+
+	/* If the queue's port is not configured, validate that a free
+	 * port-queue pair is available.
+	 */
+	if (args->port_id == -1 &&
+	    dlb_list_empty(&domain->avail_dir_pq_pairs)) {
+		resp->status = DLB_ST_DIR_QUEUES_UNAVAILABLE;
+		return -1;
+	}
+
+	return 0;
+}
+
+static void dlb_configure_dir_queue(struct dlb_hw *hw,
+				    struct dlb_domain *domain,
+				    struct dlb_dir_pq_pair *queue)
+{
+	union dlb_sys_dir_vasqid_v r0 = { {0} };
+	union dlb_sys_dir_qid_v r1 = { {0} };
+	unsigned int offs;
+
+	/* QID write permissions are turned on when the domain is started */
+	r0.field.vasqid_v = 0;
+
+	offs = (domain->id * DLB_MAX_NUM_DIR_PORTS) + queue->id;
+
+	DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val);
+
+	r1.field.qid_v = 1;
+
+	DLB_CSR_WR(hw, DLB_SYS_DIR_QID_V(queue->id), r1.val);
+
+	queue->queue_configured = true;
+}
+
+/**
+ * dlb_hw_create_dir_queue() - Allocate and initialize a DLB DIR queue.
+ * @hw:	  Contains the current state of the DLB hardware.
+ * @args: User-provided arguments.
+ * @resp: Response to user.
+ *
+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
+ * satisfy a request, resp->status will be set accordingly.
+ */
+int dlb_hw_create_dir_queue(struct dlb_hw *hw,
+			    u32 domain_id,
+			    struct dlb_create_dir_queue_args *args,
+			    struct dlb_cmd_response *resp)
+{
+	struct dlb_dir_pq_pair *queue;
+	struct dlb_domain *domain;
+
+	dlb_log_create_dir_queue_args(hw, domain_id, args);
+
+	/* Verify that hardware resources are available before attempting to
+	 * satisfy the request. This simplifies the error unwinding code.
+	 */
+	if (dlb_verify_create_dir_queue_args(hw, domain_id, args, resp))
+		return -EINVAL;
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+	if (domain == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: domain not found\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	if (args->port_id != -1)
+		queue = dlb_get_domain_used_dir_pq(args->port_id, domain);
+	else
+		queue = DLB_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
+					  typeof(*queue));
+
+	/* Verification should catch this. */
+	if (queue == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: no available dir queues\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	dlb_configure_dir_queue(hw, domain, queue);
+
+	/* Configuration succeeded, so move the resource from the 'avail' to
+	 * the 'used' list (if it's not already there).
+	 */
+	if (args->port_id == -1) {
+		dlb_list_del(&domain->avail_dir_pq_pairs, &queue->domain_list);
+
+		dlb_list_add(&domain->used_dir_pq_pairs, &queue->domain_list);
+	}
+
+	resp->status = 0;
+
+	resp->id = queue->id;
+
+	return 0;
+}
+
+static void dlb_log_create_ldb_port_args(struct dlb_hw *hw,
+					 u32 domain_id,
+					 u64 pop_count_dma_base,
+					 u64 cq_dma_base,
+					 struct dlb_create_ldb_port_args *args)
+{
+	DLB_HW_INFO(hw, "DLB create load-balanced port arguments:\n");
+	DLB_HW_INFO(hw, "\tDomain ID:                 %d\n",
+		    domain_id);
+	DLB_HW_INFO(hw, "\tLDB credit pool ID:        %d\n",
+		    args->ldb_credit_pool_id);
+	DLB_HW_INFO(hw, "\tLDB credit high watermark: %d\n",
+		    args->ldb_credit_high_watermark);
+	DLB_HW_INFO(hw, "\tLDB credit low watermark:  %d\n",
+		    args->ldb_credit_low_watermark);
+	DLB_HW_INFO(hw, "\tLDB credit quantum:        %d\n",
+		    args->ldb_credit_quantum);
+	DLB_HW_INFO(hw, "\tDIR credit pool ID:        %d\n",
+		    args->dir_credit_pool_id);
+	DLB_HW_INFO(hw, "\tDIR credit high watermark: %d\n",
+		    args->dir_credit_high_watermark);
+	DLB_HW_INFO(hw, "\tDIR credit low watermark:  %d\n",
+		    args->dir_credit_low_watermark);
+	DLB_HW_INFO(hw, "\tDIR credit quantum:        %d\n",
+		    args->dir_credit_quantum);
+	DLB_HW_INFO(hw, "\tpop_count_address:         0x%"PRIx64"\n",
+		    pop_count_dma_base);
+	DLB_HW_INFO(hw, "\tCQ depth:                  %d\n",
+		    args->cq_depth);
+	DLB_HW_INFO(hw, "\tCQ hist list size:         %d\n",
+		    args->cq_history_list_size);
+	DLB_HW_INFO(hw, "\tCQ base address:           0x%"PRIx64"\n",
+		    cq_dma_base);
+}
+
+static struct dlb_credit_pool *
+dlb_get_domain_ldb_pool(u32 id, struct dlb_domain *domain)
+{
+	struct dlb_list_entry *iter;
+	struct dlb_credit_pool *pool;
+	RTE_SET_USED(iter);
+
+	if (id >= DLB_MAX_NUM_LDB_CREDIT_POOLS)
+		return NULL;
+
+	DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
+		if (pool->id == id)
+			return pool;
+
+	return NULL;
+}
+
+static struct dlb_credit_pool *
+dlb_get_domain_dir_pool(u32 id, struct dlb_domain *domain)
+{
+	struct dlb_list_entry *iter;
+	struct dlb_credit_pool *pool;
+	RTE_SET_USED(iter);
+
+	if (id >= DLB_MAX_NUM_DIR_CREDIT_POOLS)
+		return NULL;
+
+	DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
+		if (pool->id == id)
+			return pool;
+
+	return NULL;
+}
+
+static int
+dlb_verify_create_ldb_port_args(struct dlb_hw *hw,
+				u32 domain_id,
+				u64 pop_count_dma_base,
+				u64 cq_dma_base,
+				struct dlb_create_ldb_port_args *args,
+				struct dlb_cmd_response *resp)
+{
+	struct dlb_domain *domain;
+	struct dlb_credit_pool *pool;
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+
+	if (domain == NULL) {
+		resp->status = DLB_ST_INVALID_DOMAIN_ID;
+		return -1;
+	}
+
+	if (!domain->configured) {
+		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
+		return -1;
+	}
+
+	if (domain->started) {
+		resp->status = DLB_ST_DOMAIN_STARTED;
+		return -1;
+	}
+
+	if (dlb_list_empty(&domain->avail_ldb_ports)) {
+		resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
+		return -1;
+	}
+
+	/* If the scheduling domain has no LDB queues, we configure the
+	 * hardware to not supply the port with any LDB credits. In that
+	 * case, ignore the LDB credit arguments.
+	 */
+	if (!dlb_list_empty(&domain->used_ldb_queues) ||
+	    !dlb_list_empty(&domain->avail_ldb_queues)) {
+		pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
+					       domain);
+
+		if (pool == NULL || !pool->configured ||
+		    pool->domain_id != domain->id) {
+			resp->status = DLB_ST_INVALID_LDB_CREDIT_POOL_ID;
+			return -1;
+		}
+
+		if (args->ldb_credit_high_watermark > pool->avail_credits) {
+			resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
+			return -1;
+		}
+
+		if (args->ldb_credit_low_watermark >=
+		    args->ldb_credit_high_watermark) {
+			resp->status = DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK;
+			return -1;
+		}
+
+		if (args->ldb_credit_quantum >=
+		    args->ldb_credit_high_watermark) {
+			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
+			return -1;
+		}
+
+		if (args->ldb_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
+			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
+			return -1;
+		}
+	}
+
+	/* Likewise, if the scheduling domain has no DIR queues, we configure
+	 * the hardware to not supply the port with any DIR credits. In that
+	 * case, ignore the DIR credit arguments.
+	 */
+	if (!dlb_list_empty(&domain->used_dir_pq_pairs) ||
+	    !dlb_list_empty(&domain->avail_dir_pq_pairs)) {
+		pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
+					       domain);
+
+		if (pool == NULL || !pool->configured ||
+		    pool->domain_id != domain->id) {
+			resp->status = DLB_ST_INVALID_DIR_CREDIT_POOL_ID;
+			return -1;
+		}
+
+		if (args->dir_credit_high_watermark > pool->avail_credits) {
+			resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
+			return -1;
+		}
+
+		if (args->dir_credit_low_watermark >=
+		    args->dir_credit_high_watermark) {
+			resp->status = DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK;
+			return -1;
+		}
+
+		if (args->dir_credit_quantum >=
+		    args->dir_credit_high_watermark) {
+			resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
+			return -1;
+		}
+
+		if (args->dir_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
+			resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
+			return -1;
+		}
+	}
+
+	/* Check cache-line alignment */
+	if ((pop_count_dma_base & 0x3F) != 0) {
+		resp->status = DLB_ST_INVALID_POP_COUNT_VIRT_ADDR;
+		return -1;
+	}
+
+	if ((cq_dma_base & 0x3F) != 0) {
+		resp->status = DLB_ST_INVALID_CQ_VIRT_ADDR;
+		return -1;
+	}
+
+	if (args->cq_depth != 1 &&
+	    args->cq_depth != 2 &&
+	    args->cq_depth != 4 &&
+	    args->cq_depth != 8 &&
+	    args->cq_depth != 16 &&
+	    args->cq_depth != 32 &&
+	    args->cq_depth != 64 &&
+	    args->cq_depth != 128 &&
+	    args->cq_depth != 256 &&
+	    args->cq_depth != 512 &&
+	    args->cq_depth != 1024) {
+		resp->status = DLB_ST_INVALID_CQ_DEPTH;
+		return -1;
+	}
+
+	/* The history list size must be >= 1 */
+	if (!args->cq_history_list_size) {
+		resp->status = DLB_ST_INVALID_HIST_LIST_DEPTH;
+		return -1;
+	}
+
+	if (args->cq_history_list_size > domain->avail_hist_list_entries) {
+		resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
+		return -1;
+	}
+
+	return 0;
+}
+
+static void dlb_ldb_pool_update_credit_count(struct dlb_hw *hw,
+					     u32 pool_id,
+					     u32 count)
+{
+	hw->rsrcs.ldb_credit_pools[pool_id].avail_credits -= count;
+}
+
+static void dlb_dir_pool_update_credit_count(struct dlb_hw *hw,
+					     u32 pool_id,
+					     u32 count)
+{
+	hw->rsrcs.dir_credit_pools[pool_id].avail_credits -= count;
+}
+
+static int dlb_ldb_port_configure_pp(struct dlb_hw *hw,
+				     struct dlb_domain *domain,
+				     struct dlb_ldb_port *port,
+				     struct dlb_create_ldb_port_args *args)
+{
+	union dlb_sys_ldb_pp2ldbpool r0 = { {0} };
+	union dlb_sys_ldb_pp2dirpool r1 = { {0} };
+	union dlb_sys_ldb_pp2vf_pf r2 = { {0} };
+	union dlb_sys_ldb_pp2vas r3 = { {0} };
+	union dlb_sys_ldb_pp_v r4 = { {0} };
+	union dlb_chp_ldb_pp_ldb_crd_hwm r6 = { {0} };
+	union dlb_chp_ldb_pp_dir_crd_hwm r7 = { {0} };
+	union dlb_chp_ldb_pp_ldb_crd_lwm r8 = { {0} };
+	union dlb_chp_ldb_pp_dir_crd_lwm r9 = { {0} };
+	union dlb_chp_ldb_pp_ldb_min_crd_qnt r10 = { {0} };
+	union dlb_chp_ldb_pp_dir_min_crd_qnt r11 = { {0} };
+	union dlb_chp_ldb_pp_ldb_crd_cnt r12 = { {0} };
+	union dlb_chp_ldb_pp_dir_crd_cnt r13 = { {0} };
+	union dlb_chp_ldb_ldb_pp2pool r14 = { {0} };
+	union dlb_chp_ldb_dir_pp2pool r15 = { {0} };
+	union dlb_chp_ldb_pp_crd_req_state r16 = { {0} };
+	union dlb_chp_ldb_pp_ldb_push_ptr r17 = { {0} };
+	union dlb_chp_ldb_pp_dir_push_ptr r18 = { {0} };
+
+	struct dlb_credit_pool *ldb_pool = NULL;
+	struct dlb_credit_pool *dir_pool = NULL;
+
+	if (port->ldb_pool_used) {
+		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
+						   domain);
+		if (ldb_pool == NULL) {
+			DLB_HW_ERR(hw,
+				   "[%s()] Internal error: port validation failed\n",
+				   __func__);
+			return -EFAULT;
+		}
+	}
+
+	if (port->dir_pool_used) {
+		dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
+						   domain);
+		if (dir_pool == NULL) {
+			DLB_HW_ERR(hw,
+				   "[%s()] Internal error: port validation failed\n",
+				   __func__);
+			return -EFAULT;
+		}
+	}
+
+	r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id : 0;
+
+	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2LDBPOOL(port->id), r0.val);
+
+	r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id : 0;
+
+	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2DIRPOOL(port->id), r1.val);
+
+	r2.field.is_pf = 1;
+
+	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2VF_PF(port->id), r2.val);
+
+	r3.field.vas = domain->id;
+
+	DLB_CSR_WR(hw, DLB_SYS_LDB_PP2VAS(port->id), r3.val);
+
+	r6.field.hwm = args->ldb_credit_high_watermark;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_HWM(port->id), r6.val);
+
+	r7.field.hwm = args->dir_credit_high_watermark;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_HWM(port->id), r7.val);
+
+	r8.field.lwm = args->ldb_credit_low_watermark;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_LWM(port->id), r8.val);
+
+	r9.field.lwm = args->dir_credit_low_watermark;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_LWM(port->id), r9.val);
+
+	r10.field.quanta = args->ldb_credit_quantum;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id),
+		   r10.val);
+
+	r11.field.quanta = args->dir_credit_quantum;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id),
+		   r11.val);
+
+	r12.field.count = args->ldb_credit_high_watermark;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_CNT(port->id), r12.val);
+
+	r13.field.count = args->dir_credit_high_watermark;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_CNT(port->id), r13.val);
+
+	r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id : 0;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_LDB_PP2POOL(port->id), r14.val);
+
+	r15.field.pool = (port->dir_pool_used) ? dir_pool->id : 0;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_DIR_PP2POOL(port->id), r15.val);
+
+	r16.field.no_pp_credit_update = 0;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id), r16.val);
+
+	r17.field.push_pointer = 0;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_PUSH_PTR(port->id), r17.val);
+
+	r18.field.push_pointer = 0;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_PUSH_PTR(port->id), r18.val);
+
+	r4.field.pp_v = 1;
+
+	DLB_CSR_WR(hw,
+		   DLB_SYS_LDB_PP_V(port->id),
+		   r4.val);
+
+	return 0;
+}
+
+static int dlb_ldb_port_configure_cq(struct dlb_hw *hw,
+				     struct dlb_ldb_port *port,
+				     u64 pop_count_dma_base,
+				     u64 cq_dma_base,
+				     struct dlb_create_ldb_port_args *args)
+{
+	int i;
+
+	union dlb_sys_ldb_cq_addr_l r0 = { {0} };
+	union dlb_sys_ldb_cq_addr_u r1 = { {0} };
+	union dlb_sys_ldb_cq2vf_pf r2 = { {0} };
+	union dlb_chp_ldb_cq_tkn_depth_sel r3 = { {0} };
+	union dlb_chp_hist_list_lim r4 = { {0} };
+	union dlb_chp_hist_list_base r5 = { {0} };
+	union dlb_lsp_cq_ldb_infl_lim r6 = { {0} };
+	union dlb_lsp_cq2priov r7 = { {0} };
+	union dlb_chp_hist_list_push_ptr r8 = { {0} };
+	union dlb_chp_hist_list_pop_ptr r9 = { {0} };
+	union dlb_lsp_cq_ldb_tkn_depth_sel r10 = { {0} };
+	union dlb_sys_ldb_pp_addr_l r11 = { {0} };
+	union dlb_sys_ldb_pp_addr_u r12 = { {0} };
+
+	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
+	r0.field.addr_l = cq_dma_base >> 6;
+
+	DLB_CSR_WR(hw,
+		   DLB_SYS_LDB_CQ_ADDR_L(port->id),
+		   r0.val);
+
+	r1.field.addr_u = cq_dma_base >> 32;
+
+	DLB_CSR_WR(hw,
+		   DLB_SYS_LDB_CQ_ADDR_U(port->id),
+		   r1.val);
+
+	r2.field.is_pf = 1;
+
+	DLB_CSR_WR(hw,
+		   DLB_SYS_LDB_CQ2VF_PF(port->id),
+		   r2.val);
+
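+	/* The token_depth_select encoding below is log2(cq_depth) - 2,
+	 * clamped to a minimum of 1 for CQ depths of 8 or less.
+	 */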
+	if (args->cq_depth <= 8) {
+		r3.field.token_depth_select = 1;
+	} else if (args->cq_depth == 16) {
+		r3.field.token_depth_select = 2;
+	} else if (args->cq_depth == 32) {
+		r3.field.token_depth_select = 3;
+	} else if (args->cq_depth == 64) {
+		r3.field.token_depth_select = 4;
+	} else if (args->cq_depth == 128) {
+		r3.field.token_depth_select = 5;
+	} else if (args->cq_depth == 256) {
+		r3.field.token_depth_select = 6;
+	} else if (args->cq_depth == 512) {
+		r3.field.token_depth_select = 7;
+	} else if (args->cq_depth == 1024) {
+		r3.field.token_depth_select = 8;
+	} else {
+		DLB_HW_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id),
+		   r3.val);
+
+	r10.field.token_depth_select = r3.field.token_depth_select;
+	r10.field.ignore_depth = 0;
+	/* TDT algorithm: DLB must be able to write CQs with depth < 4 */
+	r10.field.enab_shallow_cq = 1;
+
+	DLB_CSR_WR(hw,
+		   DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id),
+		   r10.val);
+
+	/* To support CQs with depth less than 8, program the token count
+	 * register with a non-zero initial value. Operations such as domain
+	 * reset must take this initial value into account when quiescing the
+	 * CQ.
+	 */
+	port->init_tkn_cnt = 0;
+
+	if (args->cq_depth < 8) {
+		union dlb_lsp_cq_ldb_tkn_cnt r12 = { {0} };
+
+		port->init_tkn_cnt = 8 - args->cq_depth;
+
+		r12.field.token_count = port->init_tkn_cnt;
+
+		DLB_CSR_WR(hw,
+			   DLB_LSP_CQ_LDB_TKN_CNT(port->id),
+			   r12.val);
+	}
+
+	r4.field.limit = port->hist_list_entry_limit - 1;
+
+	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_LIM(port->id), r4.val);
+
+	r5.field.base = port->hist_list_entry_base;
+
+	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_BASE(port->id), r5.val);
+
+	r8.field.push_ptr = r5.field.base;
+	r8.field.generation = 0;
+
+	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_PUSH_PTR(port->id), r8.val);
+
+	r9.field.pop_ptr = r5.field.base;
+	r9.field.generation = 0;
+
+	DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_POP_PTR(port->id), r9.val);
+
+	/* The inflight limit sets a cap on the number of QEs for which this CQ
+	 * can owe completions at one time.
+	 */
+	r6.field.limit = args->cq_history_list_size;
+
+	DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_INFL_LIM(port->id), r6.val);
+
+	/* Disable the port's QID mappings */
+	r7.field.v = 0;
+
+	DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port->id), r7.val);
+
+	/* Two cache lines (128B) are dedicated for the port's pop counts */
+	r11.field.addr_l = pop_count_dma_base >> 7;
+
+	DLB_CSR_WR(hw, DLB_SYS_LDB_PP_ADDR_L(port->id), r11.val);
+
+	r12.field.addr_u = pop_count_dma_base >> 32;
+
+	DLB_CSR_WR(hw, DLB_SYS_LDB_PP_ADDR_U(port->id), r12.val);
+
+	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++)
+		port->qid_map[i].state = DLB_QUEUE_UNMAPPED;
+
+	return 0;
+}
+
+static void dlb_update_ldb_arb_threshold(struct dlb_hw *hw)
+{
+	union dlb_lsp_ctrl_config_0 r0 = { {0} };
+
+	/* From the hardware spec:
+	 * "The optimal value for ldb_arb_threshold is in the region of {8 *
+	 * #CQs}. It is expected therefore that the PF will change this value
+	 * dynamically as the number of active ports changes."
+	 */
+	r0.val = DLB_CSR_RD(hw, DLB_LSP_CTRL_CONFIG_0);
+
+	r0.field.ldb_arb_threshold = hw->pf.num_enabled_ldb_ports * 8;
+	r0.field.ldb_arb_ignore_empty = 1;
+	r0.field.ldb_arb_mode = 1;
+
+	DLB_CSR_WR(hw, DLB_LSP_CTRL_CONFIG_0, r0.val);
+
+	dlb_flush_csr(hw);
+}
+
+static int dlb_configure_ldb_port(struct dlb_hw *hw,
+				  struct dlb_domain *domain,
+				  struct dlb_ldb_port *port,
+				  u64 pop_count_dma_base,
+				  u64 cq_dma_base,
+				  struct dlb_create_ldb_port_args *args)
+{
+	struct dlb_credit_pool *ldb_pool, *dir_pool;
+	int ret;
+
+	port->hist_list_entry_base = domain->hist_list_entry_base +
+				     domain->hist_list_entry_offset;
+	port->hist_list_entry_limit = port->hist_list_entry_base +
+				      args->cq_history_list_size;
+
+	domain->hist_list_entry_offset += args->cq_history_list_size;
+	domain->avail_hist_list_entries -= args->cq_history_list_size;
+
+	port->ldb_pool_used = !dlb_list_empty(&domain->used_ldb_queues) ||
+			      !dlb_list_empty(&domain->avail_ldb_queues);
+	port->dir_pool_used = !dlb_list_empty(&domain->used_dir_pq_pairs) ||
+			      !dlb_list_empty(&domain->avail_dir_pq_pairs);
+
+	if (port->ldb_pool_used) {
+		u32 cnt = args->ldb_credit_high_watermark;
+
+		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
+						   domain);
+		if (ldb_pool == NULL) {
+			DLB_HW_ERR(hw,
+				   "[%s()] Internal error: port validation failed\n",
+				   __func__);
+			return -EFAULT;
+		}
+
+		dlb_ldb_pool_update_credit_count(hw, ldb_pool->id, cnt);
+	} else {
+		args->ldb_credit_high_watermark = 0;
+		args->ldb_credit_low_watermark = 0;
+		args->ldb_credit_quantum = 0;
+	}
+
+	if (port->dir_pool_used) {
+		u32 cnt = args->dir_credit_high_watermark;
+
+		dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
+						   domain);
+		if (dir_pool == NULL) {
+			DLB_HW_ERR(hw,
+				   "[%s()] Internal error: port validation failed\n",
+				   __func__);
+			return -EFAULT;
+		}
+
+		dlb_dir_pool_update_credit_count(hw, dir_pool->id, cnt);
+	} else {
+		args->dir_credit_high_watermark = 0;
+		args->dir_credit_low_watermark = 0;
+		args->dir_credit_quantum = 0;
+	}
+
+	ret = dlb_ldb_port_configure_cq(hw,
+					port,
+					pop_count_dma_base,
+					cq_dma_base,
+					args);
+	if (ret < 0)
+		return ret;
+
+	ret = dlb_ldb_port_configure_pp(hw, domain, port, args);
+	if (ret < 0)
+		return ret;
+
+	dlb_ldb_port_cq_enable(hw, port);
+
+	port->num_mappings = 0;
+
+	port->enabled = true;
+
+	hw->pf.num_enabled_ldb_ports++;
+
+	dlb_update_ldb_arb_threshold(hw);
+
+	port->configured = true;
+
+	return 0;
+}
+
+/**
+ * dlb_hw_create_ldb_port() - Allocate and initialize a load-balanced port and
+ *	its resources.
+ * @hw:	  Contains the current state of the DLB hardware.
+ * @args: User-provided arguments.
+ * @resp: Response to user.
+ *
+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
+ * satisfy a request, resp->status will be set accordingly.
+ */
+int dlb_hw_create_ldb_port(struct dlb_hw *hw,
+			   u32 domain_id,
+			   struct dlb_create_ldb_port_args *args,
+			   u64 pop_count_dma_base,
+			   u64 cq_dma_base,
+			   struct dlb_cmd_response *resp)
+{
+	struct dlb_ldb_port *port;
+	struct dlb_domain *domain;
+	int ret;
+
+	dlb_log_create_ldb_port_args(hw,
+				     domain_id,
+				     pop_count_dma_base,
+				     cq_dma_base,
+				     args);
+
+	/* Verify that hardware resources are available before attempting to
+	 * satisfy the request. This simplifies the error unwinding code.
+	 */
+	if (dlb_verify_create_ldb_port_args(hw,
+					    domain_id,
+					    pop_count_dma_base,
+					    cq_dma_base,
+					    args,
+					    resp))
+		return -EINVAL;
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+	if (domain == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: domain not found\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	port = DLB_DOM_LIST_HEAD(domain->avail_ldb_ports, typeof(*port));
+
+	/* Verification should catch this. */
+	if (port == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: no available ldb ports\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	if (port->configured) {
+		DLB_HW_ERR(hw,
+			   "[%s()] Internal error: avail_ldb_ports contains configured ports.\n",
+			   __func__);
+		return -EFAULT;
+	}
+
+	ret = dlb_configure_ldb_port(hw,
+				     domain,
+				     port,
+				     pop_count_dma_base,
+				     cq_dma_base,
+				     args);
+	if (ret < 0)
+		return ret;
+
+	/* Configuration succeeded, so move the resource from the 'avail' to
+	 * the 'used' list.
+	 */
+	dlb_list_del(&domain->avail_ldb_ports, &port->domain_list);
+
+	dlb_list_add(&domain->used_ldb_ports, &port->domain_list);
+
+	resp->status = 0;
+	resp->id = port->id;
+
+	return 0;
+}
+
+static void dlb_log_create_dir_port_args(struct dlb_hw *hw,
+					 u32 domain_id,
+					 u64 pop_count_dma_base,
+					 u64 cq_dma_base,
+					 struct dlb_create_dir_port_args *args)
+{
+	DLB_HW_INFO(hw, "DLB create directed port arguments:\n");
+	DLB_HW_INFO(hw, "\tDomain ID:                 %d\n",
+		    domain_id);
+	DLB_HW_INFO(hw, "\tLDB credit pool ID:        %d\n",
+		    args->ldb_credit_pool_id);
+	DLB_HW_INFO(hw, "\tLDB credit high watermark: %d\n",
+		    args->ldb_credit_high_watermark);
+	DLB_HW_INFO(hw, "\tLDB credit low watermark:  %d\n",
+		    args->ldb_credit_low_watermark);
+	DLB_HW_INFO(hw, "\tLDB credit quantum:        %d\n",
+		    args->ldb_credit_quantum);
+	DLB_HW_INFO(hw, "\tDIR credit pool ID:        %d\n",
+		    args->dir_credit_pool_id);
+	DLB_HW_INFO(hw, "\tDIR credit high watermark: %d\n",
+		    args->dir_credit_high_watermark);
+	DLB_HW_INFO(hw, "\tDIR credit low watermark:  %d\n",
+		    args->dir_credit_low_watermark);
+	DLB_HW_INFO(hw, "\tDIR credit quantum:        %d\n",
+		    args->dir_credit_quantum);
+	DLB_HW_INFO(hw, "\tpop_count_address:         0x%"PRIx64"\n",
+		    pop_count_dma_base);
+	DLB_HW_INFO(hw, "\tCQ depth:                  %d\n",
+		    args->cq_depth);
+	DLB_HW_INFO(hw, "\tCQ base address:           0x%"PRIx64"\n",
+		    cq_dma_base);
+}
+
+static int
+dlb_verify_create_dir_port_args(struct dlb_hw *hw,
+				u32 domain_id,
+				u64 pop_count_dma_base,
+				u64 cq_dma_base,
+				struct dlb_create_dir_port_args *args,
+				struct dlb_cmd_response *resp)
+{
+	struct dlb_domain *domain;
+	struct dlb_credit_pool *pool;
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+
+	if (domain == NULL) {
+		resp->status = DLB_ST_INVALID_DOMAIN_ID;
+		return -1;
+	}
+
+	if (!domain->configured) {
+		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
+		return -1;
+	}
+
+	if (domain->started) {
+		resp->status = DLB_ST_DOMAIN_STARTED;
+		return -1;
+	}
+
+	/* If the user claims the queue is already configured, validate
+	 * the queue ID, its domain, and whether the queue is configured.
+	 */
+	if (args->queue_id != -1) {
+		struct dlb_dir_pq_pair *queue;
+
+		queue = dlb_get_domain_used_dir_pq(args->queue_id,
+						   domain);
+
+		if (queue == NULL || queue->domain_id != domain->id ||
+		    !queue->queue_configured) {
+			resp->status = DLB_ST_INVALID_DIR_QUEUE_ID;
+			return -1;
+		}
+	}
+
+	/* If the port's queue is not configured, validate that a free
+	 * port-queue pair is available.
+	 */
+	if (args->queue_id == -1 &&
+	    dlb_list_empty(&domain->avail_dir_pq_pairs)) {
+		resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
+		return -1;
+	}
+
+	/* If the scheduling domain has no LDB queues, we configure the
+	 * hardware to not supply the port with any LDB credits. In that
+	 * case, ignore the LDB credit arguments.
+	 */
+	if (!dlb_list_empty(&domain->used_ldb_queues) ||
+	    !dlb_list_empty(&domain->avail_ldb_queues)) {
+		pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
+					       domain);
+
+		if (pool == NULL || !pool->configured ||
+		    pool->domain_id != domain->id) {
+			resp->status = DLB_ST_INVALID_LDB_CREDIT_POOL_ID;
+			return -1;
+		}
+
+		if (args->ldb_credit_high_watermark > pool->avail_credits) {
+			resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
+			return -1;
+		}
+
+		if (args->ldb_credit_low_watermark >=
+		    args->ldb_credit_high_watermark) {
+			resp->status = DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK;
+			return -1;
+		}
+
+		if (args->ldb_credit_quantum >=
+		    args->ldb_credit_high_watermark) {
+			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
+			return -1;
+		}
+
+		if (args->ldb_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
+			resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
+			return -1;
+		}
+	}
+
+	pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
+				       domain);
+
+	if (pool == NULL || !pool->configured ||
+	    pool->domain_id != domain->id) {
+		resp->status = DLB_ST_INVALID_DIR_CREDIT_POOL_ID;
+		return -1;
+	}
+
+	if (args->dir_credit_high_watermark > pool->avail_credits) {
+		resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
+		return -1;
+	}
+
+	if (args->dir_credit_low_watermark >= args->dir_credit_high_watermark) {
+		resp->status = DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK;
+		return -1;
+	}
+
+	if (args->dir_credit_quantum >= args->dir_credit_high_watermark) {
+		resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
+		return -1;
+	}
+
+	if (args->dir_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
+		resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
+		return -1;
+	}
+
+	/* Check cache-line alignment */
+	if ((pop_count_dma_base & 0x3F) != 0) {
+		resp->status = DLB_ST_INVALID_POP_COUNT_VIRT_ADDR;
+		return -1;
+	}
+
+	if ((cq_dma_base & 0x3F) != 0) {
+		resp->status = DLB_ST_INVALID_CQ_VIRT_ADDR;
+		return -1;
+	}
+
+	if (args->cq_depth != 8 &&
+	    args->cq_depth != 16 &&
+	    args->cq_depth != 32 &&
+	    args->cq_depth != 64 &&
+	    args->cq_depth != 128 &&
+	    args->cq_depth != 256 &&
+	    args->cq_depth != 512 &&
+	    args->cq_depth != 1024) {
+		resp->status = DLB_ST_INVALID_CQ_DEPTH;
+		return -1;
+	}
+
+	return 0;
+}
+
+static int dlb_dir_port_configure_pp(struct dlb_hw *hw,
+				     struct dlb_domain *domain,
+				     struct dlb_dir_pq_pair *port,
+				     struct dlb_create_dir_port_args *args)
+{
+	union dlb_sys_dir_pp2ldbpool r0 = { {0} };
+	union dlb_sys_dir_pp2dirpool r1 = { {0} };
+	union dlb_sys_dir_pp2vf_pf r2 = { {0} };
+	union dlb_sys_dir_pp2vas r3 = { {0} };
+	union dlb_sys_dir_pp_v r4 = { {0} };
+	union dlb_chp_dir_pp_ldb_crd_hwm r6 = { {0} };
+	union dlb_chp_dir_pp_dir_crd_hwm r7 = { {0} };
+	union dlb_chp_dir_pp_ldb_crd_lwm r8 = { {0} };
+	union dlb_chp_dir_pp_dir_crd_lwm r9 = { {0} };
+	union dlb_chp_dir_pp_ldb_min_crd_qnt r10 = { {0} };
+	union dlb_chp_dir_pp_dir_min_crd_qnt r11 = { {0} };
+	union dlb_chp_dir_pp_ldb_crd_cnt r12 = { {0} };
+	union dlb_chp_dir_pp_dir_crd_cnt r13 = { {0} };
+	union dlb_chp_dir_ldb_pp2pool r14 = { {0} };
+	union dlb_chp_dir_dir_pp2pool r15 = { {0} };
+	union dlb_chp_dir_pp_crd_req_state r16 = { {0} };
+	union dlb_chp_dir_pp_ldb_push_ptr r17 = { {0} };
+	union dlb_chp_dir_pp_dir_push_ptr r18 = { {0} };
+
+	struct dlb_credit_pool *ldb_pool = NULL;
+	struct dlb_credit_pool *dir_pool = NULL;
+
+	if (port->ldb_pool_used) {
+		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
+						   domain);
+		if (ldb_pool == NULL) {
+			DLB_HW_ERR(hw,
+				   "[%s()] Internal error: port validation failed\n",
+				   __func__);
+			return -EFAULT;
+		}
+	}
+
+	if (port->dir_pool_used) {
+		dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
+						   domain);
+		if (dir_pool == NULL) {
+			DLB_HW_ERR(hw,
+				   "[%s()] Internal error: port validation failed\n",
+				   __func__);
+			return -EFAULT;
+		}
+	}
+
+	r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id : 0;
+
+	DLB_CSR_WR(hw,
+		   DLB_SYS_DIR_PP2LDBPOOL(port->id),
+		   r0.val);
+
+	r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id : 0;
+
+	DLB_CSR_WR(hw,
+		   DLB_SYS_DIR_PP2DIRPOOL(port->id),
+		   r1.val);
+
+	r2.field.is_pf = 1;
+	r2.field.is_hw_dsi = 0;
+
+	DLB_CSR_WR(hw,
+		   DLB_SYS_DIR_PP2VF_PF(port->id),
+		   r2.val);
+
+	r3.field.vas = domain->id;
+
+	DLB_CSR_WR(hw,
+		   DLB_SYS_DIR_PP2VAS(port->id),
+		   r3.val);
+
+	r6.field.hwm = args->ldb_credit_high_watermark;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_PP_LDB_CRD_HWM(port->id),
+		   r6.val);
+
+	r7.field.hwm = args->dir_credit_high_watermark;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_PP_DIR_CRD_HWM(port->id),
+		   r7.val);
+
+	r8.field.lwm = args->ldb_credit_low_watermark;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_PP_LDB_CRD_LWM(port->id),
+		   r8.val);
+
+	r9.field.lwm = args->dir_credit_low_watermark;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_PP_DIR_CRD_LWM(port->id),
+		   r9.val);
+
+	r10.field.quanta = args->ldb_credit_quantum;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id),
+		   r10.val);
+
+	r11.field.quanta = args->dir_credit_quantum;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id),
+		   r11.val);
+
+	r12.field.count = args->ldb_credit_high_watermark;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_PP_LDB_CRD_CNT(port->id),
+		   r12.val);
+
+	r13.field.count = args->dir_credit_high_watermark;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_PP_DIR_CRD_CNT(port->id),
+		   r13.val);
+
+	r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id : 0;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_LDB_PP2POOL(port->id),
+		   r14.val);
+
+	r15.field.pool = (port->dir_pool_used) ? dir_pool->id : 0;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_DIR_PP2POOL(port->id),
+		   r15.val);
+
+	r16.field.no_pp_credit_update = 0;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
+		   r16.val);
+
+	r17.field.push_pointer = 0;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_PP_LDB_PUSH_PTR(port->id),
+		   r17.val);
+
+	r18.field.push_pointer = 0;
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_PP_DIR_PUSH_PTR(port->id),
+		   r18.val);
+
+	r4.field.pp_v = 1;
+	r4.field.mb_dm = 0;
+
+	DLB_CSR_WR(hw, DLB_SYS_DIR_PP_V(port->id), r4.val);
+
+	return 0;
+}
+
+static int dlb_dir_port_configure_cq(struct dlb_hw *hw,
+				     struct dlb_dir_pq_pair *port,
+				     u64 pop_count_dma_base,
+				     u64 cq_dma_base,
+				     struct dlb_create_dir_port_args *args)
+{
+	union dlb_sys_dir_cq_addr_l r0 = { {0} };
+	union dlb_sys_dir_cq_addr_u r1 = { {0} };
+	union dlb_sys_dir_cq2vf_pf r2 = { {0} };
+	union dlb_chp_dir_cq_tkn_depth_sel r3 = { {0} };
+	union dlb_lsp_cq_dir_tkn_depth_sel_dsi r4 = { {0} };
+	union dlb_sys_dir_pp_addr_l r5 = { {0} };
+	union dlb_sys_dir_pp_addr_u r6 = { {0} };
+
+	/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
+	r0.field.addr_l = cq_dma_base >> 6;
+
+	DLB_CSR_WR(hw, DLB_SYS_DIR_CQ_ADDR_L(port->id), r0.val);
+
+	r1.field.addr_u = cq_dma_base >> 32;
+
+	DLB_CSR_WR(hw, DLB_SYS_DIR_CQ_ADDR_U(port->id), r1.val);
+
+	r2.field.is_pf = 1;
+
+	DLB_CSR_WR(hw, DLB_SYS_DIR_CQ2VF_PF(port->id), r2.val);
+
+	if (args->cq_depth == 8) {
+		r3.field.token_depth_select = 1;
+	} else if (args->cq_depth == 16) {
+		r3.field.token_depth_select = 2;
+	} else if (args->cq_depth == 32) {
+		r3.field.token_depth_select = 3;
+	} else if (args->cq_depth == 64) {
+		r3.field.token_depth_select = 4;
+	} else if (args->cq_depth == 128) {
+		r3.field.token_depth_select = 5;
+	} else if (args->cq_depth == 256) {
+		r3.field.token_depth_select = 6;
+	} else if (args->cq_depth == 512) {
+		r3.field.token_depth_select = 7;
+	} else if (args->cq_depth == 1024) {
+		r3.field.token_depth_select = 8;
+	} else {
+		DLB_HW_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	DLB_CSR_WR(hw,
+		   DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id),
+		   r3.val);
+
+	r4.field.token_depth_select = r3.field.token_depth_select;
+	r4.field.disable_wb_opt = 0;
+
+	DLB_CSR_WR(hw,
+		   DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id),
+		   r4.val);
+
+	/* Two cache lines (128B) are dedicated for the port's pop counts */
+	r5.field.addr_l = pop_count_dma_base >> 7;
+
+	DLB_CSR_WR(hw, DLB_SYS_DIR_PP_ADDR_L(port->id), r5.val);
+
+	r6.field.addr_u = pop_count_dma_base >> 32;
+
+	DLB_CSR_WR(hw, DLB_SYS_DIR_PP_ADDR_U(port->id), r6.val);
+
+	return 0;
+}
+
+static int dlb_configure_dir_port(struct dlb_hw *hw,
+				  struct dlb_domain *domain,
+				  struct dlb_dir_pq_pair *port,
+				  u64 pop_count_dma_base,
+				  u64 cq_dma_base,
+				  struct dlb_create_dir_port_args *args)
+{
+	struct dlb_credit_pool *ldb_pool, *dir_pool;
+	int ret;
+
+	port->ldb_pool_used = !dlb_list_empty(&domain->used_ldb_queues) ||
+			      !dlb_list_empty(&domain->avail_ldb_queues);
+
+	/* Each directed port has a directed queue, hence this port requires
+	 * directed credits.
+	 */
+	port->dir_pool_used = true;
+
+	if (port->ldb_pool_used) {
+		u32 cnt = args->ldb_credit_high_watermark;
+
+		ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
+						   domain);
+		if (ldb_pool == NULL) {
+			DLB_HW_ERR(hw,
+				   "[%s()] Internal error: port validation failed\n",
+				   __func__);
+			return -EFAULT;
+		}
+
+		dlb_ldb_pool_update_credit_count(hw, ldb_pool->id, cnt);
+	} else {
+		args->ldb_credit_high_watermark = 0;
+		args->ldb_credit_low_watermark = 0;
+		args->ldb_credit_quantum = 0;
+	}
+
+	dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id, domain);
+	if (dir_pool == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s()] Internal error: port validation failed\n",
+			   __func__);
+		return -EFAULT;
+	}
+
+	dlb_dir_pool_update_credit_count(hw,
+					 dir_pool->id,
+					 args->dir_credit_high_watermark);
+
+	ret = dlb_dir_port_configure_cq(hw,
+					port,
+					pop_count_dma_base,
+					cq_dma_base,
+					args);
+
+	if (ret < 0)
+		return ret;
+
+	ret = dlb_dir_port_configure_pp(hw, domain, port, args);
+	if (ret < 0)
+		return ret;
+
+	dlb_dir_port_cq_enable(hw, port);
+
+	port->enabled = true;
+
+	port->port_configured = true;
+
+	return 0;
+}
+
+/**
+ * dlb_hw_create_dir_port() - Allocate and initialize a DLB directed port and
+ *	queue. The port/queue pair have the same ID and name.
+ * @hw: Contains the current state of the DLB hardware.
+ * @domain_id: ID of the scheduling domain the port is created in.
+ * @args: User-provided arguments.
+ * @pop_count_dma_base: Base address of the memory used for the port's two
+ *	credit pop count cache lines.
+ * @cq_dma_base: Base address of the port's consumer queue memory.
+ * @resp: Response to user.
+ *
+ * Return: 0 on success, < 0 on error. If the driver is unable to satisfy the
+ * request, resp->status will be set accordingly.
+ */
+int dlb_hw_create_dir_port(struct dlb_hw *hw,
+			   u32 domain_id,
+			   struct dlb_create_dir_port_args *args,
+			   u64 pop_count_dma_base,
+			   u64 cq_dma_base,
+			   struct dlb_cmd_response *resp)
+{
+	struct dlb_dir_pq_pair *port;
+	struct dlb_domain *domain;
+	int ret;
+
+	dlb_log_create_dir_port_args(hw,
+				     domain_id,
+				     pop_count_dma_base,
+				     cq_dma_base,
+				     args);
+
+	/* Verify that hardware resources are available before attempting to
+	 * satisfy the request. This simplifies the error unwinding code.
+	 */
+	if (dlb_verify_create_dir_port_args(hw,
+					    domain_id,
+					    pop_count_dma_base,
+					    cq_dma_base,
+					    args,
+					    resp))
+		return -EINVAL;
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+	if (domain == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: domain not found\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	if (args->queue_id != -1)
+		port = dlb_get_domain_used_dir_pq(args->queue_id,
+						  domain);
+	else
+		port = DLB_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
+					 typeof(*port));
+
+	/* Verification should catch this. */
+	if (port == NULL) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: no available dir ports\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	ret = dlb_configure_dir_port(hw,
+				     domain,
+				     port,
+				     pop_count_dma_base,
+				     cq_dma_base,
+				     args);
+	if (ret < 0)
+		return ret;
+
+	/* Configuration succeeded, so move the resource from the 'avail' to
+	 * the 'used' list (if it's not already there).
+	 */
+	if (args->queue_id == -1) {
+		dlb_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
+
+		dlb_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
+	}
+
+	resp->status = 0;
+	resp->id = port->id;
+
+	return 0;
+}
+
diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c
index fffb88b..cc91db1 100644
--- a/drivers/event/dlb/pf/dlb_pf.c
+++ b/drivers/event/dlb/pf/dlb_pf.c
@@ -221,6 +221,209 @@ dlb_pf_ldb_queue_create(struct dlb_hw_dev *handle,
 }
 
 static int
+dlb_pf_dir_queue_create(struct dlb_hw_dev *handle,
+			struct dlb_create_dir_queue_args *cfg)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_create_dir_queue(&dlb_dev->hw,
+				      handle->domain_id,
+				      cfg,
+				      &response);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
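+/* Reserve an IOVA-contiguous memzone of 'size' bytes with the requested
+ * alignment on the calling lcore's NUMA socket (the master lcore's socket if
+ * called from a non-EAL thread). On success the virtual address is returned
+ * and *phys is set to the IOVA for device DMA; on failure NULL is returned
+ * and *phys is zeroed.
+ */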
+static void *
+dlb_alloc_coherent_aligned(const struct rte_memzone **mz, rte_iova_t *phys,
+			   size_t size, int align)
+{
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	uint32_t core_id = rte_lcore_id();
+	unsigned int socket_id;
+
+	snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb_port_mem_%lx",
+		 (unsigned long)rte_get_timer_cycles());
+	if (core_id == (unsigned int)LCORE_ID_ANY)
+		core_id = rte_get_master_lcore();
+	socket_id = rte_lcore_to_socket_id(core_id);
+	*mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
+					 RTE_MEMZONE_IOVA_CONTIG, align);
+	if (*mz == NULL) {
+		DLB_LOG_ERR("Unable to allocate DMA memory of size %zu bytes\n",
+			    size);
+		*phys = 0;
+		return NULL;
+	}
+	*phys = (*mz)->iova;
+	return (*mz)->addr;
+}
+
+static int
+dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
+		       struct dlb_create_ldb_port_args *cfg,
+		       enum dlb_cq_poll_modes poll_mode)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+	uint8_t *port_base;
+	const struct rte_memzone *mz;
+	int alloc_sz, qe_sz, cq_alloc_depth;
+	rte_iova_t pp_dma_base;
+	rte_iova_t pc_dma_base;
+	rte_iova_t cq_dma_base;
+	int is_dir = false;
+
+	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
+
+	if (poll_mode == DLB_CQ_POLL_MODE_STD)
+		qe_sz = sizeof(struct dlb_dequeue_qe);
+	else
+		qe_sz = RTE_CACHE_LINE_SIZE;
+
+	/* The hardware always uses a CQ depth of at least
+	 * DLB_MIN_HARDWARE_CQ_DEPTH, even though from the user
+	 * perspective we support a depth as low as 1 for LDB ports.
+	 */
+	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB_MIN_HARDWARE_CQ_DEPTH);
+
+	/* Calculate the port memory required, including two cache lines for
+	 * credit pop counts. Round up to the nearest cache line.
+	 */
+	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cq_alloc_depth * qe_sz;
+	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
+
+	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
+					       alloc_sz, PAGE_SIZE);
+	if (port_base == NULL)
+		return -ENOMEM;
+
+	/* Lock the page in memory */
+	ret = rte_mem_lock_page(port_base);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
+		goto create_port_err;
+	}
+
+	memset(port_base, 0, alloc_sz);
+	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
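+	/* Layout of the allocation (assuming a 64B cache line):
+	 *   [0, 64):    LDB credit pop count
+	 *   [64, 128):  DIR credit pop count
+	 *   [128, ...): CQ entries (cq_alloc_depth QEs of qe_sz bytes each)
+	 */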
+
+	ret = dlb_hw_create_ldb_port(&dlb_dev->hw,
+				     handle->domain_id,
+				     cfg,
+				     pc_dma_base,
+				     cq_dma_base,
+				     &response);
+	if (ret)
+		goto create_port_err;
+
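+	/* Compute this port's producer port (PP) address: PP_BASE() selects
+	 * the LDB vs. DIR PP region within the device's function memory, and
+	 * each port owns one page of that region.
+	 */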
+	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
+	dlb_port[response.id][DLB_LDB].pp_addr =
+		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
+
+	dlb_port[response.id][DLB_LDB].cq_base =
+		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
+
+	dlb_port[response.id][DLB_LDB].ldb_popcount =
+		(void *)(uintptr_t)port_base;
+	dlb_port[response.id][DLB_LDB].dir_popcount = (void *)(uintptr_t)
+		(port_base + RTE_CACHE_LINE_SIZE);
+	dlb_port[response.id][DLB_LDB].mz = mz;
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+create_port_err:
+
+	/* On failure, release the port memory reserved above. */
+	if (ret)
+		rte_memzone_free(mz);
+
+	return ret;
+}
+
+static int
+dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
+		       struct dlb_create_dir_port_args *cfg,
+		       enum dlb_cq_poll_modes poll_mode)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+	uint8_t *port_base;
+	const struct rte_memzone *mz;
+	int alloc_sz, qe_sz;
+	rte_iova_t pp_dma_base;
+	rte_iova_t pc_dma_base;
+	rte_iova_t cq_dma_base;
+	int is_dir = true;
+
+	DLB_INFO(dev->dlb_device, "Entering %s()\n", __func__);
+
+	if (poll_mode == DLB_CQ_POLL_MODE_STD)
+		qe_sz = sizeof(struct dlb_dequeue_qe);
+	else
+		qe_sz = RTE_CACHE_LINE_SIZE;
+
+	/* Calculate the port memory required, including two cache lines for
+	 * credit pop counts. Round up to the nearest cache line.
+	 */
+	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz;
+	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
+
+	port_base = dlb_alloc_coherent_aligned(&mz, &pc_dma_base,
+					       alloc_sz, PAGE_SIZE);
+	if (port_base == NULL)
+		return -ENOMEM;
+
+	/* Lock the page in memory */
+	ret = rte_mem_lock_page(port_base);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb pf pmd could not lock page for device i/o\n");
+		goto create_port_err;
+	}
+
+	memset(port_base, 0, alloc_sz);
+	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
+
+	ret = dlb_hw_create_dir_port(&dlb_dev->hw,
+				     handle->domain_id,
+				     cfg,
+				     pc_dma_base,
+				     cq_dma_base,
+				     &response);
+	if (ret)
+		goto create_port_err;
+
+	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
+	dlb_port[response.id][DLB_DIR].pp_addr =
+		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
+
+	dlb_port[response.id][DLB_DIR].cq_base =
+		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
+
+	dlb_port[response.id][DLB_DIR].ldb_popcount =
+		(void *)(uintptr_t)port_base;
+	dlb_port[response.id][DLB_DIR].dir_popcount = (void *)(uintptr_t)
+		(port_base + RTE_CACHE_LINE_SIZE);
+	dlb_port[response.id][DLB_DIR].mz = mz;
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+create_port_err:
+
+	/* On failure, release the port memory reserved above. */
+	if (ret)
+		rte_memzone_free(mz);
+
+	return ret;
+}
+
+static int
 dlb_pf_get_sn_allocation(struct dlb_hw_dev *handle,
 			 struct dlb_get_sn_allocation_args *args)
 {
@@ -287,6 +490,9 @@ dlb_pf_iface_fn_ptrs_init(void)
 	dlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;
 	dlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;
 	dlb_iface_ldb_queue_create = dlb_pf_ldb_queue_create;
+	dlb_iface_dir_queue_create = dlb_pf_dir_queue_create;
+	dlb_iface_ldb_port_create = dlb_pf_ldb_port_create;
+	dlb_iface_dir_port_create = dlb_pf_dir_port_create;
 	dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
 	dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
 	dlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;
-- 
2.6.4

