DPDK patches and discussions
From: Timothy McDaniel <timothy.mcdaniel@intel.com>
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
	harry.van.haaren@intel.com, jerinj@marvell.com
Subject: [dpdk-dev] [PATCH v4 10/22] event/dlb: add queue setup
Date: Fri, 11 Sep 2020 14:18:28 -0500	[thread overview]
Message-ID: <1599851920-16802-11-git-send-email-timothy.mcdaniel@intel.com> (raw)
In-Reply-To: <1599851920-16802-1-git-send-email-timothy.mcdaniel@intel.com>

Load-balanced (LDB) queues are set up here.
Directed queues are not set up until link time, at which
point we know the directed port ID. Directed queue setup
will only fail if this queue is already set up or there are
no directed queues left to configure.
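
For reference, this is roughly how an application exercises the new
entry point through the public eventdev API. A minimal sketch -- the
device ID, queue IDs, and configuration values below are illustrative
assumptions, not part of this patch:

#include <string.h>
#include <rte_eventdev.h>

/* Sketch: configure one ordered load-balanced queue and one
 * single-link (directed) queue on an already-configured eventdev.
 */
static int
setup_example_queues(uint8_t dev_id)
{
	struct rte_event_queue_conf conf;
	int ret;

	/* Ordered LDB queue: nb_atomic_order_sequences selects the
	 * sequence-number allocation the PMD must satisfy.
	 */
	memset(&conf, 0, sizeof(conf));
	conf.schedule_type = RTE_SCHED_TYPE_ORDERED;
	conf.nb_atomic_order_sequences = 64;
	conf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ret = rte_event_queue_setup(dev_id, 0, &conf);
	if (ret < 0)
		return ret;

	/* Directed queue: flagged single-link. Per this patch, the
	 * hardware queue itself is not created until link time.
	 */
	memset(&conf, 0, sizeof(conf));
	conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
	conf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	return rte_event_queue_setup(dev_id, 1, &conf);
}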

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/dlb.c                  | 294 +++++++++++++
 drivers/event/dlb/dlb_iface.c            |  12 +
 drivers/event/dlb/dlb_iface.h            |  12 +
 drivers/event/dlb/pf/base/dlb_resource.c | 710 ++++++++++++++++++++++++-------
 drivers/event/dlb/pf/dlb_pf.c            |  81 ++++
 5 files changed, 947 insertions(+), 162 deletions(-)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index fa9213c..0b474a5 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -662,6 +662,299 @@ dlb_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
 	queue_conf->priority = 0;
 }
 
+static int32_t
+dlb_hw_create_ldb_queue(struct dlb_eventdev *dlb,
+			struct dlb_queue *queue,
+			const struct rte_event_queue_conf *evq_conf)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_create_ldb_queue_args cfg;
+	struct dlb_cmd_response response;
+	int32_t ret;
+	uint32_t qm_qid;
+	int sched_type = -1;
+
+	if (evq_conf == NULL)
+		return -EINVAL;
+
+	if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
+		if (evq_conf->nb_atomic_order_sequences != 0)
+			sched_type = RTE_SCHED_TYPE_ORDERED;
+		else
+			sched_type = RTE_SCHED_TYPE_PARALLEL;
+	} else {
+		sched_type = evq_conf->schedule_type;
+	}
+
+	cfg.response = (uintptr_t)&response;
+	cfg.num_atomic_inflights = dlb->num_atm_inflights_per_queue;
+	cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
+	cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
+
+	if (sched_type != RTE_SCHED_TYPE_ORDERED) {
+		cfg.num_sequence_numbers = 0;
+		cfg.num_qid_inflights = DLB_DEF_UNORDERED_QID_INFLIGHTS;
+	}
+
+	ret = dlb_iface_ldb_queue_create(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: create LB event queue error, ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return -EINVAL;
+	}
+
+	qm_qid = response.id;
+
+	/* Save off queue config for debug, resource lookups, and reconfig */
+	queue->num_qid_inflights = cfg.num_qid_inflights;
+	queue->num_atm_inflights = cfg.num_atomic_inflights;
+
+	queue->sched_type = sched_type;
+	queue->config_state = DLB_CONFIGURED;
+
+	DLB_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
+		    qm_qid,
+		    cfg.num_atomic_inflights,
+		    cfg.num_sequence_numbers,
+		    cfg.num_qid_inflights);
+
+	return qm_qid;
+}
+
+static int32_t
+dlb_get_sn_allocation(struct dlb_eventdev *dlb, int group)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_get_sn_allocation_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	cfg.group = group;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_get_sn_allocation(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: get_sn_allocation ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return response.id;
+}
+
+static int
+dlb_set_sn_allocation(struct dlb_eventdev *dlb, int group, int num)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_set_sn_allocation_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	cfg.num = num;
+	cfg.group = group;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_set_sn_allocation(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: set_sn_allocation ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int32_t
+dlb_get_sn_occupancy(struct dlb_eventdev *dlb, int group)
+{
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_get_sn_occupancy_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	cfg.group = group;
+	cfg.response = (uintptr_t)&response;
+
+	ret = dlb_iface_get_sn_occupancy(handle, &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: get_sn_occupancy ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return response.id;
+}
+
+/* Query the current sequence number allocations and, if they conflict with the
+ * requested LDB queue configuration, attempt to re-allocate sequence numbers.
+ * This is best-effort; if re-allocation fails, the subsequent load-balanced
+ * queue configuration attempt will fail and return an error.
+ */
+static void
+dlb_program_sn_allocation(struct dlb_eventdev *dlb,
+			  const struct rte_event_queue_conf *queue_conf)
+{
+	int grp_occupancy[DLB_NUM_SN_GROUPS];
+	int grp_alloc[DLB_NUM_SN_GROUPS];
+	int i, sequence_numbers;
+
+	sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
+
+	for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
+		int total_slots;
+
+		grp_alloc[i] = dlb_get_sn_allocation(dlb, i);
+		if (grp_alloc[i] < 0)
+			return;
+
+		total_slots = DLB_MAX_LDB_SN_ALLOC / grp_alloc[i];
+
+		grp_occupancy[i] = dlb_get_sn_occupancy(dlb, i);
+		if (grp_occupancy[i] < 0)
+			return;
+
+		/* DLB has at least one available slot for the requested
+		 * sequence numbers, so no further configuration is required.
+		 */
+		if (grp_alloc[i] == sequence_numbers &&
+		    grp_occupancy[i] < total_slots)
+			return;
+	}
+
+	/* None of the sequence number groups are configured for the requested
+	 * sequence numbers, so we have to reconfigure one of them. This is
+	 * only possible if a group is not in use.
+	 */
+	for (i = 0; i < DLB_NUM_SN_GROUPS; i++) {
+		if (grp_occupancy[i] == 0)
+			break;
+	}
+
+	if (i == DLB_NUM_SN_GROUPS) {
+		DLB_LOG_ERR("[%s()] No groups with %d sequence numbers are available or have free slots\n",
+			    __func__, sequence_numbers);
+		return;
+	}
+
+	/* Attempt to configure slot i with the requested number of sequence
+	 * numbers. Ignore the return value -- if this fails, the error will be
+	 * caught during subsequent queue configuration.
+	 */
+	dlb_set_sn_allocation(dlb, i, sequence_numbers);
+}
+
+static int
+dlb_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
+			     struct dlb_eventdev_queue *ev_queue,
+			     const struct rte_event_queue_conf *queue_conf)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	int32_t qm_qid;
+
+	if (queue_conf->nb_atomic_order_sequences)
+		dlb_program_sn_allocation(dlb, queue_conf);
+
+	qm_qid = dlb_hw_create_ldb_queue(dlb,
+					 &ev_queue->qm_queue,
+					 queue_conf);
+	if (qm_qid < 0) {
+		DLB_LOG_ERR("Failed to create the load-balanced queue\n");
+
+		return qm_qid;
+	}
+
+	dlb->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
+
+	ev_queue->qm_queue.id = qm_qid;
+
+	return 0;
+}
+
+static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
+{
+	int i, num = 0;
+
+	for (i = 0; i < dlb->num_queues; i++) {
+		if (dlb->ev_queues[i].setup_done &&
+		    dlb->ev_queues[i].qm_queue.is_directed)
+			num++;
+	}
+
+	return num;
+}
+
+static void
+dlb_queue_link_teardown(struct dlb_eventdev *dlb,
+			struct dlb_eventdev_queue *ev_queue)
+{
+	struct dlb_eventdev_port *ev_port;
+	int i, j;
+
+	for (i = 0; i < dlb->num_ports; i++) {
+		ev_port = &dlb->ev_ports[i];
+
+		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+			if (!ev_port->link[j].valid ||
+			    ev_port->link[j].queue_id != ev_queue->id)
+				continue;
+
+			ev_port->link[j].valid = false;
+			ev_port->num_links--;
+		}
+	}
+
+	ev_queue->num_links = 0;
+}
+
+static int
+dlb_eventdev_queue_setup(struct rte_eventdev *dev,
+			 uint8_t ev_qid,
+			 const struct rte_event_queue_conf *queue_conf)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	struct dlb_eventdev_queue *ev_queue;
+	int ret;
+
+	if (!queue_conf)
+		return -EINVAL;
+
+	if (ev_qid >= dlb->num_queues)
+		return -EINVAL;
+
+	ev_queue = &dlb->ev_queues[ev_qid];
+
+	ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
+		RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+	ev_queue->id = ev_qid;
+	ev_queue->conf = *queue_conf;
+
+	if (!ev_queue->qm_queue.is_directed) {
+		ret = dlb_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
+	} else {
+		/* The directed queue isn't set up until link time, at which
+		 * point we know its directed port ID. Directed queue setup
+		 * will only fail if this queue is already set up or there are
+		 * no directed queues left to configure.
+		 */
+		ret = 0;
+
+		ev_queue->qm_queue.config_state = DLB_NOT_CONFIGURED;
+
+		if (ev_queue->setup_done ||
+		    dlb_num_dir_queues_setup(dlb) == dlb->num_dir_queues)
+			ret = -EINVAL;
+	}
+
+	/* Tear down pre-existing port->queue links */
+	if (!ret && dlb->run_state == DLB_RUN_STATE_STOPPED)
+		dlb_queue_link_teardown(dlb, ev_queue);
+
+	if (!ret)
+		ev_queue->setup_done = true;
+
+	return ret;
+}
+
 static int
 set_dev_id(const char *key __rte_unused,
 	   const char *value,
@@ -740,6 +1033,7 @@ dlb_entry_points_init(struct rte_eventdev *dev)
 		.dev_configure    = dlb_eventdev_configure,
 		.queue_def_conf   = dlb_eventdev_queue_default_conf_get,
 		.port_def_conf    = dlb_eventdev_port_default_conf_get,
+		.queue_setup      = dlb_eventdev_queue_setup,
 		.dump             = dlb_eventdev_dump,
 		.xstats_get       = dlb_eventdev_xstats_get,
 		.xstats_get_names = dlb_eventdev_xstats_get_names,
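
The slot arithmetic in dlb_program_sn_allocation() above is easy to
misread, so here is a standalone sketch of the same computation with
concrete numbers. The 2048 total is an assumption standing in for
DLB_MAX_LDB_SN_ALLOC, whose real value comes from the driver headers:

#include <stdio.h>

#define EX_MAX_LDB_SN_ALLOC 2048	/* assumed for illustration */

int main(void)
{
	/* A group configured for N sequence numbers per queue exposes
	 * EX_MAX_LDB_SN_ALLOC / N queue slots, exactly the division
	 * performed in dlb_program_sn_allocation().
	 */
	int allocs[] = {32, 64, 128, 256, 512, 1024};
	unsigned int i;

	for (i = 0; i < sizeof(allocs) / sizeof(allocs[0]); i++)
		printf("SNs per queue: %4d -> slots per group: %d\n",
		       allocs[i], EX_MAX_LDB_SN_ALLOC / allocs[i]);
	return 0;
}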
diff --git a/drivers/event/dlb/dlb_iface.c b/drivers/event/dlb/dlb_iface.c
index 9f74d43..b5757c9 100644
--- a/drivers/event/dlb/dlb_iface.c
+++ b/drivers/event/dlb/dlb_iface.c
@@ -47,6 +47,18 @@ int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,
 int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,
 					struct dlb_create_dir_pool_args *cfg);
 
+int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
+				  struct dlb_create_ldb_queue_args *cfg);
+
 int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
 				  enum dlb_cq_poll_modes *mode);
 
+int (*dlb_iface_get_sn_allocation)(struct dlb_hw_dev *handle,
+				   struct dlb_get_sn_allocation_args *args);
+
+int (*dlb_iface_set_sn_allocation)(struct dlb_hw_dev *handle,
+				   struct dlb_set_sn_allocation_args *args);
+
+int (*dlb_iface_get_sn_occupancy)(struct dlb_hw_dev *handle,
+				  struct dlb_get_sn_occupancy_args *args);
+
diff --git a/drivers/event/dlb/dlb_iface.h b/drivers/event/dlb/dlb_iface.h
index d576232..af1416d 100644
--- a/drivers/event/dlb/dlb_iface.h
+++ b/drivers/event/dlb/dlb_iface.h
@@ -32,7 +32,19 @@ extern int (*dlb_iface_ldb_credit_pool_create)(struct dlb_hw_dev *handle,
 extern int (*dlb_iface_dir_credit_pool_create)(struct dlb_hw_dev *handle,
 					struct dlb_create_dir_pool_args *cfg);
 
+extern int (*dlb_iface_ldb_queue_create)(struct dlb_hw_dev *handle,
+				  struct dlb_create_ldb_queue_args *cfg);
+
 extern int (*dlb_iface_get_cq_poll_mode)(struct dlb_hw_dev *handle,
 					 enum dlb_cq_poll_modes *mode);
 
+extern int (*dlb_iface_get_sn_allocation)(struct dlb_hw_dev *handle,
+				  struct dlb_get_sn_allocation_args *args);
+
+extern int (*dlb_iface_set_sn_allocation)(struct dlb_hw_dev *handle,
+				  struct dlb_set_sn_allocation_args *args);
+
+extern int (*dlb_iface_get_sn_occupancy)(struct dlb_hw_dev *handle,
+				  struct dlb_get_sn_occupancy_args *args);
+
 #endif /* _DLB_IFACE_H */
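
The function pointers declared above follow the driver's existing iface
pattern: the eventdev layer calls through an unbound pointer, and each
backend (PF today) binds its implementation at init time. A
self-contained sketch of the pattern -- every name here is invented for
illustration:

#include <stdio.h>

/* Hypothetical stand-ins for dlb_hw_dev and dlb_create_ldb_queue_args. */
struct ex_handle { int domain_id; };
struct ex_args { int num_sequence_numbers; };

/* The iface layer exports an unbound function pointer... */
static int (*ex_iface_queue_create)(struct ex_handle *h, struct ex_args *a);

/* ...and the PF backend supplies the implementation. */
static int ex_pf_queue_create(struct ex_handle *h, struct ex_args *a)
{
	printf("PF backend: domain %d, %d SNs\n",
	       h->domain_id, a->num_sequence_numbers);
	return 0;
}

static void ex_pf_iface_fn_ptrs_init(void)
{
	ex_iface_queue_create = ex_pf_queue_create;
}

int main(void)
{
	struct ex_handle h = { .domain_id = 0 };
	struct ex_args a = { .num_sequence_numbers = 64 };

	ex_pf_iface_fn_ptrs_init();
	/* The eventdev layer only ever calls through the pointer. */
	return ex_iface_queue_create(&h, &a);
}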
diff --git a/drivers/event/dlb/pf/base/dlb_resource.c b/drivers/event/dlb/pf/base/dlb_resource.c
index 0d0bbf9..2b80e03 100644
--- a/drivers/event/dlb/pf/base/dlb_resource.c
+++ b/drivers/event/dlb/pf/base/dlb_resource.c
@@ -1823,168 +1823,6 @@ int dlb_hw_create_sched_domain(struct dlb_hw *hw,
 }
 
 static void
-dlb_configure_ldb_credit_pool(struct dlb_hw *hw,
-			      struct dlb_domain *domain,
-			      struct dlb_create_ldb_pool_args *args,
-			      struct dlb_credit_pool *pool)
-{
-	union dlb_sys_ldb_pool_enbld r0 = { {0} };
-	union dlb_chp_ldb_pool_crd_lim r1 = { {0} };
-	union dlb_chp_ldb_pool_crd_cnt r2 = { {0} };
-	union dlb_chp_qed_fl_base  r3 = { {0} };
-	union dlb_chp_qed_fl_lim r4 = { {0} };
-	union dlb_chp_qed_fl_push_ptr r5 = { {0} };
-	union dlb_chp_qed_fl_pop_ptr  r6 = { {0} };
-
-	r1.field.limit = args->num_ldb_credits;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_LIM(pool->id), r1.val);
-
-	r2.field.count = args->num_ldb_credits;
-
-	DLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_CNT(pool->id), r2.val);
-
-	r3.field.base = domain->qed_freelist.base + domain->qed_freelist.offset;
-
-	DLB_CSR_WR(hw, DLB_CHP_QED_FL_BASE(pool->id), r3.val);
-
-	r4.field.freelist_disable = 0;
-	r4.field.limit = r3.field.base + args->num_ldb_credits - 1;
-
-	DLB_CSR_WR(hw, DLB_CHP_QED_FL_LIM(pool->id), r4.val);
-
-	r5.field.push_ptr = r3.field.base;
-	r5.field.generation = 1;
-
-	DLB_CSR_WR(hw, DLB_CHP_QED_FL_PUSH_PTR(pool->id), r5.val);
-
-	r6.field.pop_ptr = r3.field.base;
-	r6.field.generation = 0;
-
-	DLB_CSR_WR(hw, DLB_CHP_QED_FL_POP_PTR(pool->id), r6.val);
-
-	r0.field.pool_enabled = 1;
-
-	DLB_CSR_WR(hw, DLB_SYS_LDB_POOL_ENBLD(pool->id), r0.val);
-
-	pool->avail_credits = args->num_ldb_credits;
-	pool->total_credits = args->num_ldb_credits;
-	domain->qed_freelist.offset += args->num_ldb_credits;
-
-	pool->configured = true;
-}
-
-static int
-dlb_verify_create_ldb_pool_args(struct dlb_hw *hw,
-				u32 domain_id,
-				struct dlb_create_ldb_pool_args *args,
-				struct dlb_cmd_response *resp)
-{
-	struct dlb_freelist *qed_freelist;
-	struct dlb_domain *domain;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-
-	if (!domain) {
-		resp->status = DLB_ST_INVALID_DOMAIN_ID;
-		return -1;
-	}
-
-	if (!domain->configured) {
-		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
-		return -1;
-	}
-
-	qed_freelist = &domain->qed_freelist;
-
-	if (dlb_freelist_count(qed_freelist) < args->num_ldb_credits) {
-		resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
-		return -1;
-	}
-
-	if (dlb_list_empty(&domain->avail_ldb_credit_pools)) {
-		resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
-		return -1;
-	}
-
-	if (domain->started) {
-		resp->status = DLB_ST_DOMAIN_STARTED;
-		return -1;
-	}
-
-	return 0;
-}
-
-static void
-dlb_log_create_ldb_pool_args(struct dlb_hw *hw,
-			     u32 domain_id,
-			     struct dlb_create_ldb_pool_args *args)
-{
-	DLB_HW_INFO(hw, "DLB create load-balanced credit pool arguments:\n");
-	DLB_HW_INFO(hw, "\tDomain ID:             %d\n", domain_id);
-	DLB_HW_INFO(hw, "\tNumber of LDB credits: %d\n",
-		    args->num_ldb_credits);
-}
-
-/**
- * dlb_hw_create_ldb_pool() - Allocate and initialize a DLB credit pool.
- * @hw:	  Contains the current state of the DLB hardware.
- * @args: User-provided arguments.
- * @resp: Response to user.
- *
- * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
- * satisfy a request, resp->status will be set accordingly.
- */
-int dlb_hw_create_ldb_pool(struct dlb_hw *hw,
-			   u32 domain_id,
-			   struct dlb_create_ldb_pool_args *args,
-			   struct dlb_cmd_response *resp)
-{
-	struct dlb_credit_pool *pool;
-	struct dlb_domain *domain;
-
-	dlb_log_create_ldb_pool_args(hw, domain_id, args);
-
-	/* Verify that hardware resources are available before attempting to
-	 * satisfy the request. This simplifies the error unwinding code.
-	 */
-	if (dlb_verify_create_ldb_pool_args(hw, domain_id, args, resp))
-		return -EINVAL;
-
-	domain = dlb_get_domain_from_id(hw, domain_id);
-	if (!domain) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: domain not found\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	pool = DLB_DOM_LIST_HEAD(domain->avail_ldb_credit_pools, typeof(*pool));
-
-	/* Verification should catch this. */
-	if (!pool) {
-		DLB_HW_ERR(hw,
-			   "[%s():%d] Internal error: no available ldb credit pools\n",
-			   __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	dlb_configure_ldb_credit_pool(hw, domain, args, pool);
-
-	/* Configuration succeeded, so move the resource from the 'avail' to
-	 * the 'used' list.
-	 */
-	dlb_list_del(&domain->avail_ldb_credit_pools, &pool->domain_list);
-
-	dlb_list_add(&domain->used_ldb_credit_pools, &pool->domain_list);
-
-	resp->status = 0;
-	resp->id = pool->id;
-
-	return 0;
-}
-
-static void
 dlb_configure_dir_credit_pool(struct dlb_hw *hw,
 			      struct dlb_domain *domain,
 			      struct dlb_create_dir_pool_args *args,
@@ -4212,3 +4050,551 @@ void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw)
 
 	DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
 }
+
+static void
+dlb_configure_ldb_credit_pool(struct dlb_hw *hw,
+			      struct dlb_domain *domain,
+			      struct dlb_create_ldb_pool_args *args,
+			      struct dlb_credit_pool *pool)
+{
+	union dlb_sys_ldb_pool_enbld r0 = { {0} };
+	union dlb_chp_ldb_pool_crd_lim r1 = { {0} };
+	union dlb_chp_ldb_pool_crd_cnt r2 = { {0} };
+	union dlb_chp_qed_fl_base  r3 = { {0} };
+	union dlb_chp_qed_fl_lim r4 = { {0} };
+	union dlb_chp_qed_fl_push_ptr r5 = { {0} };
+	union dlb_chp_qed_fl_pop_ptr  r6 = { {0} };
+
+	r1.field.limit = args->num_ldb_credits;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_LIM(pool->id), r1.val);
+
+	r2.field.count = args->num_ldb_credits;
+
+	DLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_CNT(pool->id), r2.val);
+
+	r3.field.base = domain->qed_freelist.base + domain->qed_freelist.offset;
+
+	DLB_CSR_WR(hw, DLB_CHP_QED_FL_BASE(pool->id), r3.val);
+
+	r4.field.freelist_disable = 0;
+	r4.field.limit = r3.field.base + args->num_ldb_credits - 1;
+
+	DLB_CSR_WR(hw, DLB_CHP_QED_FL_LIM(pool->id), r4.val);
+
+	r5.field.push_ptr = r3.field.base;
+	r5.field.generation = 1;
+
+	DLB_CSR_WR(hw, DLB_CHP_QED_FL_PUSH_PTR(pool->id), r5.val);
+
+	r6.field.pop_ptr = r3.field.base;
+	r6.field.generation = 0;
+
+	DLB_CSR_WR(hw, DLB_CHP_QED_FL_POP_PTR(pool->id), r6.val);
+
+	r0.field.pool_enabled = 1;
+
+	DLB_CSR_WR(hw, DLB_SYS_LDB_POOL_ENBLD(pool->id), r0.val);
+
+	pool->avail_credits = args->num_ldb_credits;
+	pool->total_credits = args->num_ldb_credits;
+	domain->qed_freelist.offset += args->num_ldb_credits;
+
+	pool->configured = true;
+}
+
+static int
+dlb_verify_create_ldb_pool_args(struct dlb_hw *hw,
+				u32 domain_id,
+				struct dlb_create_ldb_pool_args *args,
+				struct dlb_cmd_response *resp)
+{
+	struct dlb_freelist *qed_freelist;
+	struct dlb_domain *domain;
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+
+	if (!domain) {
+		resp->status = DLB_ST_INVALID_DOMAIN_ID;
+		return -1;
+	}
+
+	if (!domain->configured) {
+		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
+		return -1;
+	}
+
+	qed_freelist = &domain->qed_freelist;
+
+	if (dlb_freelist_count(qed_freelist) < args->num_ldb_credits) {
+		resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
+		return -1;
+	}
+
+	if (dlb_list_empty(&domain->avail_ldb_credit_pools)) {
+		resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
+		return -1;
+	}
+
+	if (domain->started) {
+		resp->status = DLB_ST_DOMAIN_STARTED;
+		return -1;
+	}
+
+	return 0;
+}
+
+static void
+dlb_log_create_ldb_pool_args(struct dlb_hw *hw,
+			     u32 domain_id,
+			     struct dlb_create_ldb_pool_args *args)
+{
+	DLB_HW_INFO(hw, "DLB create load-balanced credit pool arguments:\n");
+	DLB_HW_INFO(hw, "\tDomain ID:             %d\n", domain_id);
+	DLB_HW_INFO(hw, "\tNumber of LDB credits: %d\n",
+		    args->num_ldb_credits);
+}
+
+static void dlb_configure_ldb_queue(struct dlb_hw *hw,
+				    struct dlb_domain *domain,
+				    struct dlb_ldb_queue *queue,
+				    struct dlb_create_ldb_queue_args *args)
+{
+	union dlb_sys_ldb_vasqid_v r0 = { {0} };
+	union dlb_lsp_qid_ldb_infl_lim r1 = { {0} };
+	union dlb_lsp_qid_aqed_active_lim r2 = { {0} };
+	union dlb_aqed_pipe_fl_lim r3 = { {0} };
+	union dlb_aqed_pipe_fl_base r4 = { {0} };
+	union dlb_chp_ord_qid_sn_map r7 = { {0} };
+	union dlb_sys_ldb_qid_cfg_v r10 = { {0} };
+	union dlb_sys_ldb_qid_v r11 = { {0} };
+	union dlb_aqed_pipe_fl_push_ptr r5 = { {0} };
+	union dlb_aqed_pipe_fl_pop_ptr r6 = { {0} };
+	union dlb_aqed_pipe_qid_fid_lim r8 = { {0} };
+	union dlb_ro_pipe_qid2grpslt r9 = { {0} };
+	struct dlb_sn_group *sn_group;
+	unsigned int offs;
+
+	/* QID write permissions are turned on when the domain is started */
+	r0.field.vasqid_v = 0;
+
+	offs = domain->id * DLB_MAX_NUM_LDB_QUEUES + queue->id;
+
+	DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(offs), r0.val);
+
+	/*
+	 * Unordered QIDs get 4K inflights; ordered QIDs get as many as their
+	 * number of sequence numbers.
+	 */
+	r1.field.limit = args->num_qid_inflights;
+
+	DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), r1.val);
+
+	r2.field.limit = queue->aqed_freelist.bound -
+			 queue->aqed_freelist.base;
+
+	if (r2.field.limit > DLB_MAX_NUM_AQOS_ENTRIES)
+		r2.field.limit = DLB_MAX_NUM_AQOS_ENTRIES;
+
+	/* AQOS */
+	DLB_CSR_WR(hw, DLB_LSP_QID_AQED_ACTIVE_LIM(queue->id), r2.val);
+
+	r3.field.freelist_disable = 0;
+	r3.field.limit = queue->aqed_freelist.bound - 1;
+
+	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_LIM(queue->id), r3.val);
+
+	r4.field.base = queue->aqed_freelist.base;
+
+	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_BASE(queue->id), r4.val);
+
+	r5.field.push_ptr = r4.field.base;
+	r5.field.generation = 1;
+
+	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_PUSH_PTR(queue->id), r5.val);
+
+	r6.field.pop_ptr = r4.field.base;
+	r6.field.generation = 0;
+
+	DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_POP_PTR(queue->id), r6.val);
+
+	/* Configure SNs */
+	sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
+	r7.field.mode = sn_group->mode;
+	r7.field.slot = queue->sn_slot;
+	r7.field.grp  = sn_group->id;
+
+	DLB_CSR_WR(hw, DLB_CHP_ORD_QID_SN_MAP(queue->id), r7.val);
+
+	/*
+	 * This register limits the number of inflight flows a queue can have
+	 * at one time.  It has an upper bound of 2048, but can be
+	 * over-subscribed. 512 is chosen so that a single queue doesn't use
+	 * the entire atomic storage, but can use a substantial portion if
+	 * needed.
+	 */
+	r8.field.qid_fid_limit = 512;
+
+	DLB_CSR_WR(hw, DLB_AQED_PIPE_QID_FID_LIM(queue->id), r8.val);
+
+	r9.field.group = sn_group->id;
+	r9.field.slot = queue->sn_slot;
+
+	DLB_CSR_WR(hw, DLB_RO_PIPE_QID2GRPSLT(queue->id), r9.val);
+
+	r10.field.sn_cfg_v = (args->num_sequence_numbers != 0);
+	r10.field.fid_cfg_v = (args->num_atomic_inflights != 0);
+
+	DLB_CSR_WR(hw, DLB_SYS_LDB_QID_CFG_V(queue->id), r10.val);
+
+	r11.field.qid_v = 1;
+
+	DLB_CSR_WR(hw, DLB_SYS_LDB_QID_V(queue->id), r11.val);
+}
+
+/**
+ * dlb_hw_create_ldb_pool() - Allocate and initialize a DLB credit pool.
+ * @hw:	  Contains the current state of the DLB hardware.
+ * @args: User-provided arguments.
+ * @resp: Response to user.
+ *
+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
+ * satisfy a request, resp->status will be set accordingly.
+ */
+int dlb_hw_create_ldb_pool(struct dlb_hw *hw,
+			   u32 domain_id,
+			   struct dlb_create_ldb_pool_args *args,
+			   struct dlb_cmd_response *resp)
+{
+	struct dlb_credit_pool *pool;
+	struct dlb_domain *domain;
+
+	dlb_log_create_ldb_pool_args(hw, domain_id, args);
+
+	/* Verify that hardware resources are available before attempting to
+	 * satisfy the request. This simplifies the error unwinding code.
+	 */
+	if (dlb_verify_create_ldb_pool_args(hw, domain_id, args, resp))
+		return -EINVAL;
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+	if (!domain) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: domain not found\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	pool = DLB_DOM_LIST_HEAD(domain->avail_ldb_credit_pools, typeof(*pool));
+
+	/* Verification should catch this. */
+	if (!pool) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: no available ldb credit pools\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	dlb_configure_ldb_credit_pool(hw, domain, args, pool);
+
+	/* Configuration succeeded, so move the resource from the 'avail' to
+	 * the 'used' list.
+	 */
+	dlb_list_del(&domain->avail_ldb_credit_pools, &pool->domain_list);
+
+	dlb_list_add(&domain->used_ldb_credit_pools, &pool->domain_list);
+
+	resp->status = 0;
+	resp->id = pool->id;
+
+	return 0;
+}
+
+int dlb_get_group_sequence_numbers(struct dlb_hw *hw, unsigned int group_id)
+{
+	if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
+		return -EINVAL;
+
+	return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
+}
+
+int dlb_get_group_sequence_number_occupancy(struct dlb_hw *hw,
+					    unsigned int group_id)
+{
+	if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
+		return -EINVAL;
+
+	return dlb_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
+}
+
+static void dlb_log_set_group_sequence_numbers(struct dlb_hw *hw,
+					       unsigned int group_id,
+					       unsigned long val)
+{
+	DLB_HW_INFO(hw, "DLB set group sequence numbers:\n");
+	DLB_HW_INFO(hw, "\tGroup ID: %u\n", group_id);
+	DLB_HW_INFO(hw, "\tValue:    %lu\n", val);
+}
+
+int dlb_set_group_sequence_numbers(struct dlb_hw *hw,
+				   unsigned int group_id,
+				   unsigned long val)
+{
+	u32 valid_allocations[6] = {32, 64, 128, 256, 512, 1024};
+	union dlb_ro_pipe_grp_sn_mode r0 = { {0} };
+	struct dlb_sn_group *group;
+	int mode;
+
+	if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
+		return -EINVAL;
+
+	group = &hw->rsrcs.sn_groups[group_id];
+
+	/* Once the first load-balanced queue using an SN group is configured,
+	 * the group cannot be changed.
+	 */
+	if (group->slot_use_bitmap != 0)
+		return -EPERM;
+
+	for (mode = 0; mode < DLB_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
+		if (val == valid_allocations[mode])
+			break;
+
+	if (mode == DLB_MAX_NUM_SEQUENCE_NUMBER_MODES)
+		return -EINVAL;
+
+	group->mode = mode;
+	group->sequence_numbers_per_queue = val;
+
+	r0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode;
+	r0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode;
+	r0.field.sn_mode_2 = hw->rsrcs.sn_groups[2].mode;
+	r0.field.sn_mode_3 = hw->rsrcs.sn_groups[3].mode;
+
+	DLB_CSR_WR(hw, DLB_RO_PIPE_GRP_SN_MODE, r0.val);
+
+	dlb_log_set_group_sequence_numbers(hw, group_id, val);
+
+	return 0;
+}
+
+static int
+dlb_ldb_queue_attach_to_sn_group(struct dlb_hw *hw,
+				 struct dlb_ldb_queue *queue,
+				 struct dlb_create_ldb_queue_args *args)
+{
+	int slot = -1;
+	int i;
+
+	queue->sn_cfg_valid = false;
+
+	if (args->num_sequence_numbers == 0)
+		return 0;
+
+	for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
+		struct dlb_sn_group *group = &hw->rsrcs.sn_groups[i];
+
+		if (group->sequence_numbers_per_queue ==
+		    args->num_sequence_numbers &&
+		    !dlb_sn_group_full(group)) {
+			slot = dlb_sn_group_alloc_slot(group);
+			if (slot >= 0)
+				break;
+		}
+	}
+
+	if (slot == -1) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: no sequence number slots available\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	queue->sn_cfg_valid = true;
+	queue->sn_group = i;
+	queue->sn_slot = slot;
+	return 0;
+}
+
+static int
+dlb_ldb_queue_attach_resources(struct dlb_hw *hw,
+			       struct dlb_domain *domain,
+			       struct dlb_ldb_queue *queue,
+			       struct dlb_create_ldb_queue_args *args)
+{
+	int ret;
+
+	ret = dlb_ldb_queue_attach_to_sn_group(hw, queue, args);
+	if (ret)
+		return ret;
+
+	/* Attach QID inflights */
+	queue->num_qid_inflights = args->num_qid_inflights;
+
+	/* Attach atomic inflights */
+	queue->aqed_freelist.base = domain->aqed_freelist.base +
+				    domain->aqed_freelist.offset;
+	queue->aqed_freelist.bound = queue->aqed_freelist.base +
+				     args->num_atomic_inflights;
+	domain->aqed_freelist.offset += args->num_atomic_inflights;
+
+	return 0;
+}
+
+static int
+dlb_verify_create_ldb_queue_args(struct dlb_hw *hw,
+				 u32 domain_id,
+				 struct dlb_create_ldb_queue_args *args,
+				 struct dlb_cmd_response *resp)
+{
+	struct dlb_freelist *aqed_freelist;
+	struct dlb_domain *domain;
+	int i;
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+
+	if (!domain) {
+		resp->status = DLB_ST_INVALID_DOMAIN_ID;
+		return -1;
+	}
+
+	if (!domain->configured) {
+		resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
+		return -1;
+	}
+
+	if (domain->started) {
+		resp->status = DLB_ST_DOMAIN_STARTED;
+		return -1;
+	}
+
+	if (dlb_list_empty(&domain->avail_ldb_queues)) {
+		resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
+		return -1;
+	}
+
+	if (args->num_sequence_numbers) {
+		for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
+			struct dlb_sn_group *group = &hw->rsrcs.sn_groups[i];
+
+			if (group->sequence_numbers_per_queue ==
+			    args->num_sequence_numbers &&
+			    !dlb_sn_group_full(group))
+				break;
+		}
+
+		if (i == DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
+			resp->status = DLB_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
+			return -1;
+		}
+	}
+
+	if (args->num_qid_inflights > 4096) {
+		resp->status = DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION;
+		return -1;
+	}
+
+	/* Inflights must be <= number of sequence numbers if ordered */
+	if (args->num_sequence_numbers != 0 &&
+	    args->num_qid_inflights > args->num_sequence_numbers) {
+		resp->status = DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION;
+		return -1;
+	}
+
+	aqed_freelist = &domain->aqed_freelist;
+
+	if (dlb_freelist_count(aqed_freelist) < args->num_atomic_inflights) {
+		resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
+		return -1;
+	}
+
+	return 0;
+}
+
+static void
+dlb_log_create_ldb_queue_args(struct dlb_hw *hw,
+			      u32 domain_id,
+			      struct dlb_create_ldb_queue_args *args)
+{
+	DLB_HW_INFO(hw, "DLB create load-balanced queue arguments:\n");
+	DLB_HW_INFO(hw, "\tDomain ID:                  %d\n",
+		    domain_id);
+	DLB_HW_INFO(hw, "\tNumber of sequence numbers: %d\n",
+		    args->num_sequence_numbers);
+	DLB_HW_INFO(hw, "\tNumber of QID inflights:    %d\n",
+		    args->num_qid_inflights);
+	DLB_HW_INFO(hw, "\tNumber of ATM inflights:    %d\n",
+		    args->num_atomic_inflights);
+}
+
+/**
+ * dlb_hw_create_ldb_queue() - Allocate and initialize a DLB LDB queue.
+ * @hw:	  Contains the current state of the DLB hardware.
+ * @args: User-provided arguments.
+ * @resp: Response to user.
+ *
+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
+ * satisfy a request, resp->status will be set accordingly.
+ */
+int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
+			    u32 domain_id,
+			    struct dlb_create_ldb_queue_args *args,
+			    struct dlb_cmd_response *resp)
+{
+	struct dlb_ldb_queue *queue;
+	struct dlb_domain *domain;
+	int ret;
+
+	dlb_log_create_ldb_queue_args(hw, domain_id, args);
+
+	/* Verify that hardware resources are available (including at least
+	 * one available queue) before attempting to satisfy the request.
+	 * This simplifies the error unwinding code.
+	 */
+	if (dlb_verify_create_ldb_queue_args(hw, domain_id, args, resp))
+		return -EINVAL;
+
+	domain = dlb_get_domain_from_id(hw, domain_id);
+	if (!domain) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: domain not found\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	queue = DLB_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
+
+	/* Verification should catch this. */
+	if (!queue) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: no available ldb queues\n",
+			   __func__, __LINE__);
+		return -EFAULT;
+	}
+
+	ret = dlb_ldb_queue_attach_resources(hw, domain, queue, args);
+	if (ret < 0) {
+		DLB_HW_ERR(hw,
+			   "[%s():%d] Internal error: failed to attach the ldb queue resources\n",
+			   __func__, __LINE__);
+		return ret;
+	}
+
+	dlb_configure_ldb_queue(hw, domain, queue, args);
+
+	queue->num_mappings = 0;
+
+	queue->configured = true;
+
+	/* Configuration succeeded, so move the resource from the 'avail' to
+	 * the 'used' list.
+	 */
+	dlb_list_del(&domain->avail_ldb_queues, &queue->domain_list);
+
+	dlb_list_add(&domain->used_ldb_queues, &queue->domain_list);
+
+	resp->status = 0;
+	resp->id = queue->id;
+
+	return 0;
+}
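
dlb_ldb_queue_attach_to_sn_group() above leans on the dlb_sn_group slot
helpers defined elsewhere in the driver. For readers without that file
handy, a rough sketch of what a bitmap slot allocator of this shape
looks like -- an illustration of the technique, not the driver's actual
implementation:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for struct dlb_sn_group: bit i of the bitmap
 * marks slot i as in use; num_slots would be the per-group total
 * divided by sequence_numbers_per_queue.
 */
struct ex_sn_group {
	uint64_t slot_use_bitmap;
	int num_slots;
};

static bool ex_sn_group_full(const struct ex_sn_group *g)
{
	uint64_t mask = (g->num_slots >= 64) ?
			~0ULL : (1ULL << g->num_slots) - 1;

	return (g->slot_use_bitmap & mask) == mask;
}

static int ex_sn_group_alloc_slot(struct ex_sn_group *g)
{
	int i;

	for (i = 0; i < g->num_slots; i++) {
		if (!(g->slot_use_bitmap & (1ULL << i))) {
			g->slot_use_bitmap |= 1ULL << i;
			return i;
		}
	}
	return -1; /* group full */
}

int main(void)
{
	struct ex_sn_group g = { .slot_use_bitmap = 0, .num_slots = 32 };

	return (ex_sn_group_alloc_slot(&g) == 0 && !ex_sn_group_full(&g))
		? 0 : 1;
}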
diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c
index 57a150c..fffb88b 100644
--- a/drivers/event/dlb/pf/dlb_pf.c
+++ b/drivers/event/dlb/pf/dlb_pf.c
@@ -198,6 +198,83 @@ dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,
 	return 0;
 }
 
+static int
+dlb_pf_ldb_queue_create(struct dlb_hw_dev *handle,
+			struct dlb_create_ldb_queue_args *cfg)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	DLB_INFO(dlb_dev, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_create_ldb_queue(&dlb_dev->hw,
+				      handle->domain_id,
+				      cfg,
+				      &response);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dlb_dev, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb_pf_get_sn_allocation(struct dlb_hw_dev *handle,
+			 struct dlb_get_sn_allocation_args *args)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	ret = dlb_get_group_sequence_numbers(&dlb_dev->hw, args->group);
+
+	response.id = ret;
+	response.status = 0;
+
+	*(struct dlb_cmd_response *)args->response = response;
+
+	return ret;
+}
+
+static int
+dlb_pf_set_sn_allocation(struct dlb_hw_dev *handle,
+			 struct dlb_set_sn_allocation_args *args)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	ret = dlb_set_group_sequence_numbers(&dlb_dev->hw, args->group,
+					     args->num);
+
+	response.status = 0;
+
+	*(struct dlb_cmd_response *)args->response = response;
+
+	return ret;
+}
+
+static int
+dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,
+			struct dlb_get_sn_occupancy_args *args)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	ret = dlb_get_group_sequence_number_occupancy(&dlb_dev->hw,
+						      args->group);
+
+	response.id = ret;
+	response.status = 0;
+
+	*(struct dlb_cmd_response *)args->response = response;
+
+	return ret;
+}
+
 static void
 dlb_pf_iface_fn_ptrs_init(void)
 {
@@ -209,7 +286,11 @@ dlb_pf_iface_fn_ptrs_init(void)
 	dlb_iface_sched_domain_create = dlb_pf_sched_domain_create;
 	dlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;
 	dlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;
+	dlb_iface_ldb_queue_create = dlb_pf_ldb_queue_create;
 	dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
+	dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
+	dlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;
+	dlb_iface_get_sn_occupancy = dlb_pf_get_sn_occupancy;
 }
 
 /* PCI DEV HOOKS */
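
One note on the response plumbing visible in dlb_pf_ldb_queue_create()
above: the caller embeds a pointer to its dlb_cmd_response in the args
as a uintptr_t, and the backend writes its local response back through
a cast. A reduced sketch of that round trip, with the types and names
simplified for illustration:

#include <stdint.h>
#include <stdio.h>

struct ex_cmd_response {
	uint32_t status;
	uint32_t id;
};

struct ex_create_args {
	uintptr_t response; /* points at an ex_cmd_response */
	uint32_t num_sequence_numbers;
};

/* Backend: fill in a local response, then copy it out through the
 * pointer the caller embedded in the args.
 */
static int ex_backend_create(struct ex_create_args *args)
{
	struct ex_cmd_response response = {0};

	response.status = 0;
	response.id = 7; /* hardware queue ID, made up for the example */

	*(struct ex_cmd_response *)args->response = response;
	return 0;
}

int main(void)
{
	struct ex_cmd_response response = {0};
	struct ex_create_args args = {0};

	args.response = (uintptr_t)&response;
	args.num_sequence_numbers = 64;

	if (ex_backend_create(&args) == 0)
		printf("created queue id %u\n", response.id);
	return 0;
}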
-- 
2.6.4


Thread overview: 47+ messages
2020-09-11 19:18 [dpdk-dev] [PATCH v4 00/22] Add DLB PMD Timothy McDaniel
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 01/22] event/dlb: add documentation and meson infrastructure Timothy McDaniel
2020-09-14 20:56   ` Eads, Gage
2020-09-16 21:05     ` McDaniel, Timothy
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 02/22] event/dlb: add dynamic logging Timothy McDaniel
2020-09-14 21:00   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 03/22] event/dlb: add private data structures and constants Timothy McDaniel
2020-09-14 22:08   ` Eads, Gage
2020-10-08 18:14   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 04/22] event/dlb: add definitions shared with LKM or shared code Timothy McDaniel
2020-09-15 18:20   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 05/22] event/dlb: add inline functions Timothy McDaniel
2020-10-08 18:22   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 06/22] event/dlb: add probe Timothy McDaniel
2020-10-08 18:51   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 07/22] event/dlb: add xstats Timothy McDaniel
2020-10-08 20:53   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 08/22] event/dlb: add infos get and configure Timothy McDaniel
2020-10-08 21:01   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 09/22] event/dlb: add queue and port default conf Timothy McDaniel
2020-10-08 21:02   ` Eads, Gage
2020-09-11 19:18 ` Timothy McDaniel [this message]
2020-10-08 21:15   ` [dpdk-dev] [PATCH v4 10/22] event/dlb: add queue setup Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 11/22] event/dlb: add port setup Timothy McDaniel
2020-10-08 21:28   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 12/22] event/dlb: add port link Timothy McDaniel
2020-10-08 21:31   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 13/22] event/dlb: add port unlink and port unlinks in progress Timothy McDaniel
2020-10-08 21:38   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 14/22] event/dlb: add eventdev start Timothy McDaniel
2020-10-08 21:41   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 15/22] event/dlb: add enqueue and its burst variants Timothy McDaniel
2020-10-08 21:43   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 16/22] event/dlb: add dequeue " Timothy McDaniel
2020-10-08 21:48   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 17/22] event/dlb: add eventdev stop and close Timothy McDaniel
2020-10-08 21:49   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 18/22] event/dlb: add PMD's token pop public interface Timothy McDaniel
2020-10-08 21:50   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 19/22] event/dlb: add PMD self-tests Timothy McDaniel
2020-10-08 21:56   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 20/22] event/dlb: add queue and port release Timothy McDaniel
2020-10-08 21:57   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 21/22] event/dlb: add timeout ticks entry point Timothy McDaniel
2020-10-08 22:01   ` Eads, Gage
2020-09-11 19:18 ` [dpdk-dev] [PATCH v4 22/22] doc: Add new DLB eventdev driver to relnotes Timothy McDaniel
2020-10-08 22:03   ` Eads, Gage
