From: Timothy McDaniel <timothy.mcdaniel@intel.com>
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
harry.van.haaren@intel.com, jerinj@marvell.com
Subject: [dpdk-dev] [PATCH 08/22] event/dlb2: add infos get and configure
Date: Fri, 11 Sep 2020 15:26:13 -0500
Message-ID: <1599855987-25976-9-git-send-email-timothy.mcdaniel@intel.com>
In-Reply-To: <1599855987-25976-1-git-send-email-timothy.mcdaniel@intel.com>
Add support for configuring the DLB2 hardware. This commit implements the
eventdev dev_infos_get and dev_configure entry points, and adds the
low-level PF code for creating a scheduling domain and attaching its
queues, ports, and credits.
Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
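Note for reviewers: the sketch below (not part of the commit) shows how an
application is expected to exercise the two new entry points through the
public eventdev API. The device ID and resource targets are hypothetical;
the limits come back from the dev_infos_get callback added in this patch.

#include <rte_eventdev.h>

static int
setup_eventdev(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg = {0};
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret)
		return ret;

	/* Stay within the limits the PMD reported */
	cfg.nb_event_queues = RTE_MIN(4U, info.max_event_queues);
	cfg.nb_event_ports = RTE_MIN(4U, info.max_event_ports);
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

	return rte_event_dev_configure(dev_id, &cfg);
}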
drivers/event/dlb2/dlb2.c | 334 +++
drivers/event/dlb2/dlb2_iface.c | 7 +-
drivers/event/dlb2/dlb2_iface.h | 5 +
drivers/event/dlb2/pf/base/dlb2_resource.c | 3234 ++++++++++++++++++++++++++++
drivers/event/dlb2/pf/dlb2_main.c | 14 +
drivers/event/dlb2/pf/dlb2_pf.c | 44 +
6 files changed, 3637 insertions(+), 1 deletion(-)
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index 0d6fea4..58e953b 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -92,6 +92,28 @@ dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
return 0;
}
+static void
+dlb2_free_qe_mem(struct dlb2_port *qm_port)
+{
+ if (qm_port == NULL)
+ return;
+
+ if (qm_port->qe4) {
+ rte_free(qm_port->qe4);
+ qm_port->qe4 = NULL;
+ }
+
+ if (qm_port->int_arm_qe) {
+ rte_free(qm_port->int_arm_qe);
+ qm_port->int_arm_qe = NULL;
+ }
+
+ if (qm_port->consume_qe) {
+ rte_free(qm_port->consume_qe);
+ qm_port->consume_qe = NULL;
+ }
+}
+
/* override defaults with value(s) provided on command line */
static void
dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
@@ -366,10 +388,322 @@ set_qid_depth_thresh(const char *key __rte_unused,
}
static void
+dlb2_eventdev_info_get(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info)
+{
+ struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
+ int ret;
+
+ ret = dlb2_hw_query_resources(dlb2);
+ if (ret) {
+ const struct rte_eventdev_data *data = dev->data;
+
+ DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
+ ret, data->dev_id);
+ /* This function is void, so fall through and return the values
+ * set up at probe time.
+ */
+ }
+
+ /* Add num resources currently owned by this domain.
+ * These would become available if the scheduling domain were reset due
+ * to the application recalling eventdev_configure to *reconfigure* the
+ * domain.
+ */
+ evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
+ evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
+ evdev_dlb2_default_info.max_num_events += dlb2->max_ldb_credits;
+
+ evdev_dlb2_default_info.max_event_queues =
+ RTE_MIN(evdev_dlb2_default_info.max_event_queues,
+ RTE_EVENT_MAX_QUEUES_PER_DEV);
+
+ evdev_dlb2_default_info.max_num_events =
+ RTE_MIN(evdev_dlb2_default_info.max_num_events,
+ dlb2->max_num_events_override);
+
+ *dev_info = evdev_dlb2_default_info;
+}
+
+static int
+dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
+ const struct dlb2_hw_rsrcs *resources_asked)
+{
+ int ret = 0;
+ struct dlb2_create_sched_domain_args *config_params;
+
+ if (resources_asked == NULL) {
+ DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter\n");
+ ret = -EINVAL;
+ goto error_exit;
+ }
+
+ /* Map generic qm resources to dlb2 resources */
+ config_params = &handle->cfg.resources;
+
+ /* DIR ports and queues */
+
+ config_params->num_dir_ports =
+ resources_asked->num_dir_ports;
+
+ config_params->num_dir_credits =
+ resources_asked->num_dir_credits;
+
+ /* LDB queues */
+
+ config_params->num_ldb_queues =
+ resources_asked->num_ldb_queues;
+
+ /* LDB ports */
+
+ config_params->cos_strict = 0; /* Best effort */
+ config_params->num_cos_ldb_ports[0] = 0;
+ config_params->num_cos_ldb_ports[1] = 0;
+ config_params->num_cos_ldb_ports[2] = 0;
+ config_params->num_cos_ldb_ports[3] = 0;
+
+ switch (handle->cos_id) {
+ case DLB2_COS_0:
+ config_params->num_ldb_ports = 0; /* no don't care ports */
+ config_params->num_cos_ldb_ports[0] =
+ resources_asked->num_ldb_ports;
+ break;
+ case DLB2_COS_1:
+ config_params->num_ldb_ports = 0; /* no don't care ports */
+ config_params->num_cos_ldb_ports[1] =
+ resources_asked->num_ldb_ports;
+ break;
+ case DLB2_COS_2:
+ config_params->num_ldb_ports = 0; /* no don't care ports */
+ config_params->num_cos_ldb_ports[2] =
+ resources_asked->num_ldb_ports;
+ break;
+ case DLB2_COS_3:
+ config_params->num_ldb_ports = 0; /* no don't care ports */
+ config_params->num_cos_ldb_ports[3] =
+ resources_asked->num_ldb_ports;
+ break;
+ case DLB2_COS_DEFAULT:
+ /* all ldb ports are don't care ports from a cos perspective */
+ config_params->num_ldb_ports =
+ resources_asked->num_ldb_ports;
+ break;
+ }
+
+ config_params->num_ldb_credits =
+ resources_asked->num_ldb_credits;
+
+ config_params->num_atomic_inflights =
+ DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
+ config_params->num_ldb_queues;
+
+ config_params->num_hist_list_entries = resources_asked->num_ldb_ports *
+ DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
+
+ DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
+ config_params->num_ldb_queues,
+ resources_asked->num_ldb_ports,
+ config_params->num_dir_ports,
+ config_params->num_atomic_inflights,
+ config_params->num_hist_list_entries,
+ config_params->num_ldb_credits,
+ config_params->num_dir_credits);
+
+ /* Configure the QM */
+
+ ret = dlb2_iface_sched_domain_create(handle, config_params);
+ if (ret < 0) {
+ DLB2_LOG_ERR("dlb2: domain create failed, device_id = %d, (driver ret = %d, extra status: %s)\n",
+ handle->device_id,
+ ret,
+ dlb2_error_strings
+ [config_params->response.status]);
+
+ goto error_exit;
+ }
+
+ handle->domain_id = config_params->response.id;
+ handle->domain_id_valid = 1;
+ handle->cfg.configured = true;
+
+error_exit:
+
+ return ret;
+}
+
+static void
+dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
+{
+ struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
+ enum dlb2_configuration_state config_state;
+ int i, j;
+
+ dlb2_iface_domain_reset(dlb2);
+
+ /* Free all dynamically allocated port memory */
+ for (i = 0; i < dlb2->num_ports; i++)
+ dlb2_free_qe_mem(&dlb2->ev_ports[i].qm_port);
+
+ /* If reconfiguring, mark the device's queues and ports as "previously
+ * configured." If the user doesn't reconfigure them, the PMD will
+ * reapply their previous configuration when the device is started.
+ */
+ config_state = (reconfig) ? DLB2_PREV_CONFIGURED :
+ DLB2_NOT_CONFIGURED;
+
+ for (i = 0; i < dlb2->num_ports; i++) {
+ dlb2->ev_ports[i].qm_port.config_state = config_state;
+ /* Reset setup_done so ports can be reconfigured */
+ dlb2->ev_ports[i].setup_done = false;
+ for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+ dlb2->ev_ports[i].link[j].mapped = false;
+ }
+
+ for (i = 0; i < dlb2->num_queues; i++)
+ dlb2->ev_queues[i].qm_queue.config_state = config_state;
+
+ for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++)
+ dlb2->ev_queues[i].setup_done = false;
+
+ dlb2->num_ports = 0;
+ dlb2->num_ldb_ports = 0;
+ dlb2->num_dir_ports = 0;
+ dlb2->num_queues = 0;
+ dlb2->num_ldb_queues = 0;
+ dlb2->num_dir_queues = 0;
+ dlb2->configured = false;
+}
+
+/* Note: 1 QM instance per QM device, QM instance/device == event device */
+static int
+dlb2_eventdev_configure(const struct rte_eventdev *dev)
+{
+ struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
+ struct dlb2_hw_dev *handle = &dlb2->qm_instance;
+ struct dlb2_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
+ const struct rte_eventdev_data *data = dev->data;
+ const struct rte_event_dev_config *config = &data->dev_conf;
+ int ret;
+
+ /* If this eventdev is already configured, we must release the current
+ * scheduling domain before attempting to configure a new one.
+ */
+ if (dlb2->configured) {
+ dlb2_hw_reset_sched_domain(dev, true);
+
+ ret = dlb2_hw_query_resources(dlb2);
+ if (ret) {
+ DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
+ ret, data->dev_id);
+ return ret;
+ }
+ }
+
+ if (config->nb_event_queues > rsrcs->num_queues) {
+ DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
+ config->nb_event_queues,
+ rsrcs->num_queues);
+ return -EINVAL;
+ }
+ if (config->nb_event_ports > (rsrcs->num_ldb_ports
+ + rsrcs->num_dir_ports)) {
+ DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
+ config->nb_event_ports,
+ (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
+ return -EINVAL;
+ }
+ if (config->nb_events_limit > rsrcs->nb_events_limit) {
+ DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
+ config->nb_events_limit,
+ rsrcs->nb_events_limit);
+ return -EINVAL;
+ }
+
+ if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ dlb2->global_dequeue_wait = false;
+ } else {
+ uint32_t timeout32;
+
+ dlb2->global_dequeue_wait = true;
+
+ /* Craziness here is due to size mismatch in eventdev lib.
+ * TODO: Submit patch so dequeue API and config use same bit
+ * width timeout value and same units or time, instead of one
+ * being 32b ns and the other being 64b ticks.
+ */
+
+ timeout32 = config->dequeue_timeout_ns;
+
+ dlb2->global_dequeue_wait_ticks =
+ timeout32 * (rte_get_timer_hz() / 1E9);
+ }
+
+ /* Does this platform support umonitor/umwait? */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
+ if (RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 0 &&
+ RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 1) {
+ DLB2_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE, must be 0 or 1.\n",
+ RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE);
+ return -EINVAL;
+ }
+ dlb2->umwait_allowed = true;
+ }
+
+ /* FIXME: DLB should revert to load-balanced ports if dir ports are
+ * not available
+ */
+
+ rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
+ rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
+ /* 1 dir queue per dir port */
+ rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
+
+ /* Scale down nb_events_limit by 4 for directed credits, since there
+ * are 4x as many load-balanced credits.
+ */
+ rsrcs->num_ldb_credits = 0;
+ rsrcs->num_dir_credits = 0;
+
+ if (rsrcs->num_ldb_queues)
+ rsrcs->num_ldb_credits = config->nb_events_limit;
+ if (rsrcs->num_dir_ports)
+ rsrcs->num_dir_credits = config->nb_events_limit / 4;
+ if (dlb2->num_dir_credits_override != -1)
+ rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
+
+ if (dlb2_hw_create_sched_domain(handle, rsrcs) < 0) {
+ DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
+ return -ENODEV;
+ }
+
+ dlb2->new_event_limit = config->nb_events_limit;
+ __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
+
+ /* Save number of ports/queues for this event dev */
+ dlb2->num_ports = config->nb_event_ports;
+ dlb2->num_queues = config->nb_event_queues;
+ dlb2->num_dir_ports = rsrcs->num_dir_ports;
+ dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
+ dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
+ dlb2->num_dir_queues = dlb2->num_dir_ports;
+ dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
+ dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
+ dlb2->dir_credit_pool = rsrcs->num_dir_credits;
+ dlb2->max_dir_credits = rsrcs->num_dir_credits;
+
+ dlb2->configured = true;
+
+ return 0;
+}
+
+static void
dlb2_entry_points_init(struct rte_eventdev *dev)
{
/* Expose PMD's eventdev interface */
static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
+ .dev_infos_get = dlb2_eventdev_info_get,
+ .dev_configure = dlb2_eventdev_configure,
.dump = dlb2_eventdev_dump,
.xstats_get = dlb2_eventdev_xstats_get,
.xstats_get_names = dlb2_eventdev_xstats_get_names,
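The dequeue-timeout conversion and the 4:1 credit split in
dlb2_eventdev_configure() above are easy to sanity-check in isolation. A
small standalone sketch of the same arithmetic, assuming a hypothetical
2 GHz timer and a 4096-event limit:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t timer_hz = 2000000000ULL; /* assumed rte_get_timer_hz() */
	uint32_t timeout_ns = 1000;	   /* 1 us dequeue timeout */
	uint32_t nb_events_limit = 4096;

	/* ns -> timer ticks, as in dlb2_eventdev_configure() */
	uint64_t wait_ticks = timeout_ns * (timer_hz / 1000000000ULL);

	/* LDB queues get the full limit; DIR queues get a quarter */
	uint32_t ldb_credits = nb_events_limit;
	uint32_t dir_credits = nb_events_limit / 4;

	/* Prints: 2000 ticks, 4096 ldb credits, 1024 dir credits */
	printf("%" PRIu64 " ticks, %u ldb credits, %u dir credits\n",
	       wait_ticks, ldb_credits, dir_credits);
	return 0;
}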
diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c
index fefdf78..5c11736 100644
--- a/drivers/event/dlb2/dlb2_iface.c
+++ b/drivers/event/dlb2/dlb2_iface.c
@@ -39,4 +39,9 @@ int (*dlb2_iface_get_cq_poll_mode)(struct dlb2_hw_dev *handle,
enum dlb2_cq_poll_modes *mode);
int (*dlb2_iface_get_num_resources)(struct dlb2_hw_dev *handle,
- struct dlb2_get_num_resources_args *rsrcs);
+ struct dlb2_get_num_resources_args *rsrcs);
+
+int (*dlb2_iface_sched_domain_create)(struct dlb2_hw_dev *handle,
+ struct dlb2_create_sched_domain_args *args);
+
+void (*dlb2_iface_domain_reset)(struct dlb2_eventdev *dlb2);
diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h
index 4fb416e..576c1c3 100644
--- a/drivers/event/dlb2/dlb2_iface.h
+++ b/drivers/event/dlb2/dlb2_iface.h
@@ -26,4 +26,9 @@ extern int (*dlb2_iface_get_cq_poll_mode)(struct dlb2_hw_dev *handle,
extern int (*dlb2_iface_get_num_resources)(struct dlb2_hw_dev *handle,
struct dlb2_get_num_resources_args *rsrcs);
+extern int (*dlb2_iface_sched_domain_create)(struct dlb2_hw_dev *handle,
+ struct dlb2_create_sched_domain_args *args);
+
+extern void (*dlb2_iface_domain_reset)(struct dlb2_eventdev *dlb2);
+
#endif /* _DLB2_IFACE_H_ */
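These extern function pointers are the seam between the PMD proper and its
hardware backends; each backend assigns them at probe time. A minimal
sketch of the PF-side wiring (the real assignments live in pf/dlb2_pf.c
later in this patch; the function names here are illustrative only):

#include "dlb2_iface.h"

/* PF implementations, defined elsewhere (names illustrative) */
static int dlb2_pf_sched_domain_create(struct dlb2_hw_dev *handle,
				struct dlb2_create_sched_domain_args *args);
static void dlb2_pf_domain_reset(struct dlb2_eventdev *dlb2);

static void
dlb2_pf_iface_fn_ptrs_init(void)
{
	/* Route the generic iface calls to the PF backend */
	dlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;
	dlb2_iface_domain_reset = dlb2_pf_domain_reset;
}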
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c
index 6de8b95..f83f8a1 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c
@@ -12,6 +12,24 @@
#include "dlb2_regs.h"
#include "dlb2_resource.h"
+#define DLB2_DOM_LIST_HEAD(head, type) \
+ DLB2_LIST_HEAD((head), type, domain_list)
+
+#define DLB2_FUNC_LIST_HEAD(head, type) \
+ DLB2_LIST_HEAD((head), type, func_list)
+
+#define DLB2_DOM_LIST_FOR(head, ptr, iter) \
+ DLB2_LIST_FOR_EACH(head, ptr, domain_list, iter)
+
+#define DLB2_FUNC_LIST_FOR(head, ptr, iter) \
+ DLB2_LIST_FOR_EACH(head, ptr, func_list, iter)
+
+#define DLB2_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
+ DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)
+
+#define DLB2_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
+ DLB2_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
+
static void dlb2_init_domain_rsrc_lists(struct dlb2_hw_domain *domain)
{
int i;
@@ -272,3 +290,3219 @@ void dlb2_clr_pmcsr_disable(struct dlb2_hw *hw)
DLB2_CSR_WR(hw, DLB2_CFG_MSTR_CFG_PM_PMCSR_DISABLE, r0.val);
}
+
+static void dlb2_configure_domain_credits(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ union dlb2_chp_cfg_ldb_vas_crd r0 = { {0} };
+ union dlb2_chp_cfg_dir_vas_crd r1 = { {0} };
+
+ r0.field.count = domain->num_ldb_credits;
+
+ DLB2_CSR_WR(hw, DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id), r0.val);
+
+ r1.field.count = domain->num_dir_credits;
+
+ DLB2_CSR_WR(hw, DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id), r1.val);
+}
+
+static struct dlb2_ldb_port *
+dlb2_get_next_ldb_port(struct dlb2_hw *hw,
+ struct dlb2_function_resources *rsrcs,
+ u32 domain_id,
+ u32 cos_id)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_port *port;
+ RTE_SET_USED(iter);
+ /*
+ * To reduce the odds of consecutive load-balanced ports mapping to the
+ * same queue(s), the driver attempts to allocate ports whose neighbors
+ * are owned by a different domain.
+ */
+ DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
+ u32 next, prev;
+ u32 phys_id;
+
+ phys_id = port->id.phys_id;
+ next = phys_id + 1;
+ prev = phys_id - 1;
+
+ if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
+ next = 0;
+ if (phys_id == 0)
+ prev = DLB2_MAX_NUM_LDB_PORTS - 1;
+
+ if (!hw->rsrcs.ldb_ports[next].owned ||
+ hw->rsrcs.ldb_ports[next].domain_id.phys_id == domain_id)
+ continue;
+
+ if (!hw->rsrcs.ldb_ports[prev].owned ||
+ hw->rsrcs.ldb_ports[prev].domain_id.phys_id == domain_id)
+ continue;
+
+ return port;
+ }
+
+ /*
+ * Failing that, the driver looks for a port with one neighbor owned by
+ * a different domain and the other unallocated.
+ */
+ DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
+ u32 next, prev;
+ u32 phys_id;
+
+ phys_id = port->id.phys_id;
+ next = phys_id + 1;
+ prev = phys_id - 1;
+
+ if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
+ next = 0;
+ if (phys_id == 0)
+ prev = DLB2_MAX_NUM_LDB_PORTS - 1;
+
+ if (!hw->rsrcs.ldb_ports[prev].owned &&
+ hw->rsrcs.ldb_ports[next].owned &&
+ hw->rsrcs.ldb_ports[next].domain_id.phys_id != domain_id)
+ return port;
+
+ if (!hw->rsrcs.ldb_ports[next].owned &&
+ hw->rsrcs.ldb_ports[prev].owned &&
+ hw->rsrcs.ldb_ports[prev].domain_id.phys_id != domain_id)
+ return port;
+ }
+
+ /*
+ * Failing that, the driver looks for a port with both neighbors
+ * unallocated.
+ */
+ DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_ports[cos_id], port, iter) {
+ u32 next, prev;
+ u32 phys_id;
+
+ phys_id = port->id.phys_id;
+ next = phys_id + 1;
+ prev = phys_id - 1;
+
+ if (phys_id == DLB2_MAX_NUM_LDB_PORTS - 1)
+ next = 0;
+ if (phys_id == 0)
+ prev = DLB2_MAX_NUM_LDB_PORTS - 1;
+
+ if (!hw->rsrcs.ldb_ports[prev].owned &&
+ !hw->rsrcs.ldb_ports[next].owned)
+ return port;
+ }
+
+ /* If all else fails, the driver returns the next available port. */
+ return DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports[cos_id],
+ typeof(*port));
+}
+
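The next/prev computation above treats the LDB port ID space as a ring. A
quick worked sketch of the wraparound, assuming DLB2_MAX_NUM_LDB_PORTS is
64 (IDs 0..63):

/* phys_id = 0:  next = 1,         prev = 63 (wraps) */
/* phys_id = 63: next = 0 (wraps), prev = 62         */
u32 next = (phys_id == 63) ? 0 : phys_id + 1;
u32 prev = (phys_id == 0) ? 63 : phys_id - 1;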
+static int __dlb2_attach_ldb_ports(struct dlb2_hw *hw,
+ struct dlb2_function_resources *rsrcs,
+ struct dlb2_hw_domain *domain,
+ u32 num_ports,
+ u32 cos_id,
+ struct dlb2_cmd_response *resp)
+{
+ unsigned int i;
+
+ if (rsrcs->num_avail_ldb_ports[cos_id] < num_ports) {
+ resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_ports; i++) {
+ struct dlb2_ldb_port *port;
+
+ port = dlb2_get_next_ldb_port(hw, rsrcs,
+ domain->id.phys_id, cos_id);
+ if (!port) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: domain validation failed\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ dlb2_list_del(&rsrcs->avail_ldb_ports[cos_id],
+ &port->func_list);
+
+ port->domain_id = domain->id;
+ port->owned = true;
+
+ dlb2_list_add(&domain->avail_ldb_ports[cos_id],
+ &port->domain_list);
+ }
+
+ rsrcs->num_avail_ldb_ports[cos_id] -= num_ports;
+
+ return 0;
+}
+
+static int dlb2_attach_ldb_ports(struct dlb2_hw *hw,
+ struct dlb2_function_resources *rsrcs,
+ struct dlb2_hw_domain *domain,
+ struct dlb2_create_sched_domain_args *args,
+ struct dlb2_cmd_response *resp)
+{
+ unsigned int i, j;
+ int ret;
+
+ if (args->cos_strict) {
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ u32 num = args->num_cos_ldb_ports[i];
+
+ /* Allocate ports from specific classes-of-service */
+ ret = __dlb2_attach_ldb_ports(hw,
+ rsrcs,
+ domain,
+ num,
+ i,
+ resp);
+ if (ret)
+ return ret;
+ }
+ } else {
+ unsigned int k;
+ u32 cos_id;
+
+ /*
+ * Attempt to allocate from specific class-of-service, but
+ * fallback to the other classes if that fails.
+ */
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ for (j = 0; j < args->num_cos_ldb_ports[i]; j++) {
+ for (k = 0; k < DLB2_NUM_COS_DOMAINS; k++) {
+ cos_id = (i + k) % DLB2_NUM_COS_DOMAINS;
+
+ ret = __dlb2_attach_ldb_ports(hw,
+ rsrcs,
+ domain,
+ 1,
+ cos_id,
+ resp);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ /* Allocate num_ldb_ports from any class-of-service */
+ for (i = 0; i < args->num_ldb_ports; i++) {
+ for (j = 0; j < DLB2_NUM_COS_DOMAINS; j++) {
+ ret = __dlb2_attach_ldb_ports(hw,
+ rsrcs,
+ domain,
+ 1,
+ j,
+ resp);
+ if (ret == 0)
+ break;
+ }
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
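In the non-strict path above, each requested port tries its own class of
service first and then falls back to the others via the
(i + k) % DLB2_NUM_COS_DOMAINS rotation. A tiny standalone loop that
prints the resulting search order for the four DLB 2.0 classes:

#include <stdio.h>

int main(void)
{
	int i, k;

	for (i = 0; i < 4; i++) {
		printf("requested CoS %d tries:", i);
		for (k = 0; k < 4; k++)
			printf(" %d", (i + k) % 4); /* same rotation */
		printf("\n"); /* e.g. "requested CoS 1 tries: 1 2 3 0" */
	}
	return 0;
}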
+static int dlb2_attach_dir_ports(struct dlb2_hw *hw,
+ struct dlb2_function_resources *rsrcs,
+ struct dlb2_hw_domain *domain,
+ u32 num_ports,
+ struct dlb2_cmd_response *resp)
+{
+ unsigned int i;
+
+ if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
+ resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_ports; i++) {
+ struct dlb2_dir_pq_pair *port;
+
+ port = DLB2_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
+ typeof(*port));
+ if (!port) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: domain validation failed\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ dlb2_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);
+
+ port->domain_id = domain->id;
+ port->owned = true;
+
+ dlb2_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
+ }
+
+ rsrcs->num_avail_dir_pq_pairs -= num_ports;
+
+ return 0;
+}
+
+static int dlb2_attach_ldb_credits(struct dlb2_function_resources *rsrcs,
+ struct dlb2_hw_domain *domain,
+ u32 num_credits,
+ struct dlb2_cmd_response *resp)
+{
+ if (rsrcs->num_avail_qed_entries < num_credits) {
+ resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ rsrcs->num_avail_qed_entries -= num_credits;
+ domain->num_ldb_credits += num_credits;
+ return 0;
+}
+
+static int dlb2_attach_dir_credits(struct dlb2_function_resources *rsrcs,
+ struct dlb2_hw_domain *domain,
+ u32 num_credits,
+ struct dlb2_cmd_response *resp)
+{
+ if (rsrcs->num_avail_dqed_entries < num_credits) {
+ resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ rsrcs->num_avail_dqed_entries -= num_credits;
+ domain->num_dir_credits += num_credits;
+ return 0;
+}
+
+static int dlb2_attach_atomic_inflights(struct dlb2_function_resources *rsrcs,
+ struct dlb2_hw_domain *domain,
+ u32 num_atomic_inflights,
+ struct dlb2_cmd_response *resp)
+{
+ if (rsrcs->num_avail_aqed_entries < num_atomic_inflights) {
+ resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ rsrcs->num_avail_aqed_entries -= num_atomic_inflights;
+ domain->num_avail_aqed_entries += num_atomic_inflights;
+ return 0;
+}
+
+static int
+dlb2_attach_domain_hist_list_entries(struct dlb2_function_resources *rsrcs,
+ struct dlb2_hw_domain *domain,
+ u32 num_hist_list_entries,
+ struct dlb2_cmd_response *resp)
+{
+ struct dlb2_bitmap *bitmap;
+ int base;
+
+ if (num_hist_list_entries) {
+ bitmap = rsrcs->avail_hist_list_entries;
+
+ base = dlb2_bitmap_find_set_bit_range(bitmap,
+ num_hist_list_entries);
+ if (base < 0)
+ goto error;
+
+ domain->total_hist_list_entries = num_hist_list_entries;
+ domain->avail_hist_list_entries = num_hist_list_entries;
+ domain->hist_list_entry_base = base;
+ domain->hist_list_entry_offset = 0;
+
+ dlb2_bitmap_clear_range(bitmap, base, num_hist_list_entries);
+ }
+ return 0;
+
+error:
+ resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
+ return -EINVAL;
+}
+
+static int dlb2_attach_ldb_queues(struct dlb2_hw *hw,
+ struct dlb2_function_resources *rsrcs,
+ struct dlb2_hw_domain *domain,
+ u32 num_queues,
+ struct dlb2_cmd_response *resp)
+{
+ unsigned int i;
+
+ if (rsrcs->num_avail_ldb_queues < num_queues) {
+ resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ struct dlb2_ldb_queue *queue;
+
+ queue = DLB2_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
+ typeof(*queue));
+ if (!queue) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: domain validation failed\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ dlb2_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);
+
+ queue->domain_id = domain->id;
+ queue->owned = true;
+
+ dlb2_list_add(&domain->avail_ldb_queues, &queue->domain_list);
+ }
+
+ rsrcs->num_avail_ldb_queues -= num_queues;
+
+ return 0;
+}
+
+static int
+dlb2_domain_attach_resources(struct dlb2_hw *hw,
+ struct dlb2_function_resources *rsrcs,
+ struct dlb2_hw_domain *domain,
+ struct dlb2_create_sched_domain_args *args,
+ struct dlb2_cmd_response *resp)
+{
+ int ret;
+
+ ret = dlb2_attach_ldb_queues(hw,
+ rsrcs,
+ domain,
+ args->num_ldb_queues,
+ resp);
+ if (ret < 0)
+ return ret;
+
+ ret = dlb2_attach_ldb_ports(hw,
+ rsrcs,
+ domain,
+ args,
+ resp);
+ if (ret < 0)
+ return ret;
+
+ ret = dlb2_attach_dir_ports(hw,
+ rsrcs,
+ domain,
+ args->num_dir_ports,
+ resp);
+ if (ret < 0)
+ return ret;
+
+ ret = dlb2_attach_ldb_credits(rsrcs,
+ domain,
+ args->num_ldb_credits,
+ resp);
+ if (ret < 0)
+ return ret;
+
+ ret = dlb2_attach_dir_credits(rsrcs,
+ domain,
+ args->num_dir_credits,
+ resp);
+ if (ret < 0)
+ return ret;
+
+ ret = dlb2_attach_domain_hist_list_entries(rsrcs,
+ domain,
+ args->num_hist_list_entries,
+ resp);
+ if (ret < 0)
+ return ret;
+
+ ret = dlb2_attach_atomic_inflights(rsrcs,
+ domain,
+ args->num_atomic_inflights,
+ resp);
+ if (ret < 0)
+ return ret;
+
+ dlb2_configure_domain_credits(hw, domain);
+
+ domain->configured = true;
+
+ domain->started = false;
+
+ rsrcs->num_avail_domains--;
+
+ return 0;
+}
+
+static int
+dlb2_verify_create_sched_dom_args(struct dlb2_function_resources *rsrcs,
+ struct dlb2_create_sched_domain_args *args,
+ struct dlb2_cmd_response *resp)
+{
+ u32 num_avail_ldb_ports, req_ldb_ports;
+ struct dlb2_bitmap *avail_hl_entries;
+ unsigned int max_contig_hl_range;
+ int i;
+
+ avail_hl_entries = rsrcs->avail_hist_list_entries;
+
+ max_contig_hl_range = dlb2_bitmap_longest_set_range(avail_hl_entries);
+
+ num_avail_ldb_ports = 0;
+ req_ldb_ports = 0;
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ num_avail_ldb_ports += rsrcs->num_avail_ldb_ports[i];
+
+ req_ldb_ports += args->num_cos_ldb_ports[i];
+ }
+
+ req_ldb_ports += args->num_ldb_ports;
+
+ if (rsrcs->num_avail_domains < 1) {
+ resp->status = DLB2_ST_DOMAIN_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues) {
+ resp->status = DLB2_ST_LDB_QUEUES_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ if (req_ldb_ports > num_avail_ldb_ports) {
+ resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ for (i = 0; args->cos_strict && i < DLB2_NUM_COS_DOMAINS; i++) {
+ if (args->num_cos_ldb_ports[i] >
+ rsrcs->num_avail_ldb_ports[i]) {
+ resp->status = DLB2_ST_LDB_PORTS_UNAVAILABLE;
+ return -EINVAL;
+ }
+ }
+
+ if (args->num_ldb_queues > 0 && req_ldb_ports == 0) {
+ resp->status = DLB2_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
+ return -EINVAL;
+ }
+
+ if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports) {
+ resp->status = DLB2_ST_DIR_PORTS_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ if (rsrcs->num_avail_qed_entries < args->num_ldb_credits) {
+ resp->status = DLB2_ST_LDB_CREDITS_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ if (rsrcs->num_avail_dqed_entries < args->num_dir_credits) {
+ resp->status = DLB2_ST_DIR_CREDITS_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ if (rsrcs->num_avail_aqed_entries < args->num_atomic_inflights) {
+ resp->status = DLB2_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ if (max_contig_hl_range < args->num_hist_list_entries) {
+ resp->status = DLB2_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+dlb2_log_create_sched_domain_args(struct dlb2_hw *hw,
+ struct dlb2_create_sched_domain_args *args,
+ bool vdev_req,
+ unsigned int vdev_id)
+{
+ DLB2_HW_DBG(hw, "DLB2 create sched domain arguments:\n");
+ if (vdev_req)
+ DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
+ DLB2_HW_DBG(hw, "\tNumber of LDB queues: %d\n",
+ args->num_ldb_queues);
+ DLB2_HW_DBG(hw, "\tNumber of LDB ports (any CoS): %d\n",
+ args->num_ldb_ports);
+ DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 0): %d\n",
+ args->num_cos_ldb_ports[0]);
+ DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 1): %d\n",
+ args->num_cos_ldb_ports[1]);
+ DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 2): %d\n",
+ args->num_cos_ldb_ports[2]);
+ DLB2_HW_DBG(hw, "\tNumber of LDB ports (CoS 3): %d\n",
+ args->num_cos_ldb_ports[3]);
+ DLB2_HW_DBG(hw, "\tStrict CoS allocation: %d\n",
+ args->cos_strict);
+ DLB2_HW_DBG(hw, "\tNumber of DIR ports: %d\n",
+ args->num_dir_ports);
+ DLB2_HW_DBG(hw, "\tNumber of ATM inflights: %d\n",
+ args->num_atomic_inflights);
+ DLB2_HW_DBG(hw, "\tNumber of hist list entries: %d\n",
+ args->num_hist_list_entries);
+ DLB2_HW_DBG(hw, "\tNumber of LDB credits: %d\n",
+ args->num_ldb_credits);
+ DLB2_HW_DBG(hw, "\tNumber of DIR credits: %d\n",
+ args->num_dir_credits);
+}
+
+/**
+ * dlb2_hw_create_sched_domain() - Allocate and initialize a DLB scheduling
+ * domain and its resources.
+ * @hw: Contains the current state of the DLB2 hardware.
+ * @args: User-provided arguments.
+ * @resp: Response to user.
+ * @vdev_req: Request came from a virtual device.
+ * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
+ *
+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
+ * satisfy a request, resp->status will be set accordingly.
+ */
+int dlb2_hw_create_sched_domain(struct dlb2_hw *hw,
+ struct dlb2_create_sched_domain_args *args,
+ struct dlb2_cmd_response *resp,
+ bool vdev_req,
+ unsigned int vdev_id)
+{
+ struct dlb2_function_resources *rsrcs;
+ struct dlb2_hw_domain *domain;
+ int ret;
+
+ rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
+
+ dlb2_log_create_sched_domain_args(hw, args, vdev_req, vdev_id);
+
+ /*
+ * Verify that hardware resources are available before attempting to
+ * satisfy the request. This simplifies the error unwinding code.
+ */
+ ret = dlb2_verify_create_sched_dom_args(rsrcs, args, resp);
+ if (ret)
+ return ret;
+
+ domain = DLB2_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
+ if (!domain) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: no available domains\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if (domain->configured) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: avail_domains contains configured domains.\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ dlb2_init_domain_rsrc_lists(domain);
+
+ ret = dlb2_domain_attach_resources(hw, rsrcs, domain, args, resp);
+ if (ret < 0) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: failed to attach domain resources.\n",
+ __func__);
+
+ return ret;
+ }
+
+ dlb2_list_del(&rsrcs->avail_domains, &domain->func_list);
+
+ dlb2_list_add(&rsrcs->used_domains, &domain->func_list);
+
+ resp->id = (vdev_req) ? domain->id.virt_id : domain->id.phys_id;
+ resp->status = 0;
+
+ return 0;
+}
+
+/*
+ * The PF driver cannot assume that a register write will affect subsequent HCW
+ * writes. To ensure a write completes, the driver must read back a CSR. This
+ * function need only be called for configuration that can occur after the
+ * domain has started; prior to starting, applications can't send HCWs.
+ */
+static inline void dlb2_flush_csr(struct dlb2_hw *hw)
+{
+ DLB2_CSR_RD(hw, DLB2_SYS_TOTAL_VAS);
+}
+
+static void dlb2_dir_port_cq_disable(struct dlb2_hw *hw,
+ struct dlb2_dir_pq_pair *port)
+{
+ union dlb2_lsp_cq_dir_dsbl reg;
+
+ reg.field.disabled = 1;
+
+ DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);
+
+ dlb2_flush_csr(hw);
+}
+
+static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
+ struct dlb2_dir_pq_pair *port)
+{
+ union dlb2_lsp_cq_dir_tkn_cnt r0;
+
+ r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id));
+
+ /*
+ * Account for the initial token count, which is used in order to
+ * provide a CQ with depth less than 8.
+ */
+
+ return r0.field.count - port->init_tkn_cnt;
+}
+
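The init_tkn_cnt subtraction above deserves a worked number. To present a
CQ shallower than the 8-entry hardware minimum, the driver pre-loads
tokens at configure time, and those must not be counted as tokens the
application owes. With hypothetical values:

/* CQ configured with effective depth 6 on 8-deep hardware */
u32 init_tkn_cnt = 2; /* pre-loaded at CQ configure time */
u32 raw_count = 5;    /* LSP_CQ_DIR_TKN_CNT reading */
u32 owed = raw_count - init_tkn_cnt; /* 3 tokens actually outstanding */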
+static int dlb2_drain_dir_cq(struct dlb2_hw *hw,
+ struct dlb2_dir_pq_pair *port)
+{
+ unsigned int port_id = port->id.phys_id;
+ u32 cnt;
+
+ /* Return any outstanding tokens */
+ cnt = dlb2_dir_cq_token_count(hw, port);
+
+ if (cnt != 0) {
+ struct dlb2_hcw hcw_mem[8], *hcw;
+ void *pp_addr;
+
+ pp_addr = os_map_producer_port(hw, port_id, false);
+
+ /* Point hcw to a 64B-aligned location */
+ hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
+
+ /*
+ * Program the first HCW for a batch token return and
+ * the rest as NOOPS
+ */
+ memset(hcw, 0, 4 * sizeof(*hcw));
+ hcw->cq_token = 1;
+ hcw->lock_id = cnt - 1;
+
+ os_enqueue_four_hcws(hw, hcw, pp_addr);
+
+ os_fence_hcw(hw, pp_addr);
+
+ os_unmap_producer_port(hw, pp_addr);
+ }
+
+ return 0;
+}
+
+static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
+ struct dlb2_dir_pq_pair *port)
+{
+ union dlb2_lsp_cq_dir_dsbl reg;
+
+ reg.field.disabled = 0;
+
+ DLB2_CSR_WR(hw, DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id), reg.val);
+
+ dlb2_flush_csr(hw);
+}
+
+static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain,
+ bool toggle_port)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_dir_pq_pair *port;
+ int ret;
+ RTE_SET_USED(iter);
+
+ DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
+ /*
+ * Can't drain a port if it's not configured, and there's
+ * nothing to drain if its queue is unconfigured.
+ */
+ if (!port->port_configured || !port->queue_configured)
+ continue;
+
+ if (toggle_port)
+ dlb2_dir_port_cq_disable(hw, port);
+
+ ret = dlb2_drain_dir_cq(hw, port);
+ if (ret < 0)
+ return ret;
+
+ if (toggle_port)
+ dlb2_dir_port_cq_enable(hw, port);
+ }
+
+ return 0;
+}
+
+static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
+ struct dlb2_dir_pq_pair *queue)
+{
+ union dlb2_lsp_qid_dir_enqueue_cnt r0;
+
+ r0.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID_DIR_ENQUEUE_CNT(queue->id.phys_id));
+
+ return r0.field.count;
+}
+
+static bool dlb2_dir_queue_is_empty(struct dlb2_hw *hw,
+ struct dlb2_dir_pq_pair *queue)
+{
+ return dlb2_dir_queue_depth(hw, queue) == 0;
+}
+
+static bool dlb2_domain_dir_queues_empty(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_dir_pq_pair *queue;
+ RTE_SET_USED(iter);
+
+ DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
+ if (!dlb2_dir_queue_is_empty(hw, queue))
+ return false;
+ }
+
+ return true;
+}
+
+static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ int i, ret;
+
+ /* If the domain hasn't been started, there's no traffic to drain */
+ if (!domain->started)
+ return 0;
+
+ for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
+ ret = dlb2_domain_drain_dir_cqs(hw, domain, true);
+ if (ret < 0)
+ return ret;
+
+ if (dlb2_domain_dir_queues_empty(hw, domain))
+ break;
+ }
+
+ if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: failed to empty queues\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ /*
+ * Drain the CQs one more time. For the queues to have gone empty,
+ * the hardware must have scheduled one or more QEs into the CQs.
+ */
+ ret = dlb2_domain_drain_dir_cqs(hw, domain, true);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void dlb2_ldb_port_cq_enable(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *port)
+{
+ union dlb2_lsp_cq_ldb_dsbl reg;
+
+ /*
+ * Don't re-enable the port if a removal is pending. The caller should
+ * mark this port as enabled (if it isn't already), and when the
+ * removal completes the port will be enabled.
+ */
+ if (port->num_pending_removals)
+ return;
+
+ reg.field.disabled = 0;
+
+ DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
+
+ dlb2_flush_csr(hw);
+}
+
+static void dlb2_ldb_port_cq_disable(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *port)
+{
+ union dlb2_lsp_cq_ldb_dsbl reg;
+
+ reg.field.disabled = 1;
+
+ DLB2_CSR_WR(hw, DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id), reg.val);
+
+ dlb2_flush_csr(hw);
+}
+
+static u32 dlb2_ldb_cq_inflight_count(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *port)
+{
+ union dlb2_lsp_cq_ldb_infl_cnt r0;
+
+ r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
+
+ return r0.field.count;
+}
+
+static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *port)
+{
+ union dlb2_lsp_cq_ldb_tkn_cnt r0;
+
+ r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id));
+
+ /*
+ * Account for the initial token count, which is used in order to
+ * provide a CQ with depth less than 8.
+ */
+
+ return r0.field.token_count - port->init_tkn_cnt;
+}
+
+static int dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
+{
+ u32 infl_cnt, tkn_cnt;
+ unsigned int i;
+
+ infl_cnt = dlb2_ldb_cq_inflight_count(hw, port);
+ tkn_cnt = dlb2_ldb_cq_token_count(hw, port);
+
+ if (infl_cnt || tkn_cnt) {
+ struct dlb2_hcw hcw_mem[8], *hcw;
+ void *pp_addr;
+
+ pp_addr = os_map_producer_port(hw, port->id.phys_id, true);
+
+ /* Point hcw to a 64B-aligned location */
+ hcw = (struct dlb2_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
+
+ /*
+ * Program the first HCW for a completion and token return and
+ * the other HCWs as NOOPS
+ */
+
+ memset(hcw, 0, 4 * sizeof(*hcw));
+ hcw->qe_comp = (infl_cnt > 0);
+ hcw->cq_token = (tkn_cnt > 0);
+ hcw->lock_id = tkn_cnt - 1;
+
+ /* Return tokens in the first HCW */
+ os_enqueue_four_hcws(hw, hcw, pp_addr);
+
+ hcw->cq_token = 0;
+
+ /* Issue remaining completions (if any) */
+ for (i = 1; i < infl_cnt; i++)
+ os_enqueue_four_hcws(hw, hcw, pp_addr);
+
+ os_fence_hcw(hw, pp_addr);
+
+ os_unmap_producer_port(hw, pp_addr);
+ }
+
+ return 0;
+}
+
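To make the batching above concrete, take hypothetical counts
infl_cnt = 3 and tkn_cnt = 2; the loop then enqueues three 4-HCW batches:

/* Batch 1: hcw[0] = {qe_comp = 1, cq_token = 1, lock_id = 1}
 *          + 3 NOOPs -> returns 2 tokens and 1 completion
 * Batch 2: hcw[0] = {qe_comp = 1} + 3 NOOPs -> 1 completion
 * Batch 3: hcw[0] = {qe_comp = 1} + 3 NOOPs -> 1 completion
 * Total: tkn_cnt tokens returned, infl_cnt completions issued.
 */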
+static int dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain,
+ bool toggle_port)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_port *port;
+ int ret, i;
+ RTE_SET_USED(iter);
+
+ /* If the domain hasn't been started, there's no traffic to drain */
+ if (!domain->started)
+ return 0;
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+ if (toggle_port)
+ dlb2_ldb_port_cq_disable(hw, port);
+
+ ret = dlb2_drain_ldb_cq(hw, port);
+ if (ret < 0)
+ return ret;
+
+ if (toggle_port)
+ dlb2_ldb_port_cq_enable(hw, port);
+ }
+ }
+
+ return 0;
+}
+
+static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
+ struct dlb2_ldb_queue *queue)
+{
+ union dlb2_lsp_qid_aqed_active_cnt r0;
+ union dlb2_lsp_qid_atm_active r1;
+ union dlb2_lsp_qid_ldb_enqueue_cnt r2;
+
+ r0.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
+ r1.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID_ATM_ACTIVE(queue->id.phys_id));
+
+ r2.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
+
+ return r0.field.count + r1.field.count + r2.field.count;
+}
+
+static bool dlb2_ldb_queue_is_empty(struct dlb2_hw *hw,
+ struct dlb2_ldb_queue *queue)
+{
+ return dlb2_ldb_queue_depth(hw, queue) == 0;
+}
+
+static bool dlb2_domain_mapped_queues_empty(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_queue *queue;
+ RTE_SET_USED(iter);
+
+ DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
+ if (queue->num_mappings == 0)
+ continue;
+
+ if (!dlb2_ldb_queue_is_empty(hw, queue))
+ return false;
+ }
+
+ return true;
+}
+
+static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ int i, ret;
+
+ /* If the domain hasn't been started, there's no traffic to drain */
+ if (!domain->started)
+ return 0;
+
+ if (domain->num_pending_removals > 0) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: failed to unmap domain queues\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
+ ret = dlb2_domain_drain_ldb_cqs(hw, domain, true);
+ if (ret < 0)
+ return ret;
+
+ if (dlb2_domain_mapped_queues_empty(hw, domain))
+ break;
+ }
+
+ if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: failed to empty queues\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ /*
+ * Drain the CQs one more time. For the queues to have gone empty,
+ * the hardware must have scheduled one or more QEs into the CQs.
+ */
+ ret = dlb2_domain_drain_ldb_cqs(hw, domain, true);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void dlb2_domain_enable_ldb_cqs(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_port *port;
+ int i;
+ RTE_SET_USED(iter);
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+ port->enabled = true;
+
+ dlb2_ldb_port_cq_enable(hw, port);
+ }
+ }
+}
+
+static struct dlb2_ldb_queue *
+dlb2_get_ldb_queue_from_id(struct dlb2_hw *hw,
+ u32 id,
+ bool vdev_req,
+ unsigned int vdev_id)
+{
+ struct dlb2_list_entry *iter1;
+ struct dlb2_list_entry *iter2;
+ struct dlb2_function_resources *rsrcs;
+ struct dlb2_hw_domain *domain;
+ struct dlb2_ldb_queue *queue;
+ RTE_SET_USED(iter1);
+ RTE_SET_USED(iter2);
+
+ if (id >= DLB2_MAX_NUM_LDB_QUEUES)
+ return NULL;
+
+ rsrcs = (vdev_req) ? &hw->vdev[vdev_id] : &hw->pf;
+
+ if (!vdev_req)
+ return &hw->rsrcs.ldb_queues[id];
+
+ DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iter1) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter2)
+ if (queue->id.virt_id == id)
+ return queue;
+ }
+
+ DLB2_FUNC_LIST_FOR(rsrcs->avail_ldb_queues, queue, iter1)
+ if (queue->id.virt_id == id)
+ return queue;
+
+ return NULL;
+}
+
+static struct dlb2_hw_domain *dlb2_get_domain_from_id(struct dlb2_hw *hw,
+ u32 id,
+ bool vdev_req,
+ unsigned int vdev_id)
+{
+ struct dlb2_list_entry *iteration;
+ struct dlb2_function_resources *rsrcs;
+ struct dlb2_hw_domain *domain;
+ RTE_SET_USED(iteration);
+
+ if (id >= DLB2_MAX_NUM_DOMAINS)
+ return NULL;
+
+ if (!vdev_req)
+ return &hw->domains[id];
+
+ rsrcs = &hw->vdev[vdev_id];
+
+ DLB2_FUNC_LIST_FOR(rsrcs->used_domains, domain, iteration)
+ if (domain->id.virt_id == id)
+ return domain;
+
+ return NULL;
+}
+
+static int dlb2_port_slot_state_transition(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *port,
+ struct dlb2_ldb_queue *queue,
+ int slot,
+ enum dlb2_qid_map_state new_state)
+{
+ enum dlb2_qid_map_state curr_state = port->qid_map[slot].state;
+ struct dlb2_hw_domain *domain;
+ int domain_id;
+
+ domain_id = port->domain_id.phys_id;
+
+ domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
+ if (!domain) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: unable to find domain %d\n",
+ __func__, domain_id);
+ return -EINVAL;
+ }
+
+ switch (curr_state) {
+ case DLB2_QUEUE_UNMAPPED:
+ switch (new_state) {
+ case DLB2_QUEUE_MAPPED:
+ queue->num_mappings++;
+ port->num_mappings++;
+ break;
+ case DLB2_QUEUE_MAP_IN_PROG:
+ queue->num_pending_additions++;
+ domain->num_pending_additions++;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case DLB2_QUEUE_MAPPED:
+ switch (new_state) {
+ case DLB2_QUEUE_UNMAPPED:
+ queue->num_mappings--;
+ port->num_mappings--;
+ break;
+ case DLB2_QUEUE_UNMAP_IN_PROG:
+ port->num_pending_removals++;
+ domain->num_pending_removals++;
+ break;
+ case DLB2_QUEUE_MAPPED:
+ /* Priority change, nothing to update */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case DLB2_QUEUE_MAP_IN_PROG:
+ switch (new_state) {
+ case DLB2_QUEUE_UNMAPPED:
+ queue->num_pending_additions--;
+ domain->num_pending_additions--;
+ break;
+ case DLB2_QUEUE_MAPPED:
+ queue->num_mappings++;
+ port->num_mappings++;
+ queue->num_pending_additions--;
+ domain->num_pending_additions--;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case DLB2_QUEUE_UNMAP_IN_PROG:
+ switch (new_state) {
+ case DLB2_QUEUE_UNMAPPED:
+ port->num_pending_removals--;
+ domain->num_pending_removals--;
+ queue->num_mappings--;
+ port->num_mappings--;
+ break;
+ case DLB2_QUEUE_MAPPED:
+ port->num_pending_removals--;
+ domain->num_pending_removals--;
+ break;
+ case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
+ /* Nothing to update */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP:
+ switch (new_state) {
+ case DLB2_QUEUE_UNMAP_IN_PROG:
+ /* Nothing to update */
+ break;
+ case DLB2_QUEUE_UNMAPPED:
+ /*
+ * An UNMAP_IN_PROG_PENDING_MAP slot briefly
+ * becomes UNMAPPED before it transitions to
+ * MAP_IN_PROG.
+ */
+ queue->num_mappings--;
+ port->num_mappings--;
+ port->num_pending_removals--;
+ domain->num_pending_removals--;
+ break;
+ default:
+ goto error;
+ }
+ break;
+ default:
+ goto error;
+ }
+
+ port->qid_map[slot].state = new_state;
+
+ DLB2_HW_DBG(hw,
+ "[%s()] queue %d -> port %d state transition (%d -> %d)\n",
+ __func__, queue->id.phys_id, port->id.phys_id,
+ curr_state, new_state);
+ return 0;
+
+error:
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
+ __func__, queue->id.phys_id, port->id.phys_id,
+ curr_state, new_state);
+ return -EFAULT;
+}
+
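For reference, the complete set of legal slot-state transitions encoded in
the switch above (any other pair falls through to the error path and
returns -EFAULT):

/* UNMAPPED                  -> MAPPED | MAP_IN_PROG
 * MAPPED                    -> UNMAPPED | UNMAP_IN_PROG
 *                              | MAPPED (priority change only)
 * MAP_IN_PROG               -> UNMAPPED | MAPPED
 * UNMAP_IN_PROG             -> UNMAPPED | MAPPED
 *                              | UNMAP_IN_PROG_PENDING_MAP
 * UNMAP_IN_PROG_PENDING_MAP -> UNMAP_IN_PROG | UNMAPPED
 */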
+static bool dlb2_port_find_slot(struct dlb2_ldb_port *port,
+ enum dlb2_qid_map_state state,
+ int *slot)
+{
+ int i;
+
+ for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+ if (port->qid_map[i].state == state)
+ break;
+ }
+
+ *slot = i;
+
+ return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
+}
+
+static bool dlb2_port_find_slot_queue(struct dlb2_ldb_port *port,
+ enum dlb2_qid_map_state state,
+ struct dlb2_ldb_queue *queue,
+ int *slot)
+{
+ int i;
+
+ for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+ if (port->qid_map[i].state == state &&
+ port->qid_map[i].qid == queue->id.phys_id)
+ break;
+ }
+
+ *slot = i;
+
+ return (i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ);
+}
+
+/*
+ * dlb2_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
+ * their function names imply, and should only be called by the dynamic CQ
+ * mapping code.
+ */
+static void dlb2_ldb_queue_disable_mapped_cqs(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain,
+ struct dlb2_ldb_queue *queue)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_port *port;
+ int slot, i;
+ RTE_SET_USED(iter);
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+ enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
+
+ if (!dlb2_port_find_slot_queue(port, state,
+ queue, &slot))
+ continue;
+
+ if (port->enabled)
+ dlb2_ldb_port_cq_disable(hw, port);
+ }
+ }
+}
+
+static void dlb2_ldb_queue_enable_mapped_cqs(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain,
+ struct dlb2_ldb_queue *queue)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_port *port;
+ int slot, i;
+ RTE_SET_USED(iter);
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+ enum dlb2_qid_map_state state = DLB2_QUEUE_MAPPED;
+
+ if (!dlb2_port_find_slot_queue(port, state,
+ queue, &slot))
+ continue;
+
+ if (port->enabled)
+ dlb2_ldb_port_cq_enable(hw, port);
+ }
+ }
+}
+
+static void dlb2_ldb_port_clear_queue_if_status(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *port,
+ int slot)
+{
+ union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
+
+ r0.field.cq = port->id.phys_id;
+ r0.field.qidix = slot;
+ r0.field.value = 0;
+ r0.field.inflight_ok_v = 1;
+
+ DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
+
+ dlb2_flush_csr(hw);
+}
+
+static void dlb2_ldb_port_set_queue_if_status(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *port,
+ int slot)
+{
+ union dlb2_lsp_ldb_sched_ctrl r0 = { {0} };
+
+ r0.field.cq = port->id.phys_id;
+ r0.field.qidix = slot;
+ r0.field.value = 1;
+ r0.field.inflight_ok_v = 1;
+
+ DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r0.val);
+
+ dlb2_flush_csr(hw);
+}
+
+static int dlb2_ldb_port_map_qid_static(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *p,
+ struct dlb2_ldb_queue *q,
+ u8 priority)
+{
+ union dlb2_lsp_cq2priov r0;
+ union dlb2_lsp_cq2qid0 r1;
+ union dlb2_atm_qid2cqidix_00 r2;
+ union dlb2_lsp_qid2cqidix_00 r3;
+ union dlb2_lsp_qid2cqidix2_00 r4;
+ enum dlb2_qid_map_state state;
+ int i;
+
+ /* Look for a pending or already mapped slot, else an unused slot */
+ if (!dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAP_IN_PROG, q, &i) &&
+ !dlb2_port_find_slot_queue(p, DLB2_QUEUE_MAPPED, q, &i) &&
+ !dlb2_port_find_slot(p, DLB2_QUEUE_UNMAPPED, &i)) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: CQ has no available QID mapping slots\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: port slot tracking failed\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ /* Read-modify-write the priority and valid bit register */
+ r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id));
+
+ r0.field.v |= 1 << i;
+ r0.field.prio |= (priority & 0x7) << i * 3;
+
+ DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(p->id.phys_id), r0.val);
+
+ /* Read-modify-write the QID map register */
+ if (i < 4)
+ r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID0(p->id.phys_id));
+ else
+ r1.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2QID1(p->id.phys_id));
+
+ if (i == 0 || i == 4)
+ r1.field.qid_p0 = q->id.phys_id;
+ if (i == 1 || i == 5)
+ r1.field.qid_p1 = q->id.phys_id;
+ if (i == 2 || i == 6)
+ r1.field.qid_p2 = q->id.phys_id;
+ if (i == 3 || i == 7)
+ r1.field.qid_p3 = q->id.phys_id;
+
+ if (i < 4)
+ DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID0(p->id.phys_id), r1.val);
+ else
+ DLB2_CSR_WR(hw, DLB2_LSP_CQ2QID1(p->id.phys_id), r1.val);
+
+ r2.val = DLB2_CSR_RD(hw,
+ DLB2_ATM_QID2CQIDIX(q->id.phys_id,
+ p->id.phys_id / 4));
+
+ r3.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID2CQIDIX(q->id.phys_id,
+ p->id.phys_id / 4));
+
+ r4.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID2CQIDIX2(q->id.phys_id,
+ p->id.phys_id / 4));
+
+ switch (p->id.phys_id % 4) {
+ case 0:
+ r2.field.cq_p0 |= 1 << i;
+ r3.field.cq_p0 |= 1 << i;
+ r4.field.cq_p0 |= 1 << i;
+ break;
+
+ case 1:
+ r2.field.cq_p1 |= 1 << i;
+ r3.field.cq_p1 |= 1 << i;
+ r4.field.cq_p1 |= 1 << i;
+ break;
+
+ case 2:
+ r2.field.cq_p2 |= 1 << i;
+ r3.field.cq_p2 |= 1 << i;
+ r4.field.cq_p2 |= 1 << i;
+ break;
+
+ case 3:
+ r2.field.cq_p3 |= 1 << i;
+ r3.field.cq_p3 |= 1 << i;
+ r4.field.cq_p3 |= 1 << i;
+ break;
+ }
+
+ DLB2_CSR_WR(hw,
+ DLB2_ATM_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
+ r2.val);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID2CQIDIX(q->id.phys_id, p->id.phys_id / 4),
+ r3.val);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID2CQIDIX2(q->id.phys_id, p->id.phys_id / 4),
+ r4.val);
+
+ dlb2_flush_csr(hw);
+
+ p->qid_map[i].qid = q->id.phys_id;
+ p->qid_map[i].priority = priority;
+
+ state = DLB2_QUEUE_MAPPED;
+
+ return dlb2_port_slot_state_transition(hw, p, q, i, state);
+}
+
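One detail worth calling out in the static-map path above: the eight QID
slots are split across two map registers, with slot i landing in CQ2QID0
for i < 4 and CQ2QID1 for i >= 4, always in field qid_p(i % 4):

/* slot 0 -> CQ2QID0.qid_p0    slot 4 -> CQ2QID1.qid_p0
 * slot 2 -> CQ2QID0.qid_p2    slot 5 -> CQ2QID1.qid_p1
 * slot 3 -> CQ2QID0.qid_p3    slot 7 -> CQ2QID1.qid_p3
 */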
+static int dlb2_ldb_port_set_has_work_bits(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *port,
+ struct dlb2_ldb_queue *queue,
+ int slot)
+{
+ union dlb2_lsp_qid_aqed_active_cnt r0;
+ union dlb2_lsp_qid_ldb_enqueue_cnt r1;
+ union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
+
+ /* Set the atomic scheduling haswork bit */
+ r0.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
+
+ r2.field.cq = port->id.phys_id;
+ r2.field.qidix = slot;
+ r2.field.value = 1;
+ r2.field.rlist_haswork_v = r0.field.count > 0;
+
+ /* Set the non-atomic scheduling haswork bit */
+ DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
+
+ r1.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
+
+ memset(&r2, 0, sizeof(r2));
+
+ r2.field.cq = port->id.phys_id;
+ r2.field.qidix = slot;
+ r2.field.value = 1;
+ r2.field.nalb_haswork_v = (r1.field.count > 0);
+
+ DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
+
+ dlb2_flush_csr(hw);
+
+ return 0;
+}
+
+static void dlb2_ldb_port_clear_has_work_bits(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *port,
+ u8 slot)
+{
+ union dlb2_lsp_ldb_sched_ctrl r2 = { {0} };
+
+ r2.field.cq = port->id.phys_id;
+ r2.field.qidix = slot;
+ r2.field.value = 0;
+ r2.field.rlist_haswork_v = 1;
+
+ DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
+
+ memset(&r2, 0, sizeof(r2));
+
+ r2.field.cq = port->id.phys_id;
+ r2.field.qidix = slot;
+ r2.field.value = 0;
+ r2.field.nalb_haswork_v = 1;
+
+ DLB2_CSR_WR(hw, DLB2_LSP_LDB_SCHED_CTRL, r2.val);
+
+ dlb2_flush_csr(hw);
+}
+
+static void dlb2_ldb_queue_set_inflight_limit(struct dlb2_hw *hw,
+ struct dlb2_ldb_queue *queue)
+{
+ union dlb2_lsp_qid_ldb_infl_lim r0 = { {0} };
+
+ r0.field.limit = queue->num_qid_inflights;
+
+ DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), r0.val);
+}
+
+static void dlb2_ldb_queue_clear_inflight_limit(struct dlb2_hw *hw,
+ struct dlb2_ldb_queue *queue)
+{
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id),
+ DLB2_LSP_QID_LDB_INFL_LIM_RST);
+}
+
+static int dlb2_ldb_port_finish_map_qid_dynamic(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain,
+ struct dlb2_ldb_port *port,
+ struct dlb2_ldb_queue *queue)
+{
+ struct dlb2_list_entry *iter;
+ union dlb2_lsp_qid_ldb_infl_cnt r0;
+ enum dlb2_qid_map_state state;
+ int slot, ret, i;
+ u8 prio;
+ RTE_SET_USED(iter);
+
+ r0.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
+
+ if (r0.field.count) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: non-zero QID inflight count\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Statically map the queue to the port and set the corresponding
+ * has_work bits.
+ */
+ state = DLB2_QUEUE_MAP_IN_PROG;
+ if (!dlb2_port_find_slot_queue(port, state, queue, &slot))
+ return -EINVAL;
+
+ if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: port slot tracking failed\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ prio = port->qid_map[slot].priority;
+
+ /*
+ * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
+ * the port's qid_map state.
+ */
+ ret = dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
+ if (ret)
+ return ret;
+
+ ret = dlb2_ldb_port_set_has_work_bits(hw, port, queue, slot);
+ if (ret)
+ return ret;
+
+ /*
+ * Ensure IF_status(cq,qid) is 0 before enabling the port to
+ * prevent spurious schedules from causing the queue's inflight
+ * count to increase.
+ */
+ dlb2_ldb_port_clear_queue_if_status(hw, port, slot);
+
+ /* Reset the queue's inflight status */
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+ state = DLB2_QUEUE_MAPPED;
+ if (!dlb2_port_find_slot_queue(port, state,
+ queue, &slot))
+ continue;
+
+ dlb2_ldb_port_set_queue_if_status(hw, port, slot);
+ }
+ }
+
+ dlb2_ldb_queue_set_inflight_limit(hw, queue);
+
+ /* Re-enable CQs mapped to this queue */
+ dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
+
+ /* If this queue has other mappings pending, clear its inflight limit */
+ if (queue->num_pending_additions > 0)
+ dlb2_ldb_queue_clear_inflight_limit(hw, queue);
+
+ return 0;
+}
+
+/**
+ * dlb2_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
+ * @hw: dlb2_hw handle for a particular device.
+ * @port: load-balanced port
+ * @queue: load-balanced queue
+ * @priority: queue servicing priority
+ *
+ * Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
+ * at a later point, and <0 if an error occurred.
+ */
+static int dlb2_ldb_port_map_qid_dynamic(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *port,
+ struct dlb2_ldb_queue *queue,
+ u8 priority)
+{
+ union dlb2_lsp_qid_ldb_infl_cnt r0 = { {0} };
+ enum dlb2_qid_map_state state;
+ struct dlb2_hw_domain *domain;
+ int domain_id, slot, ret;
+
+ domain_id = port->domain_id.phys_id;
+
+ domain = dlb2_get_domain_from_id(hw, domain_id, false, 0);
+ if (!domain) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: unable to find domain %d\n",
+ __func__, port->domain_id.phys_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Set the QID inflight limit to 0 to prevent further scheduling of the
+ * queue.
+ */
+ DLB2_CSR_WR(hw, DLB2_LSP_QID_LDB_INFL_LIM(queue->id.phys_id), 0);
+
+ if (!dlb2_port_find_slot(port, DLB2_QUEUE_UNMAPPED, &slot)) {
+ DLB2_HW_ERR(hw,
+ "Internal error: No available unmapped slots\n");
+ return -EFAULT;
+ }
+
+ if (slot >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: port slot tracking failed\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ port->qid_map[slot].qid = queue->id.phys_id;
+ port->qid_map[slot].priority = priority;
+
+ state = DLB2_QUEUE_MAP_IN_PROG;
+ ret = dlb2_port_slot_state_transition(hw, port, queue, slot, state);
+ if (ret)
+ return ret;
+
+ r0.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
+
+ if (r0.field.count) {
+ /*
+ * The queue is owed completions so it's not safe to map it
+ * yet. Schedule a kernel thread to complete the mapping later,
+ * once software has completed all the queue's inflight events.
+ */
+ if (!os_worker_active(hw))
+ os_schedule_work(hw);
+
+ return 1;
+ }
+
+ /*
+ * Disable the affected CQ, and the CQs already mapped to the QID,
+ * before reading the QID's inflight count a second time. There is an
+ * unlikely race in which the QID may schedule one more QE after we
+ * read an inflight count of 0, and disabling the CQs guarantees that
+ * the race will not occur after a re-read of the inflight count
+ * register.
+ */
+ if (port->enabled)
+ dlb2_ldb_port_cq_disable(hw, port);
+
+ dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
+
+ r0.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
+
+ if (r0.field.count) {
+ if (port->enabled)
+ dlb2_ldb_port_cq_enable(hw, port);
+
+ dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
+
+ /*
+ * The queue is owed completions so it's not safe to map it
+ * yet. Schedule a kernel thread to complete the mapping later,
+ * once software has completed all the queue's inflight events.
+ */
+ if (!os_worker_active(hw))
+ os_schedule_work(hw);
+
+ return 1;
+ }
+
+ return dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
+}
+
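Summarizing the ordering that makes the dynamic map above safe:

/* 1. Zero the QID's inflight limit so no new QEs are scheduled.
 * 2. Claim an UNMAPPED slot and move it to MAP_IN_PROG.
 * 3. If the queue still has inflights, defer to the background
 *    worker and return 1.
 * 4. Otherwise disable the affected CQs, re-read the inflight
 *    count to close the scheduling race, and either defer
 *    (return 1) or complete the static map and re-enable the CQs.
 */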
+static void dlb2_domain_finish_map_port(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain,
+ struct dlb2_ldb_port *port)
+{
+ int i;
+
+ for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+ union dlb2_lsp_qid_ldb_infl_cnt r0;
+ struct dlb2_ldb_queue *queue;
+ int qid;
+
+ if (port->qid_map[i].state != DLB2_QUEUE_MAP_IN_PROG)
+ continue;
+
+ qid = port->qid_map[i].qid;
+
+ queue = dlb2_get_ldb_queue_from_id(hw, qid, false, 0);
+
+ if (!queue) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: unable to find queue %d\n",
+ __func__, qid);
+ continue;
+ }
+
+ r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
+
+ if (r0.field.count)
+ continue;
+
+ /*
+ * Disable the affected CQ, and the CQs already mapped to the
+ * QID, before reading the QID's inflight count a second time.
+ * There is an unlikely race in which the QID may schedule one
+ * more QE after we read an inflight count of 0, and disabling
+ * the CQs guarantees that the race will not occur after a
+ * re-read of the inflight count register.
+ */
+ if (port->enabled)
+ dlb2_ldb_port_cq_disable(hw, port);
+
+ dlb2_ldb_queue_disable_mapped_cqs(hw, domain, queue);
+
+ r0.val = DLB2_CSR_RD(hw, DLB2_LSP_QID_LDB_INFL_CNT(qid));
+
+ if (r0.field.count) {
+ if (port->enabled)
+ dlb2_ldb_port_cq_enable(hw, port);
+
+ dlb2_ldb_queue_enable_mapped_cqs(hw, domain, queue);
+
+ continue;
+ }
+
+ dlb2_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
+ }
+}
+
+static unsigned int
+dlb2_domain_finish_map_qid_procedures(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_port *port;
+ int i;
+ RTE_SET_USED(iter);
+
+ if (!domain->configured || domain->num_pending_additions == 0)
+ return 0;
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
+ dlb2_domain_finish_map_port(hw, domain, port);
+ }
+
+ return domain->num_pending_additions;
+}
+
+static int dlb2_ldb_port_unmap_qid(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *port,
+ struct dlb2_ldb_queue *queue)
+{
+ enum dlb2_qid_map_state mapped, in_progress, pending_map, unmapped;
+ union dlb2_lsp_cq2priov r0;
+ union dlb2_atm_qid2cqidix_00 r1;
+ union dlb2_lsp_qid2cqidix_00 r2;
+ union dlb2_lsp_qid2cqidix2_00 r3;
+ u32 queue_id;
+ u32 port_id;
+ int i;
+
+ /* Find the queue's slot */
+ mapped = DLB2_QUEUE_MAPPED;
+ in_progress = DLB2_QUEUE_UNMAP_IN_PROG;
+ pending_map = DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP;
+
+ if (!dlb2_port_find_slot_queue(port, mapped, queue, &i) &&
+ !dlb2_port_find_slot_queue(port, in_progress, queue, &i) &&
+ !dlb2_port_find_slot_queue(port, pending_map, queue, &i)) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: QID %d isn't mapped\n",
+ __func__, __LINE__, queue->id.phys_id);
+ return -EFAULT;
+ }
+
+ if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+ DLB2_HW_ERR(hw,
+ "[%s():%d] Internal error: port slot tracking failed\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ port_id = port->id.phys_id;
+ queue_id = queue->id.phys_id;
+
+ /* Read-modify-write the priority and valid bit register */
+ r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ2PRIOV(port_id));
+
+ r0.field.v &= ~(1 << i);
+
+ DLB2_CSR_WR(hw, DLB2_LSP_CQ2PRIOV(port_id), r0.val);
+
+ r1.val = DLB2_CSR_RD(hw,
+ DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4));
+
+ r2.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4));
+
+ r3.val = DLB2_CSR_RD(hw,
+ DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4));
+
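+ /*
+ * Each QID2CQIDIX register packs the CQ-index bitmaps of four
+ * ports; port_id % 4 selects this port's field.
+ */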
+ switch (port_id % 4) {
+ case 0:
+ r1.field.cq_p0 &= ~(1 << i);
+ r2.field.cq_p0 &= ~(1 << i);
+ r3.field.cq_p0 &= ~(1 << i);
+ break;
+
+ case 1:
+ r1.field.cq_p1 &= ~(1 << i);
+ r2.field.cq_p1 &= ~(1 << i);
+ r3.field.cq_p1 &= ~(1 << i);
+ break;
+
+ case 2:
+ r1.field.cq_p2 &= ~(1 << i);
+ r2.field.cq_p2 &= ~(1 << i);
+ r3.field.cq_p2 &= ~(1 << i);
+ break;
+
+ case 3:
+ r1.field.cq_p3 &= ~(1 << i);
+ r2.field.cq_p3 &= ~(1 << i);
+ r3.field.cq_p3 &= ~(1 << i);
+ break;
+ }
+
+ DLB2_CSR_WR(hw,
+ DLB2_ATM_QID2CQIDIX(queue_id, port_id / 4),
+ r1.val);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID2CQIDIX(queue_id, port_id / 4),
+ r2.val);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID2CQIDIX2(queue_id, port_id / 4),
+ r3.val);
+
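+ /* Flush the CSR writes before updating the slot's software state */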
+ dlb2_flush_csr(hw);
+
+ unmapped = DLB2_QUEUE_UNMAPPED;
+
+ return dlb2_port_slot_state_transition(hw, port, queue, i, unmapped);
+}
+
+static int dlb2_ldb_port_map_qid(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain,
+ struct dlb2_ldb_port *port,
+ struct dlb2_ldb_queue *queue,
+ u8 prio)
+{
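+ /*
+ * If the domain has been started, traffic may be in flight, so
+ * use the multi-step dynamic mapping procedure; otherwise the
+ * map can be written directly.
+ */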
+ if (domain->started)
+ return dlb2_ldb_port_map_qid_dynamic(hw, port, queue, prio);
+ else
+ return dlb2_ldb_port_map_qid_static(hw, port, queue, prio);
+}
+
+static void
+dlb2_domain_finish_unmap_port_slot(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain,
+ struct dlb2_ldb_port *port,
+ int slot)
+{
+ enum dlb2_qid_map_state state;
+ struct dlb2_ldb_queue *queue;
+
+ queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
+
+ state = port->qid_map[slot].state;
+
+ /* Update the QID2CQIDX and CQ2QID vectors */
+ dlb2_ldb_port_unmap_qid(hw, port, queue);
+
+ /*
+ * Ensure the QID will not be serviced by this {CQ, slot} by clearing
+ * the has_work bits
+ */
+ dlb2_ldb_port_clear_has_work_bits(hw, port, slot);
+
+ /* Reset the {CQ, slot} to its default state */
+ dlb2_ldb_port_set_queue_if_status(hw, port, slot);
+
+ /* Re-enable the CQ if it wasn't manually disabled by the user */
+ if (port->enabled)
+ dlb2_ldb_port_cq_enable(hw, port);
+
+ /*
+ * If there is a mapping that is pending this slot's removal, perform
+ * the mapping now.
+ */
+ if (state == DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP) {
+ struct dlb2_ldb_port_qid_map *map;
+ struct dlb2_ldb_queue *map_queue;
+ u8 prio;
+
+ map = &port->qid_map[slot];
+
+ map->qid = map->pending_qid;
+ map->priority = map->pending_priority;
+
+ map_queue = &hw->rsrcs.ldb_queues[map->qid];
+ prio = map->priority;
+
+ dlb2_ldb_port_map_qid(hw, domain, port, map_queue, prio);
+ }
+}
+
+static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain,
+ struct dlb2_ldb_port *port)
+{
+ union dlb2_lsp_cq_ldb_infl_cnt r0;
+ int i;
+
+ if (port->num_pending_removals == 0)
+ return false;
+
+ /*
+ * The unmap requires all the CQ's outstanding inflights to be
+ * completed.
+ */
+ r0.val = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(port->id.phys_id));
+ if (r0.field.count > 0)
+ return false;
+
+ for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+ struct dlb2_ldb_port_qid_map *map;
+
+ map = &port->qid_map[i];
+
+ if (map->state != DLB2_QUEUE_UNMAP_IN_PROG &&
+ map->state != DLB2_QUEUE_UNMAP_IN_PROG_PENDING_MAP)
+ continue;
+
+ dlb2_domain_finish_unmap_port_slot(hw, domain, port, i);
+ }
+
+ return true;
+}
+
+static unsigned int
+dlb2_domain_finish_unmap_qid_procedures(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_port *port;
+ int i;
+ RTE_SET_USED(iter);
+
+ if (!domain->configured || domain->num_pending_removals == 0)
+ return 0;
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
+ dlb2_domain_finish_unmap_port(hw, domain, port);
+ }
+
+ return domain->num_pending_removals;
+}
+
+static void dlb2_domain_disable_ldb_cqs(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_port *port;
+ int i;
+ RTE_SET_USED(iter);
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+ port->enabled = false;
+
+ dlb2_ldb_port_cq_disable(hw, port);
+ }
+ }
+}
+
+static void dlb2_log_reset_domain(struct dlb2_hw *hw,
+ u32 domain_id,
+ bool vdev_req,
+ unsigned int vdev_id)
+{
+ DLB2_HW_DBG(hw, "DLB2 reset domain:\n");
+ if (vdev_req)
+ DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
+ DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
+}
+
+static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain,
+ unsigned int vdev_id)
+{
+ struct dlb2_list_entry *iter;
+ union dlb2_sys_vf_dir_vpp_v r1 = { {0} };
+ struct dlb2_dir_pq_pair *port;
+ RTE_SET_USED(iter);
+
+ r1.field.vpp_v = 0;
+
+ DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
+ unsigned int offs;
+ u32 virt_id;
+
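+ /*
+ * In SR-IOV mode the VPP is indexed by the port's virtual ID;
+ * in Scalable IOV mode, by its physical ID.
+ */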
+ if (hw->virt_mode == DLB2_VIRT_SRIOV)
+ virt_id = port->id.virt_id;
+ else
+ virt_id = port->id.phys_id;
+
+ offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
+
+ DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r1.val);
+ }
+}
+
+static void dlb2_domain_disable_ldb_vpps(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain,
+ unsigned int vdev_id)
+{
+ struct dlb2_list_entry *iter;
+ union dlb2_sys_vf_ldb_vpp_v r1 = { {0} };
+ struct dlb2_ldb_port *port;
+ int i;
+ RTE_SET_USED(iter);
+
+ r1.field.vpp_v = 0;
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+ unsigned int offs;
+ u32 virt_id;
+
+ if (hw->virt_mode == DLB2_VIRT_SRIOV)
+ virt_id = port->id.virt_id;
+ else
+ virt_id = port->id.phys_id;
+
+ offs = vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
+
+ DLB2_CSR_WR(hw, DLB2_SYS_VF_LDB_VPP_V(offs), r1.val);
+ }
+ }
+}
+
+static void
+dlb2_domain_disable_ldb_port_interrupts(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ union dlb2_chp_ldb_cq_int_enb r0 = { {0} };
+ union dlb2_chp_ldb_cq_wd_enb r1 = { {0} };
+ struct dlb2_ldb_port *port;
+ int i;
+ RTE_SET_USED(iter);
+
+ r0.field.en_tim = 0;
+ r0.field.en_depth = 0;
+
+ r1.field.wd_enable = 0;
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),
+ r0.val);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_LDB_CQ_WD_ENB(port->id.phys_id),
+ r1.val);
+ }
+ }
+}
+
+static void
+dlb2_domain_disable_dir_port_interrupts(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ union dlb2_chp_dir_cq_int_enb r0 = { {0} };
+ union dlb2_chp_dir_cq_wd_enb r1 = { {0} };
+ struct dlb2_dir_pq_pair *port;
+ RTE_SET_USED(iter);
+
+ r0.field.en_tim = 0;
+ r0.field.en_depth = 0;
+
+ r1.field.wd_enable = 0;
+
+ DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),
+ r0.val);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_DIR_CQ_WD_ENB(port->id.phys_id),
+ r1.val);
+ }
+}
+
+static void
+dlb2_domain_disable_ldb_queue_write_perms(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES;
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_queue *queue;
+ RTE_SET_USED(iter);
+
+ DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
+ union dlb2_sys_ldb_vasqid_v r0 = { {0} };
+ union dlb2_sys_ldb_qid2vqid r1 = { {0} };
+ union dlb2_sys_vf_ldb_vqid_v r2 = { {0} };
+ union dlb2_sys_vf_ldb_vqid2qid r3 = { {0} };
+ int idx;
+
+ idx = domain_offset + queue->id.phys_id;
+
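+ /*
+ * Clearing VASQID_V revokes the VAS's permission to enqueue to
+ * this queue, so further traffic sent to it is dropped.
+ */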
+ DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(idx), r0.val);
+
+ if (queue->id.vdev_owned) {
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_QID2VQID(queue->id.phys_id),
+ r1.val);
+
+ idx = queue->id.vdev_id * DLB2_MAX_NUM_LDB_QUEUES +
+ queue->id.virt_id;
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_VF_LDB_VQID_V(idx),
+ r2.val);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_VF_LDB_VQID2QID(idx),
+ r3.val);
+ }
+ }
+}
+
+static void
+dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS;
+ struct dlb2_list_entry *iter;
+ struct dlb2_dir_pq_pair *queue;
+ RTE_SET_USED(iter);
+
+ DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
+ union dlb2_sys_dir_vasqid_v r0 = { {0} };
+ union dlb2_sys_vf_dir_vqid_v r1 = { {0} };
+ union dlb2_sys_vf_dir_vqid2qid r2 = { {0} };
+ int idx;
+
+ idx = domain_offset + queue->id.phys_id;
+
+ DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), r0.val);
+
+ if (queue->id.vdev_owned) {
+ idx = queue->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS +
+ queue->id.virt_id;
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_VF_DIR_VQID_V(idx),
+ r1.val);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_VF_DIR_VQID2QID(idx),
+ r2.val);
+ }
+ }
+}
+
+static void dlb2_domain_disable_ldb_seq_checks(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ union dlb2_chp_sn_chk_enbl r1 = { {0} };
+ struct dlb2_ldb_port *port;
+ int i;
+ RTE_SET_USED(iter);
+
+ r1.field.en = 0;
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_SN_CHK_ENBL(port->id.phys_id),
+ r1.val);
+ }
+}
+
+static int dlb2_domain_wait_for_ldb_cqs_to_empty(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_port *port;
+ int i;
+ RTE_SET_USED(iter);
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
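+ /*
+ * Bounded busy-wait for the CQ's inflight count to reach
+ * zero; bail out if it fails to drain.
+ */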
+ int j;
+
+ for (j = 0; j < DLB2_MAX_CQ_COMP_CHECK_LOOPS; j++) {
+ if (dlb2_ldb_cq_inflight_count(hw, port) == 0)
+ break;
+ }
+
+ if (j == DLB2_MAX_CQ_COMP_CHECK_LOOPS) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
+ __func__, port->id.phys_id);
+ return -EFAULT;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void dlb2_domain_disable_dir_cqs(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_dir_pq_pair *port;
+ RTE_SET_USED(iter);
+
+ DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
+ port->enabled = false;
+
+ dlb2_dir_port_cq_disable(hw, port);
+ }
+}
+
+static void
+dlb2_domain_disable_dir_producer_ports(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_dir_pq_pair *port;
+ union dlb2_sys_dir_pp_v r1 = { {0} };
+ RTE_SET_USED(iter);
+
+ r1.field.pp_v = 0;
+
+ DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_PP_V(port->id.phys_id),
+ r1.val);
+}
+
+static void
+dlb2_domain_disable_ldb_producer_ports(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ union dlb2_sys_ldb_pp_v r1 = { {0} };
+ struct dlb2_ldb_port *port;
+ int i;
+ RTE_SET_USED(iter);
+
+ r1.field.pp_v = 0;
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_PP_V(port->id.phys_id),
+ r1.val);
+ }
+}
+
+static int dlb2_domain_verify_reset_success(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_dir_pq_pair *dir_port;
+ struct dlb2_ldb_port *ldb_port;
+ struct dlb2_ldb_queue *queue;
+ int i;
+ RTE_SET_USED(iter);
+
+ /*
+ * Confirm that all the domain's queues' inflight counts and AQED
+ * active counts are 0.
+ */
+ DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
+ if (!dlb2_ldb_queue_is_empty(hw, queue)) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: failed to empty ldb queue %d\n",
+ __func__, queue->id.phys_id);
+ return -EFAULT;
+ }
+ }
+
+ /* Confirm that all the domain's CQs' inflight and token counts are 0. */
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], ldb_port, iter) {
+ if (dlb2_ldb_cq_inflight_count(hw, ldb_port) ||
+ dlb2_ldb_cq_token_count(hw, ldb_port)) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: failed to empty ldb port %d\n",
+ __func__, ldb_port->id.phys_id);
+ return -EFAULT;
+ }
+ }
+ }
+
+ DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
+ if (!dlb2_dir_queue_is_empty(hw, dir_port)) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: failed to empty dir queue %d\n",
+ __func__, dir_port->id.phys_id);
+ return -EFAULT;
+ }
+
+ if (dlb2_dir_cq_token_count(hw, dir_port)) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: failed to empty dir port %d\n",
+ __func__, dir_port->id.phys_id);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static void __dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
+ struct dlb2_ldb_port *port)
+{
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_PP2VAS(port->id.phys_id),
+ DLB2_SYS_LDB_PP2VAS_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_LDB_CQ2VAS(port->id.phys_id),
+ DLB2_CHP_LDB_CQ2VAS_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_PP2VDEV(port->id.phys_id),
+ DLB2_SYS_LDB_PP2VDEV_RST);
+
+ if (port->id.vdev_owned) {
+ unsigned int offs;
+ u32 virt_id;
+
+ /*
+ * DLB uses producer port address bits 17:12 to determine the
+ * producer port ID. In Scalable IOV mode, PP accesses come
+ * through the PF MMIO window for the physical producer port,
+ * so for translation purposes the virtual and physical port
+ * IDs are equal.
+ */
+ if (hw->virt_mode == DLB2_VIRT_SRIOV)
+ virt_id = port->id.virt_id;
+ else
+ virt_id = port->id.phys_id;
+
+ offs = port->id.vdev_id * DLB2_MAX_NUM_LDB_PORTS + virt_id;
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_VF_LDB_VPP2PP(offs),
+ DLB2_SYS_VF_LDB_VPP2PP_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_VF_LDB_VPP_V(offs),
+ DLB2_SYS_VF_LDB_VPP_V_RST);
+ }
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_PP_V(port->id.phys_id),
+ DLB2_SYS_LDB_PP_V_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ_LDB_DSBL(port->id.phys_id),
+ DLB2_LSP_CQ_LDB_DSBL_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_LDB_CQ_DEPTH(port->id.phys_id),
+ DLB2_CHP_LDB_CQ_DEPTH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ_LDB_INFL_LIM(port->id.phys_id),
+ DLB2_LSP_CQ_LDB_INFL_LIM_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_HIST_LIST_LIM(port->id.phys_id),
+ DLB2_CHP_HIST_LIST_LIM_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_HIST_LIST_BASE(port->id.phys_id),
+ DLB2_CHP_HIST_LIST_BASE_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_HIST_LIST_POP_PTR(port->id.phys_id),
+ DLB2_CHP_HIST_LIST_POP_PTR_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_HIST_LIST_PUSH_PTR(port->id.phys_id),
+ DLB2_CHP_HIST_LIST_PUSH_PTR_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id.phys_id),
+ DLB2_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_LDB_CQ_TMR_THRSH(port->id.phys_id),
+ DLB2_CHP_LDB_CQ_TMR_THRSH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_LDB_CQ_INT_ENB(port->id.phys_id),
+ DLB2_CHP_LDB_CQ_INT_ENB_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_CQ_ISR(port->id.phys_id),
+ DLB2_SYS_LDB_CQ_ISR_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id.phys_id),
+ DLB2_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id.phys_id),
+ DLB2_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_LDB_CQ_WPTR(port->id.phys_id),
+ DLB2_CHP_LDB_CQ_WPTR_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ_LDB_TKN_CNT(port->id.phys_id),
+ DLB2_LSP_CQ_LDB_TKN_CNT_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_CQ_ADDR_L(port->id.phys_id),
+ DLB2_SYS_LDB_CQ_ADDR_L_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_CQ_ADDR_U(port->id.phys_id),
+ DLB2_SYS_LDB_CQ_ADDR_U_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_CQ_AT(port->id.phys_id),
+ DLB2_SYS_LDB_CQ_AT_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_CQ_PASID(port->id.phys_id),
+ DLB2_SYS_LDB_CQ_PASID_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_CQ2VF_PF_RO(port->id.phys_id),
+ DLB2_SYS_LDB_CQ2VF_PF_RO_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ_LDB_TOT_SCH_CNTL(port->id.phys_id),
+ DLB2_LSP_CQ_LDB_TOT_SCH_CNTL_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ_LDB_TOT_SCH_CNTH(port->id.phys_id),
+ DLB2_LSP_CQ_LDB_TOT_SCH_CNTH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ2QID0(port->id.phys_id),
+ DLB2_LSP_CQ2QID0_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ2QID1(port->id.phys_id),
+ DLB2_LSP_CQ2QID1_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ2PRIOV(port->id.phys_id),
+ DLB2_LSP_CQ2PRIOV_RST);
+}
+
+static void dlb2_domain_reset_ldb_port_registers(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_port *port;
+ int i;
+ RTE_SET_USED(iter);
+
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter)
+ __dlb2_domain_reset_ldb_port_registers(hw, port);
+ }
+}
+
+static void
+__dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
+ struct dlb2_dir_pq_pair *port)
+{
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_DIR_CQ2VAS(port->id.phys_id),
+ DLB2_CHP_DIR_CQ2VAS_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ_DIR_DSBL(port->id.phys_id),
+ DLB2_LSP_CQ_DIR_DSBL_RST);
+
+ DLB2_CSR_WR(hw, DLB2_SYS_DIR_CQ_OPT_CLR, port->id.phys_id);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_DIR_CQ_DEPTH(port->id.phys_id),
+ DLB2_CHP_DIR_CQ_DEPTH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id.phys_id),
+ DLB2_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_DIR_CQ_TMR_THRSH(port->id.phys_id),
+ DLB2_CHP_DIR_CQ_TMR_THRSH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_DIR_CQ_INT_ENB(port->id.phys_id),
+ DLB2_CHP_DIR_CQ_INT_ENB_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_CQ_ISR(port->id.phys_id),
+ DLB2_SYS_DIR_CQ_ISR_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id.phys_id),
+ DLB2_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id.phys_id),
+ DLB2_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_DIR_CQ_WPTR(port->id.phys_id),
+ DLB2_CHP_DIR_CQ_WPTR_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ_DIR_TKN_CNT(port->id.phys_id),
+ DLB2_LSP_CQ_DIR_TKN_CNT_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_CQ_ADDR_L(port->id.phys_id),
+ DLB2_SYS_DIR_CQ_ADDR_L_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_CQ_ADDR_U(port->id.phys_id),
+ DLB2_SYS_DIR_CQ_ADDR_U_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_CQ_AT(port->id.phys_id),
+ DLB2_SYS_DIR_CQ_AT_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_CQ_PASID(port->id.phys_id),
+ DLB2_SYS_DIR_CQ_PASID_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_CQ_FMT(port->id.phys_id),
+ DLB2_SYS_DIR_CQ_FMT_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_CQ2VF_PF_RO(port->id.phys_id),
+ DLB2_SYS_DIR_CQ2VF_PF_RO_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ_DIR_TOT_SCH_CNTL(port->id.phys_id),
+ DLB2_LSP_CQ_DIR_TOT_SCH_CNTL_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_CQ_DIR_TOT_SCH_CNTH(port->id.phys_id),
+ DLB2_LSP_CQ_DIR_TOT_SCH_CNTH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_PP2VAS(port->id.phys_id),
+ DLB2_SYS_DIR_PP2VAS_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_PP2VDEV(port->id.phys_id),
+ DLB2_SYS_DIR_PP2VDEV_RST);
+
+ if (port->id.vdev_owned) {
+ unsigned int offs;
+ u32 virt_id;
+
+ /*
+ * DLB uses producer port address bits 17:12 to determine the
+ * producer port ID. In Scalable IOV mode, PP accesses come
+ * through the PF MMIO window for the physical producer port,
+ * so for translation purposes the virtual and physical port
+ * IDs are equal.
+ */
+ if (hw->virt_mode == DLB2_VIRT_SRIOV)
+ virt_id = port->id.virt_id;
+ else
+ virt_id = port->id.phys_id;
+
+ offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id;
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_VF_DIR_VPP2PP(offs),
+ DLB2_SYS_VF_DIR_VPP2PP_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_VF_DIR_VPP_V(offs),
+ DLB2_SYS_VF_DIR_VPP_V_RST);
+ }
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_PP_V(port->id.phys_id),
+ DLB2_SYS_DIR_PP_V_RST);
+}
+
+static void dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_dir_pq_pair *port;
+ RTE_SET_USED(iter);
+
+ DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
+ __dlb2_domain_reset_dir_port_registers(hw, port);
+}
+
+static void dlb2_domain_reset_ldb_queue_registers(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_queue *queue;
+ RTE_SET_USED(iter);
+
+ DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
+ unsigned int queue_id = queue->id.phys_id;
+ int i;
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL(queue_id),
+ DLB2_LSP_QID_NALDB_TOT_ENQ_CNTL_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH(queue_id),
+ DLB2_LSP_QID_NALDB_TOT_ENQ_CNTH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_ATM_TOT_ENQ_CNTL(queue_id),
+ DLB2_LSP_QID_ATM_TOT_ENQ_CNTL_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_ATM_TOT_ENQ_CNTH(queue_id),
+ DLB2_LSP_QID_ATM_TOT_ENQ_CNTH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_NALDB_MAX_DEPTH(queue_id),
+ DLB2_LSP_QID_NALDB_MAX_DEPTH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_LDB_INFL_LIM(queue_id),
+ DLB2_LSP_QID_LDB_INFL_LIM_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_AQED_ACTIVE_LIM(queue_id),
+ DLB2_LSP_QID_AQED_ACTIVE_LIM_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_ATM_DEPTH_THRSH(queue_id),
+ DLB2_LSP_QID_ATM_DEPTH_THRSH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_NALDB_DEPTH_THRSH(queue_id),
+ DLB2_LSP_QID_NALDB_DEPTH_THRSH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_QID_ITS(queue_id),
+ DLB2_SYS_LDB_QID_ITS_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_ORD_QID_SN(queue_id),
+ DLB2_CHP_ORD_QID_SN_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_ORD_QID_SN_MAP(queue_id),
+ DLB2_CHP_ORD_QID_SN_MAP_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_QID_V(queue_id),
+ DLB2_SYS_LDB_QID_V_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_LDB_QID_CFG_V(queue_id),
+ DLB2_SYS_LDB_QID_CFG_V_RST);
+
+ if (queue->sn_cfg_valid) {
+ u32 offs[2];
+
+ offs[0] = DLB2_RO_PIPE_GRP_0_SLT_SHFT(queue->sn_slot);
+ offs[1] = DLB2_RO_PIPE_GRP_1_SLT_SHFT(queue->sn_slot);
+
+ DLB2_CSR_WR(hw,
+ offs[queue->sn_group],
+ DLB2_RO_PIPE_GRP_0_SLT_SHFT_RST);
+ }
+
+ for (i = 0; i < DLB2_LSP_QID2CQIDIX_NUM; i++) {
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID2CQIDIX(queue_id, i),
+ DLB2_LSP_QID2CQIDIX_00_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID2CQIDIX2(queue_id, i),
+ DLB2_LSP_QID2CQIDIX2_00_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_ATM_QID2CQIDIX(queue_id, i),
+ DLB2_ATM_QID2CQIDIX_00_RST);
+ }
+ }
+}
+
+static void dlb2_domain_reset_dir_queue_registers(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_dir_pq_pair *queue;
+ RTE_SET_USED(iter);
+
+ DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_DIR_MAX_DEPTH(queue->id.phys_id),
+ DLB2_LSP_QID_DIR_MAX_DEPTH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_DIR_TOT_ENQ_CNTL(queue->id.phys_id),
+ DLB2_LSP_QID_DIR_TOT_ENQ_CNTL_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_DIR_TOT_ENQ_CNTH(queue->id.phys_id),
+ DLB2_LSP_QID_DIR_TOT_ENQ_CNTH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_LSP_QID_DIR_DEPTH_THRSH(queue->id.phys_id),
+ DLB2_LSP_QID_DIR_DEPTH_THRSH_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_QID_ITS(queue->id.phys_id),
+ DLB2_SYS_DIR_QID_ITS_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_SYS_DIR_QID_V(queue->id.phys_id),
+ DLB2_SYS_DIR_QID_V_RST);
+ }
+}
+
+static void dlb2_domain_reset_registers(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ dlb2_domain_reset_ldb_port_registers(hw, domain);
+
+ dlb2_domain_reset_dir_port_registers(hw, domain);
+
+ dlb2_domain_reset_ldb_queue_registers(hw, domain);
+
+ dlb2_domain_reset_dir_queue_registers(hw, domain);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_CFG_LDB_VAS_CRD(domain->id.phys_id),
+ DLB2_CHP_CFG_LDB_VAS_CRD_RST);
+
+ DLB2_CSR_WR(hw,
+ DLB2_CHP_CFG_DIR_VAS_CRD(domain->id.phys_id),
+ DLB2_CHP_CFG_DIR_VAS_CRD_RST);
+}
+
+static int dlb2_domain_reset_software_state(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_dir_pq_pair *tmp_dir_port;
+ struct dlb2_ldb_queue *tmp_ldb_queue;
+ struct dlb2_ldb_port *tmp_ldb_port;
+ struct dlb2_list_entry *iter1;
+ struct dlb2_list_entry *iter2;
+ struct dlb2_function_resources *rsrcs;
+ struct dlb2_dir_pq_pair *dir_port;
+ struct dlb2_ldb_queue *ldb_queue;
+ struct dlb2_ldb_port *ldb_port;
+ struct dlb2_list_head *list;
+ int ret, i;
+ RTE_SET_USED(tmp_dir_port);
+ RTE_SET_USED(tmp_ldb_queue);
+ RTE_SET_USED(tmp_ldb_port);
+ RTE_SET_USED(iter1);
+ RTE_SET_USED(iter2);
+
+ rsrcs = domain->parent_func;
+
+ /* Move the domain's ldb queues to the function's avail list */
+ list = &domain->used_ldb_queues;
+ DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
+ if (ldb_queue->sn_cfg_valid) {
+ struct dlb2_sn_group *grp;
+
+ grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
+
+ dlb2_sn_group_free_slot(grp, ldb_queue->sn_slot);
+ ldb_queue->sn_cfg_valid = false;
+ }
+
+ ldb_queue->owned = false;
+ ldb_queue->num_mappings = 0;
+ ldb_queue->num_pending_additions = 0;
+
+ dlb2_list_del(&domain->used_ldb_queues,
+ &ldb_queue->domain_list);
+ dlb2_list_add(&rsrcs->avail_ldb_queues,
+ &ldb_queue->func_list);
+ rsrcs->num_avail_ldb_queues++;
+ }
+
+ list = &domain->avail_ldb_queues;
+ DLB2_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
+ ldb_queue->owned = false;
+
+ dlb2_list_del(&domain->avail_ldb_queues,
+ &ldb_queue->domain_list);
+ dlb2_list_add(&rsrcs->avail_ldb_queues,
+ &ldb_queue->func_list);
+ rsrcs->num_avail_ldb_queues++;
+ }
+
+ /* Move the domain's ldb ports to the function's avail list */
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ list = &domain->used_ldb_ports[i];
+ DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
+ iter1, iter2) {
+ int j;
+
+ ldb_port->owned = false;
+ ldb_port->configured = false;
+ ldb_port->num_pending_removals = 0;
+ ldb_port->num_mappings = 0;
+ ldb_port->init_tkn_cnt = 0;
+ for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+ ldb_port->qid_map[j].state =
+ DLB2_QUEUE_UNMAPPED;
+
+ dlb2_list_del(&domain->used_ldb_ports[i],
+ &ldb_port->domain_list);
+ dlb2_list_add(&rsrcs->avail_ldb_ports[i],
+ &ldb_port->func_list);
+ rsrcs->num_avail_ldb_ports[i]++;
+ }
+
+ list = &domain->avail_ldb_ports[i];
+ DLB2_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port,
+ iter1, iter2) {
+ ldb_port->owned = false;
+
+ dlb2_list_del(&domain->avail_ldb_ports[i],
+ &ldb_port->domain_list);
+ dlb2_list_add(&rsrcs->avail_ldb_ports[i],
+ &ldb_port->func_list);
+ rsrcs->num_avail_ldb_ports[i]++;
+ }
+ }
+
+ /* Move the domain's dir ports to the function's avail list */
+ list = &domain->used_dir_pq_pairs;
+ DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
+ dir_port->owned = false;
+ dir_port->port_configured = false;
+ dir_port->init_tkn_cnt = 0;
+
+ dlb2_list_del(&domain->used_dir_pq_pairs,
+ &dir_port->domain_list);
+
+ dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
+ &dir_port->func_list);
+ rsrcs->num_avail_dir_pq_pairs++;
+ }
+
+ list = &domain->avail_dir_pq_pairs;
+ DLB2_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
+ dir_port->owned = false;
+
+ dlb2_list_del(&domain->avail_dir_pq_pairs,
+ &dir_port->domain_list);
+
+ dlb2_list_add(&rsrcs->avail_dir_pq_pairs,
+ &dir_port->func_list);
+ rsrcs->num_avail_dir_pq_pairs++;
+ }
+
+ /* Return hist list entries to the function */
+ ret = dlb2_bitmap_set_range(rsrcs->avail_hist_list_entries,
+ domain->hist_list_entry_base,
+ domain->total_hist_list_entries);
+ if (ret) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: domain hist list base doesn't match the function's bitmap.\n",
+ __func__);
+ return ret;
+ }
+
+ domain->total_hist_list_entries = 0;
+ domain->avail_hist_list_entries = 0;
+ domain->hist_list_entry_base = 0;
+ domain->hist_list_entry_offset = 0;
+
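+ /* Return the domain's credits to the function-wide pools */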
+ rsrcs->num_avail_qed_entries += domain->num_ldb_credits;
+ domain->num_ldb_credits = 0;
+
+ rsrcs->num_avail_dqed_entries += domain->num_dir_credits;
+ domain->num_dir_credits = 0;
+
+ rsrcs->num_avail_aqed_entries += domain->num_avail_aqed_entries;
+ rsrcs->num_avail_aqed_entries += domain->num_used_aqed_entries;
+ domain->num_avail_aqed_entries = 0;
+ domain->num_used_aqed_entries = 0;
+
+ domain->num_pending_removals = 0;
+ domain->num_pending_additions = 0;
+ domain->configured = false;
+ domain->started = false;
+
+ /*
+ * Move the domain out of the used_domains list and back to the
+ * function's avail_domains list.
+ */
+ dlb2_list_del(&rsrcs->used_domains, &domain->func_list);
+ dlb2_list_add(&rsrcs->avail_domains, &domain->func_list);
+ rsrcs->num_avail_domains++;
+
+ return 0;
+}
+
+static int dlb2_domain_drain_unmapped_queue(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain,
+ struct dlb2_ldb_queue *queue)
+{
+ struct dlb2_ldb_port *port;
+ int ret, i;
+
+ /* If a domain has LDB queues, it must have LDB ports */
+ for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
+ if (!dlb2_list_empty(&domain->used_ldb_ports[i]))
+ break;
+ }
+
+ if (i == DLB2_NUM_COS_DOMAINS) {
+ DLB2_HW_ERR(hw,
+ "[%s()] Internal error: No configured LDB ports\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ port = DLB2_DOM_LIST_HEAD(domain->used_ldb_ports[i], typeof(*port));
+
+ /* If necessary, free up a QID slot in this CQ */
+ if (port->num_mappings == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
+ struct dlb2_ldb_queue *mapped_queue;
+
+ mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
+
+ ret = dlb2_ldb_port_unmap_qid(hw, port, mapped_queue);
+ if (ret)
+ return ret;
+ }
+
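+ /* Map the queue to a CQ so its QEs can be drained */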
+ ret = dlb2_ldb_port_map_qid_dynamic(hw, port, queue, 0);
+ if (ret)
+ return ret;
+
+ return dlb2_domain_drain_mapped_queues(hw, domain);
+}
+
+static int dlb2_domain_drain_unmapped_queues(struct dlb2_hw *hw,
+ struct dlb2_hw_domain *domain)
+{
+ struct dlb2_list_entry *iter;
+ struct dlb2_ldb_queue *queue;
+ int ret;
+ RTE_SET_USED(iter);
+
+ /* If the domain hasn't been started, there's no traffic to drain */
+ if (!domain->started)
+ return 0;
+
+ /*
+ * Pre-condition: the unattached queue must not have any outstanding
+ * completions. This is ensured by calling dlb2_domain_drain_ldb_cqs()
+ * prior to this in dlb2_domain_drain_mapped_queues().
+ */
+ DLB2_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
+ if (queue->num_mappings != 0 ||
+ dlb2_ldb_queue_is_empty(hw, queue))
+ continue;
+
+ ret = dlb2_domain_drain_unmapped_queue(hw, domain, queue);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * dlb2_reset_domain() - Reset a DLB scheduling domain and its associated
+ * hardware resources.
+ * @hw: Contains the current state of the DLB2 hardware.
+ * @domain_id: Domain ID
+ * @vdev_req: Request came from a virtual device.
+ * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
+ *
+ * Note: User software *must* stop sending to this domain's producer ports
+ * before invoking this function, otherwise undefined behavior will result.
+ *
+ * Return: 0 upon success, < 0 on error.
+ */
+int dlb2_reset_domain(struct dlb2_hw *hw,
+ u32 domain_id,
+ bool vdev_req,
+ unsigned int vdev_id)
+{
+ struct dlb2_hw_domain *domain;
+ int ret;
+
+ dlb2_log_reset_domain(hw, domain_id, vdev_req, vdev_id);
+
+ domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+
+ if (!domain || !domain->configured)
+ return -EINVAL;
+
+ /* Disable VPPs */
+ if (vdev_req) {
+ dlb2_domain_disable_dir_vpps(hw, domain, vdev_id);
+
+ dlb2_domain_disable_ldb_vpps(hw, domain, vdev_id);
+ }
+
+ /* Disable CQ interrupts */
+ dlb2_domain_disable_dir_port_interrupts(hw, domain);
+
+ dlb2_domain_disable_ldb_port_interrupts(hw, domain);
+
+ /*
+ * For each queue owned by this domain, disable its write permissions so
+ * that any traffic sent to it is dropped. Well-behaved software
+ * should not be sending QEs at this point.
+ */
+ dlb2_domain_disable_dir_queue_write_perms(hw, domain);
+
+ dlb2_domain_disable_ldb_queue_write_perms(hw, domain);
+
+ /* Turn off completion tracking on all the domain's PPs. */
+ dlb2_domain_disable_ldb_seq_checks(hw, domain);
+
+ /*
+ * Disable the LDB CQs and drain them in order to complete the map and
+ * unmap procedures, which require zero CQ inflights and zero QID
+ * inflights respectively.
+ */
+ dlb2_domain_disable_ldb_cqs(hw, domain);
+
+ ret = dlb2_domain_drain_ldb_cqs(hw, domain, false);
+ if (ret < 0)
+ return ret;
+
+ ret = dlb2_domain_wait_for_ldb_cqs_to_empty(hw, domain);
+ if (ret < 0)
+ return ret;
+
+ ret = dlb2_domain_finish_unmap_qid_procedures(hw, domain);
+ if (ret < 0)
+ return ret;
+
+ ret = dlb2_domain_finish_map_qid_procedures(hw, domain);
+ if (ret < 0)
+ return ret;
+
+ /* Re-enable the CQs in order to drain the mapped queues. */
+ dlb2_domain_enable_ldb_cqs(hw, domain);
+
+ ret = dlb2_domain_drain_mapped_queues(hw, domain);
+ if (ret < 0)
+ return ret;
+
+ ret = dlb2_domain_drain_unmapped_queues(hw, domain);
+ if (ret < 0)
+ return ret;
+
+ /* Done draining LDB QEs, so disable the CQs. */
+ dlb2_domain_disable_ldb_cqs(hw, domain);
+
+ dlb2_domain_drain_dir_queues(hw, domain);
+
+ /* Done draining DIR QEs, so disable the CQs. */
+ dlb2_domain_disable_dir_cqs(hw, domain);
+
+ /* Disable PPs */
+ dlb2_domain_disable_dir_producer_ports(hw, domain);
+
+ dlb2_domain_disable_ldb_producer_ports(hw, domain);
+
+ ret = dlb2_domain_verify_reset_success(hw, domain);
+ if (ret)
+ return ret;
+
+ /* Reset the QID and port state. */
+ dlb2_domain_reset_registers(hw, domain);
+
+ /* Hardware reset complete. Reset the domain's software state */
+ ret = dlb2_domain_reset_software_state(hw, domain);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
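+/**
+ * dlb2_finish_unmap_qid_procedures() - attempt to complete any pending
+ * unmap procedures.
+ * @hw: Contains the current state of the DLB2 hardware.
+ *
+ * Return: the number of unmap procedures that remain pending.
+ */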
+unsigned int dlb2_finish_unmap_qid_procedures(struct dlb2_hw *hw)
+{
+ int i, num = 0;
+
+ /* Finish queue unmap jobs for any domain that needs it */
+ for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
+ struct dlb2_hw_domain *domain = &hw->domains[i];
+
+ num += dlb2_domain_finish_unmap_qid_procedures(hw, domain);
+ }
+
+ return num;
+}
+
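+/**
+ * dlb2_finish_map_qid_procedures() - attempt to complete any pending
+ * map procedures.
+ * @hw: Contains the current state of the DLB2 hardware.
+ *
+ * Return: the number of map procedures that remain pending.
+ */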
+unsigned int dlb2_finish_map_qid_procedures(struct dlb2_hw *hw)
+{
+ int i, num = 0;
+
+ /* Finish queue map jobs for any domain that needs it */
+ for (i = 0; i < DLB2_MAX_NUM_DOMAINS; i++) {
+ struct dlb2_hw_domain *domain = &hw->domains[i];
+
+ num += dlb2_domain_finish_map_qid_procedures(hw, domain);
+ }
+
+ return num;
+}
diff --git a/drivers/event/dlb2/pf/dlb2_main.c b/drivers/event/dlb2/pf/dlb2_main.c
index 1c275ff..ca1ad69 100644
--- a/drivers/event/dlb2/pf/dlb2_main.c
+++ b/drivers/event/dlb2/pf/dlb2_main.c
@@ -618,3 +618,17 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev)
/****** Device configuration ******/
/**********************************/
+int
+dlb2_pf_create_sched_domain(struct dlb2_hw *hw,
+ struct dlb2_create_sched_domain_args *args,
+ struct dlb2_cmd_response *resp)
+{
+ return dlb2_hw_create_sched_domain(hw, args, resp, NOT_VF_REQ,
+ PF_ID_ZERO);
+}
+
+int
+dlb2_pf_reset_domain(struct dlb2_hw *hw, u32 id)
+{
+ return dlb2_reset_domain(hw, id, NOT_VF_REQ, PF_ID_ZERO);
+}
diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c
index 8c5ec20..21f28a4 100644
--- a/drivers/event/dlb2/pf/dlb2_pf.c
+++ b/drivers/event/dlb2/pf/dlb2_pf.c
@@ -114,16 +114,60 @@ dlb2_pf_get_cq_poll_mode(struct dlb2_hw_dev *handle,
return 0;
}
+static int
+dlb2_pf_sched_domain_create(struct dlb2_hw_dev *handle,
+ struct dlb2_create_sched_domain_args *arg)
+{
+ struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+ struct dlb2_cmd_response response = {0};
+ int ret;
+
+ DLB2_INFO(dlb2_dev->dlb2_device, "Entering %s()\n", __func__);
+
+ if (dlb2_dev->domain_reset_failed) {
+ response.status = DLB2_ST_DOMAIN_RESET_FAILED;
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = dlb2_pf_create_sched_domain(&dlb2_dev->hw, arg, &response);
+
+done:
+
+ arg->response = response;
+
+ DLB2_INFO(dlb2_dev->dlb2_device, "Exiting %s() with ret=%d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static void
+dlb2_pf_domain_reset(struct dlb2_eventdev *dlb2)
+{
+ struct dlb2_dev *dlb2_dev;
+ int ret;
+
+ dlb2_dev = (struct dlb2_dev *)dlb2->qm_instance.pf_dev;
+ ret = dlb2_pf_reset_domain(&dlb2_dev->hw, dlb2->qm_instance.domain_id);
+ if (ret)
+ DLB2_LOG_ERR("dlb2_pf_reset_domain err %d", ret);
+}
+
static void
dlb2_pf_iface_fn_ptrs_init(void)
{
dlb2_iface_low_level_io_init = dlb2_pf_low_level_io_init;
dlb2_iface_open = dlb2_pf_open;
+ dlb2_iface_domain_reset = dlb2_pf_domain_reset;
dlb2_iface_get_device_version = dlb2_pf_get_device_version;
dlb2_iface_hardware_init = dlb2_pf_hardware_init;
dlb2_iface_get_num_resources = dlb2_pf_get_num_resources;
dlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;
+ dlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;
}
/* PCI DEV HOOKS */
--
2.6.4
next prev parent reply other threads:[~2020-09-11 20:31 UTC|newest]
Thread overview: 366+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-09-11 20:26 [dpdk-dev] [PATCH 00/22] Add DLB2 PMD Timothy McDaniel
2020-09-11 20:26 ` [dpdk-dev] [PATCH 01/22] event/dlb2: add meson build infrastructure Timothy McDaniel
2020-10-06 15:58 ` Eads, Gage
2020-10-17 18:20 ` [dpdk-dev] [PATCH v2 00/22] Add DLB2 PMD Timothy McDaniel
2020-10-17 18:20 ` [dpdk-dev] [PATCH v2 01/22] event/dlb2: add documentation and meson build infrastructure Timothy McDaniel
2020-10-18 8:48 ` Jerin Jacob
2020-10-19 8:33 ` Bruce Richardson
2020-10-20 15:17 ` McDaniel, Timothy
2020-10-20 15:20 ` Thomas Monjalon
2020-10-20 15:33 ` McDaniel, Timothy
2020-10-20 15:38 ` Bruce Richardson
2020-10-20 15:34 ` Bruce Richardson
2020-10-20 15:43 ` McDaniel, Timothy
2020-10-21 16:33 ` McDaniel, Timothy
2020-10-20 14:07 ` McDaniel, Timothy
2020-10-19 9:59 ` Kinsella, Ray
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 00/23] Add DLB2 PMD Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 01/23] event/dlb2: add documentation and meson build infrastructure Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 02/23] event/dlb2: add dynamic logging Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 03/23] event/dlb2: add private data structures and constants Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 04/23] event/dlb2: add definitions shared with LKM or shared code Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 05/23] event/dlb2: add inline functions Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 06/23] event/dlb2: add eventdev probe Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 07/23] event/dlb2: add flexible interface Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 08/23] event/dlb2: add probe-time hardware init Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 09/23] event/dlb2: add xstats Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 10/23] event/dlb2: add infos get and configure Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 11/23] event/dlb2: add queue and port default conf Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 12/23] event/dlb2: add queue setup Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 13/23] event/dlb2: add port setup Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 14/23] event/dlb2: add port link Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 15/23] event/dlb2: add port unlink and port unlinks in progress Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 16/23] event/dlb2: add eventdev start Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 17/23] event/dlb2: add enqueue and its burst variants Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 18/23] event/dlb2: add dequeue " Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 19/23] event/dlb2: add eventdev stop and close Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 20/23] event/dlb2: add PMD's token pop public interface Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 21/23] event/dlb2: add PMD self-tests Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 22/23] event/dlb2: add queue and port release Timothy McDaniel
2020-10-30 9:43 ` [dpdk-dev] [PATCH v5 23/23] event/dlb2: add timeout ticks entry point Timothy McDaniel
2020-10-30 10:01 ` [dpdk-dev] [PATCH v5 00/23] Add DLB2 PMD Thomas Monjalon
2020-10-30 10:16 ` McDaniel, Timothy
2020-10-30 10:32 ` Jerin Jacob
2020-10-30 10:43 ` Thomas Monjalon
2020-10-30 11:58 ` McDaniel, Timothy
2020-10-30 13:15 ` Thomas Monjalon
2020-10-30 15:35 ` McDaniel, Timothy
2020-10-30 15:47 ` Thomas Monjalon
2020-10-30 16:02 ` McDaniel, Timothy
2020-10-30 16:42 ` Thomas Monjalon
2020-10-30 14:21 ` Jerin Jacob
2020-10-30 15:25 ` McDaniel, Timothy
2020-10-30 15:31 ` Jerin Jacob
2020-10-30 16:08 ` Van Haaren, Harry
2020-10-30 16:13 ` McDaniel, Timothy
2020-10-30 15:33 ` David Marchand
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 " Timothy McDaniel
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 01/23] event/dlb2: add documentation and meson build infrastructure Timothy McDaniel
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 02/23] event/dlb2: add dynamic logging Timothy McDaniel
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 03/23] event/dlb2: add private data structures and constants Timothy McDaniel
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 04/23] event/dlb2: add definitions shared with LKM or shared code Timothy McDaniel
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 05/23] event/dlb2: add inline functions Timothy McDaniel
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 06/23] event/dlb2: add eventdev probe Timothy McDaniel
2020-10-30 19:51 ` Eads, Gage
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 07/23] event/dlb2: add flexible interface Timothy McDaniel
2020-10-30 19:51 ` Eads, Gage
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 08/23] event/dlb2: add probe-time hardware init Timothy McDaniel
2020-10-30 19:50 ` Eads, Gage
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 09/23] event/dlb2: add xstats Timothy McDaniel
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 10/23] event/dlb2: add infos get and configure Timothy McDaniel
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 11/23] event/dlb2: add queue and port default conf Timothy McDaniel
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 12/23] event/dlb2: add queue setup Timothy McDaniel
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 13/23] event/dlb2: add port setup Timothy McDaniel
2020-10-30 18:50 ` Eads, Gage
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 14/23] event/dlb2: add port link Timothy McDaniel
2020-10-30 18:28 ` [dpdk-dev] [PATCH v6 15/23] event/dlb2: add port unlink and port unlinks in progress Timothy McDaniel
2020-10-30 18:29 ` [dpdk-dev] [PATCH v6 16/23] event/dlb2: add eventdev start Timothy McDaniel
2020-10-30 18:29 ` [dpdk-dev] [PATCH v6 17/23] event/dlb2: add enqueue and its burst variants Timothy McDaniel
2020-10-30 18:29 ` [dpdk-dev] [PATCH v6 18/23] event/dlb2: add dequeue " Timothy McDaniel
2020-10-30 18:29 ` [dpdk-dev] [PATCH v6 19/23] event/dlb2: add eventdev stop and close Timothy McDaniel
2020-10-30 18:29 ` [dpdk-dev] [PATCH v6 20/23] event/dlb2: add PMD's token pop public interface Timothy McDaniel
2020-10-30 18:29 ` [dpdk-dev] [PATCH v6 21/23] event/dlb2: add PMD self-tests Timothy McDaniel
2020-10-30 18:29 ` [dpdk-dev] [PATCH v6 22/23] event/dlb2: add queue and port release Timothy McDaniel
2020-10-30 18:29 ` [dpdk-dev] [PATCH v6 23/23] event/dlb2: add timeout ticks entry point Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 00/23] Add DLB2 PMD Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 01/23] event/dlb2: add documentation and meson build infrastructure Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 02/23] event/dlb2: add dynamic logging Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 03/23] event/dlb2: add private data structures and constants Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 04/23] event/dlb2: add definitions shared with LKM or shared code Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 05/23] event/dlb2: add inline functions Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 06/23] event/dlb2: add eventdev probe Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 07/23] event/dlb2: add flexible interface Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 08/23] event/dlb2: add probe-time hardware init Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 09/23] event/dlb2: add xstats Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 10/23] event/dlb2: add infos get and configure Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 11/23] event/dlb2: add queue and port default conf Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 12/23] event/dlb2: add queue setup Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 13/23] event/dlb2: add port setup Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 14/23] event/dlb2: add port link Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 15/23] event/dlb2: add port unlink and port unlinks in progress Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 16/23] event/dlb2: add eventdev start Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 17/23] event/dlb2: add enqueue and its burst variants Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 18/23] event/dlb2: add dequeue " Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 19/23] event/dlb2: add eventdev stop and close Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 20/23] event/dlb2: add PMD's token pop public interface Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 21/23] event/dlb2: add PMD self-tests Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 22/23] event/dlb2: add queue and port release Timothy McDaniel
2020-10-30 23:51 ` [dpdk-dev] [PATCH v7 23/23] event/dlb2: add timeout ticks entry point Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 00/23] Add DLB2 PMD Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 01/23] event/dlb2: add documentation and meson build infrastructure Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 02/23] event/dlb2: add dynamic logging Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 03/23] event/dlb2: add private data structures and constants Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 04/23] event/dlb2: add definitions shared with LKM or shared code Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 05/23] event/dlb2: add inline functions Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 06/23] event/dlb2: add eventdev probe Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 07/23] event/dlb2: add flexible interface Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 08/23] event/dlb2: add probe-time hardware init Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 09/23] event/dlb2: add xstats Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 10/23] event/dlb2: add infos get and configure Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 11/23] event/dlb2: add queue and port default conf Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 12/23] event/dlb2: add queue setup Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 13/23] event/dlb2: add port setup Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 14/23] event/dlb2: add port link Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 15/23] event/dlb2: add port unlink and port unlinks in progress Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 16/23] event/dlb2: add eventdev start Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 17/23] event/dlb2: add enqueue and its burst variants Timothy McDaniel
2020-10-31 9:52 ` Jerin Jacob
2020-10-31 17:13 ` McDaniel, Timothy
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 18/23] event/dlb2: add dequeue " Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 19/23] event/dlb2: add eventdev stop and close Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 20/23] event/dlb2: add PMD's token pop public interface Timothy McDaniel
2020-10-31 10:51 ` David Marchand
2020-10-31 16:37 ` McDaniel, Timothy
2020-10-31 19:19 ` McDaniel, Timothy
2020-10-31 21:38 ` David Marchand
2020-10-31 21:43 ` McDaniel, Timothy
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 21/23] event/dlb2: add PMD self-tests Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 22/23] event/dlb2: add queue and port release Timothy McDaniel
2020-10-31 2:01 ` [dpdk-dev] [PATCH v8 23/23] event/dlb2: add timeout ticks entry point Timothy McDaniel
2020-10-31 17:25 ` [dpdk-dev] [PATCH v9 00/23] Add DLB2 PMD Timothy McDaniel
2020-10-31 17:25 ` [dpdk-dev] [PATCH v9 01/23] event/dlb2: add documentation and meson build infrastructure Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 02/23] event/dlb2: add dynamic logging Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 03/23] event/dlb2: add private data structures and constants Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 04/23] event/dlb2: add definitions shared with LKM or shared code Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 05/23] event/dlb2: add inline functions Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 06/23] event/dlb2: add eventdev probe Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 07/23] event/dlb2: add flexible interface Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 08/23] event/dlb2: add probe-time hardware init Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 09/23] event/dlb2: add xstats Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 10/23] event/dlb2: add infos get and configure Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 11/23] event/dlb2: add queue and port default conf Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 12/23] event/dlb2: add queue setup Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 13/23] event/dlb2: add port setup Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 14/23] event/dlb2: add port link Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 15/23] event/dlb2: add port unlink and port unlinks in progress Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 16/23] event/dlb2: add eventdev start Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 17/23] event/dlb2: add enqueue and its burst variants Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 18/23] event/dlb2: add dequeue " Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 19/23] event/dlb2: add eventdev stop and close Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 20/23] event/dlb2: add PMD's token pop public interface Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 21/23] event/dlb2: add PMD self-tests Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 22/23] event/dlb2: add queue and port release Timothy McDaniel
2020-10-31 17:26 ` [dpdk-dev] [PATCH v9 23/23] event/dlb2: add timeout ticks entry point Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 00/23] Add DLB2 PMD Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 01/23] event/dlb2: add documentation and meson build infrastructure Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 02/23] event/dlb2: add dynamic logging Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 03/23] event/dlb2: add private data structures and constants Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 04/23] event/dlb2: add definitions shared with LKM or shared code Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 05/23] event/dlb2: add inline functions Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 06/23] event/dlb2: add eventdev probe Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 07/23] event/dlb2: add flexible interface Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 08/23] event/dlb2: add probe-time hardware init Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 09/23] event/dlb2: add xstats Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 10/23] event/dlb2: add infos get and configure Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 11/23] event/dlb2: add queue and port default conf Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 12/23] event/dlb2: add queue setup Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 13/23] event/dlb2: add port setup Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 14/23] event/dlb2: add port link Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 15/23] event/dlb2: add port unlink and port unlinks in progress Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 16/23] event/dlb2: add eventdev start Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 17/23] event/dlb2: add enqueue and its burst variants Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 18/23] event/dlb2: add dequeue " Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 19/23] event/dlb2: add eventdev stop and close Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 20/23] event/dlb2: add PMD's token pop public interface Timothy McDaniel
2020-11-01 20:00 ` [dpdk-dev] [PATCH v10 21/23] event/dlb2: add PMD self-tests Timothy McDaniel
2020-11-01 20:01 ` [dpdk-dev] [PATCH v10 22/23] event/dlb2: add queue and port release Timothy McDaniel
2020-11-01 20:01 ` [dpdk-dev] [PATCH v10 23/23] event/dlb2: add timeout ticks entry point Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 00/23] Add DLB2 PMD Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 01/23] event/dlb2: add documentation and meson build infrastructure Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 02/23] event/dlb2: add dynamic logging Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 03/23] event/dlb2: add private data structures and constants Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 04/23] event/dlb2: add definitions shared with LKM or shared code Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 05/23] event/dlb2: add inline functions Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 06/23] event/dlb2: add eventdev probe Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 07/23] event/dlb2: add flexible interface Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 08/23] event/dlb2: add probe-time hardware init Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 09/23] event/dlb2: add xstats Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 10/23] event/dlb2: add infos get and configure Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 11/23] event/dlb2: add queue and port default conf Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 12/23] event/dlb2: add queue setup Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 13/23] event/dlb2: add port setup Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 14/23] event/dlb2: add port link Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 15/23] event/dlb2: add port unlink and port unlinks in progress Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 16/23] event/dlb2: add eventdev start Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 17/23] event/dlb2: add enqueue and its burst variants Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 18/23] event/dlb2: add dequeue and its burst variants Timothy McDaniel
2020-11-01 23:37 ` [dpdk-dev] [PATCH v11 19/23] event/dlb2: add eventdev stop and close Timothy McDaniel
2020-11-01 23:38 ` [dpdk-dev] [PATCH v11 20/23] event/dlb2: add PMD's token pop public interface Timothy McDaniel
2020-11-01 23:38 ` [dpdk-dev] [PATCH v11 21/23] event/dlb2: add PMD self-tests Timothy McDaniel
2020-11-01 23:38 ` [dpdk-dev] [PATCH v11 22/23] event/dlb2: add queue and port release Timothy McDaniel
2020-11-01 23:38 ` [dpdk-dev] [PATCH v11 23/23] event/dlb2: add timeout ticks entry point Timothy McDaniel
2020-11-02 8:49 ` [dpdk-dev] [PATCH v11 00/23] Add DLB2 PMD Jerin Jacob
2020-10-17 18:20 ` [dpdk-dev] [PATCH v2 02/22] event/dlb2: add dynamic logging Timothy McDaniel
2020-10-18 8:57 ` Jerin Jacob
2020-10-20 14:08 ` McDaniel, Timothy
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 03/22] event/dlb2: add private data structures and constants Timothy McDaniel
2020-10-20 14:01 ` Eads, Gage
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 04/22] event/dlb2: add definitions shared with LKM or shared code Timothy McDaniel
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 05/22] event/dlb2: add inline functions Timothy McDaniel
2020-10-18 8:59 ` Jerin Jacob
2020-10-20 14:08 ` McDaniel, Timothy
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 06/22] event/dlb2: add probe Timothy McDaniel
2020-10-18 8:39 ` Jerin Jacob
2020-10-20 14:04 ` McDaniel, Timothy
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 07/22] event/dlb2: add xstats Timothy McDaniel
2020-10-20 14:01 ` Eads, Gage
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 08/22] event/dlb2: add infos get and configure Timothy McDaniel
2020-10-20 14:01 ` Eads, Gage
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 09/22] event/dlb2: add queue and port default conf Timothy McDaniel
2020-10-20 14:02 ` Eads, Gage
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 10/22] event/dlb2: add queue setup Timothy McDaniel
2020-10-20 14:01 ` Eads, Gage
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 11/22] event/dlb2: add port setup Timothy McDaniel
2020-10-20 14:02 ` Eads, Gage
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 12/22] event/dlb2: add port link Timothy McDaniel
2020-10-20 14:02 ` Eads, Gage
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 13/22] event/dlb2: add port unlink and port unlinks in progress Timothy McDaniel
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 14/22] event/dlb2: add eventdev start Timothy McDaniel
2020-10-20 14:04 ` Eads, Gage
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 15/22] event/dlb2: add enqueue and its burst variants Timothy McDaniel
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 16/22] event/dlb2: add dequeue and its burst variants Timothy McDaniel
2020-10-20 14:04 ` Eads, Gage
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 17/22] event/dlb2: add eventdev stop and close Timothy McDaniel
2020-10-20 14:04 ` Eads, Gage
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 18/22] event/dlb2: add PMD's token pop public interface Timothy McDaniel
2020-10-18 9:13 ` Jerin Jacob
2020-10-20 14:12 ` McDaniel, Timothy
2020-10-19 10:01 ` Kinsella, Ray
2020-10-20 14:05 ` Eads, Gage
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 19/22] event/dlb2: add PMD self-tests Timothy McDaniel
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 20/22] event/dlb2: add queue and port release Timothy McDaniel
2020-10-20 14:04 ` Eads, Gage
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 21/22] event/dlb2: add timeout ticks entry point Timothy McDaniel
2020-10-17 18:21 ` [dpdk-dev] [PATCH v2 22/22] doc: add new DLB2 eventdev driver to relnotes Timothy McDaniel
2020-10-18 9:22 ` Jerin Jacob
2020-10-20 14:13 ` McDaniel, Timothy
2020-10-24 13:06 ` [dpdk-dev] [PATCH v2 00/22] Add DLB2 PMD Thomas Monjalon
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 00/23] " Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 01/23] event/dlb2: add documentation and meson build infrastructure Timothy McDaniel
2020-10-24 12:58 ` Jerin Jacob
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 02/23] event/dlb2: add dynamic logging Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 03/23] event/dlb2: add private data structures and constants Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 04/23] event/dlb2: add definitions shared with LKM or shared code Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 05/23] event/dlb2: add inline functions Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 06/23] event/dlb2: add eventdev probe Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 07/23] event/dlb2: add flexible interface Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 08/23] event/dlb2: add probe-time hardware init Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 09/23] event/dlb2: add xstats Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 10/23] event/dlb2: add infos get and configure Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 11/23] event/dlb2: add queue and port default conf Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 12/23] event/dlb2: add queue setup Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 13/23] event/dlb2: add port setup Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 14/23] event/dlb2: add port link Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 15/23] event/dlb2: add port unlink and port unlinks in progress Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 16/23] event/dlb2: add eventdev start Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 17/23] event/dlb2: add enqueue and its burst variants Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 18/23] event/dlb2: add dequeue and its burst variants Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 19/23] event/dlb2: add eventdev stop and close Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 20/23] event/dlb2: add PMD's token pop public interface Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 21/23] event/dlb2: add PMD self-tests Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 22/23] event/dlb2: add queue and port release Timothy McDaniel
2020-10-23 18:30 ` [dpdk-dev] [PATCH v3 23/23] event/dlb2: add timeout ticks entry point Timothy McDaniel
2020-10-29 15:24 ` [dpdk-dev] [PATCH v4 00/23] Add DLB2 PMD Timothy McDaniel
2020-10-29 15:24 ` [dpdk-dev] [PATCH v4 01/23] event/dlb2: add documentation and meson build infrastructure Timothy McDaniel
2020-10-29 15:24 ` [dpdk-dev] [PATCH v4 02/23] event/dlb2: add dynamic logging Timothy McDaniel
2020-10-29 15:24 ` [dpdk-dev] [PATCH v4 03/23] event/dlb2: add private data structures and constants Timothy McDaniel
2020-10-29 15:29 ` Stephen Hemminger
2020-10-29 16:07 ` McDaniel, Timothy
2020-10-29 15:30 ` Stephen Hemminger
2020-10-29 16:10 ` McDaniel, Timothy
2020-10-29 15:24 ` [dpdk-dev] [PATCH v4 04/23] event/dlb2: add definitions shared with LKM or shared code Timothy McDaniel
2020-10-29 15:24 ` [dpdk-dev] [PATCH v4 05/23] event/dlb2: add inline functions Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 06/23] event/dlb2: add eventdev probe Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 07/23] event/dlb2: add flexible interface Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 08/23] event/dlb2: add probe-time hardware init Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 09/23] event/dlb2: add xstats Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 10/23] event/dlb2: add infos get and configure Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 11/23] event/dlb2: add queue and port default conf Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 12/23] event/dlb2: add queue setup Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 13/23] event/dlb2: add port setup Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 14/23] event/dlb2: add port link Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 15/23] event/dlb2: add port unlink and port unlinks in progress Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 16/23] event/dlb2: add eventdev start Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 17/23] event/dlb2: add enqueue and its burst variants Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 18/23] event/dlb2: add dequeue and its burst variants Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 19/23] event/dlb2: add eventdev stop and close Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 20/23] event/dlb2: add PMD's token pop public interface Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 21/23] event/dlb2: add PMD self-tests Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 22/23] event/dlb2: add queue and port release Timothy McDaniel
2020-10-29 15:25 ` [dpdk-dev] [PATCH v4 23/23] event/dlb2: add timeout ticks entry point Timothy McDaniel
2020-09-11 20:26 ` [dpdk-dev] [PATCH 02/22] event/dlb2: add dynamic logging Timothy McDaniel
2020-10-06 16:52 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 03/22] event/dlb2: add private data structures and constants Timothy McDaniel
2020-10-06 16:52 ` Eads, Gage
2020-10-07 16:14 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 04/22] event/dlb2: add definitions shared with LKM or shared code Timothy McDaniel
2020-10-06 19:26 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 05/22] event/dlb2: add inline functions Timothy McDaniel
2020-10-06 21:33 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 06/22] event/dlb2: add probe Timothy McDaniel
2020-10-07 16:56 ` Eads, Gage
2020-10-18 9:05 ` Jerin Jacob
2020-10-20 14:11 ` McDaniel, Timothy
2020-09-11 20:26 ` [dpdk-dev] [PATCH 07/22] event/dlb2: add xstats Timothy McDaniel
2020-09-17 20:58 ` Chen, Mike Ximing
2020-09-17 21:26 ` McDaniel, Timothy
2020-09-18 0:37 ` Chen, Mike Ximing
2020-09-18 8:39 ` Bruce Richardson
2020-10-07 18:47 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 08/22] event/dlb2: add infos get and configure Timothy McDaniel [this message]
2020-10-07 19:14 ` [dpdk-dev] [PATCH 08/22] event/dlb2: add infos get and configure Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 09/22] event/dlb2: add queue and port default conf Timothy McDaniel
2020-10-07 19:15 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 10/22] event/dlb2: add queue setup Timothy McDaniel
2020-10-07 19:26 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 11/22] event/dlb2: add port setup Timothy McDaniel
2020-10-07 20:34 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 12/22] event/dlb2: add port link Timothy McDaniel
2020-10-07 20:40 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 13/22] event/dlb2: add port unlink and port unlinks in progress Timothy McDaniel
2020-10-07 20:44 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 14/22] event/dlb2: add eventdev start Timothy McDaniel
2020-10-07 20:51 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 15/22] event/dlb2: add enqueue and its burst variants Timothy McDaniel
2020-10-07 21:02 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 16/22] event/dlb2: add dequeue and its burst variants Timothy McDaniel
2020-10-07 21:18 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 17/22] event/dlb2: add eventdev stop and close Timothy McDaniel
2020-10-07 21:21 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 18/22] event/dlb2: add PMD's token pop public interface Timothy McDaniel
2020-10-07 21:24 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 19/22] event/dlb2: add PMD self-tests Timothy McDaniel
2020-10-07 21:33 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 20/22] event/dlb2: add queue and port release Timothy McDaniel
2020-10-07 21:55 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 21/22] event/dlb2: add timeout ticks entry point Timothy McDaniel
2020-10-07 21:58 ` Eads, Gage
2020-09-11 20:26 ` [dpdk-dev] [PATCH 22/22] doc: add new DLB2 eventdev driver to relnotes Timothy McDaniel
2020-10-07 22:04 ` Eads, Gage
2020-09-21 17:11 ` [dpdk-dev] [PATCH 00/22] Add DLB2 PMD Jerin Jacob
2020-09-21 17:15 ` McDaniel, Timothy
2020-09-29 18:41 ` Jerin Jacob
2020-09-29 18:46 ` McDaniel, Timothy
2020-09-30 16:10 ` McDaniel, Timothy
2020-09-29 18:46 ` Jerin Jacob
2020-09-30 16:14 ` McDaniel, Timothy