From: Tim McDaniel <timothy.mcdaniel@intel.com>
To: jerinj@marvell.com
Cc: mattias.ronnblom@ericsson.com, dev@dpdk.org, gage.eads@intel.com,
	harry.van.haaren@intel.com, "McDaniel,
	Timothy" <timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH 16/27] event/dlb: add infos_get and configure
Date: Fri, 26 Jun 2020 23:37:40 -0500
Message-ID: <1593232671-5690-17-git-send-email-timothy.mcdaniel@intel.com>
In-Reply-To: <1593232671-5690-1-git-send-email-timothy.mcdaniel@intel.com>

From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>

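Add the dev_infos_get and dev_configure entry points to the DLB PMD.
dev_infos_get reports the device's resource limits, adjusted for any
resources currently owned by the scheduling domain. dev_configure
creates a DLB scheduling domain and its load-balanced and directed
credit pools, sized according to the application's configuration. If
the device was already configured, the existing scheduling domain is
reset and released first so that it can be reconfigured.

For illustration only (not part of this patch), here is a minimal
sketch of how an application reaches these entry points through the
public eventdev API. The queue and port counts are arbitrary
assumptions, not values the PMD requires:

    #include <rte_common.h>
    #include <rte_eventdev.h>

    static int
    app_configure_dlb(uint8_t dev_id)
    {
        struct rte_event_dev_info info;
        struct rte_event_dev_config config = {0};
        int ret;

        /* Invokes the PMD's dev_infos_get (dlb_eventdev_info_get) */
        ret = rte_event_dev_info_get(dev_id, &info);
        if (ret < 0)
            return ret;

        /* Stay within the limits the PMD reported */
        config.nb_event_queues =
            RTE_MIN(4u, (unsigned int)info.max_event_queues);
        config.nb_event_ports =
            RTE_MIN(4u, (unsigned int)info.max_event_ports);
        config.nb_events_limit = info.max_num_events;
        config.nb_event_queue_flows = info.max_event_queue_flows;
        config.nb_event_port_dequeue_depth =
            info.max_event_port_dequeue_depth;
        config.nb_event_port_enqueue_depth =
            info.max_event_port_enqueue_depth;
        config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;

        /* Invokes the PMD's dev_configure (dlb_eventdev_configure) */
        return rte_event_dev_configure(dev_id, &config);
    }
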
Signed-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/dlb.c |  401 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 401 insertions(+)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index c635deb..b864253 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -218,6 +218,395 @@ int dlb_string_to_int(int *result, const char *str)
 			    DLB_MAX_NUM_DIR_CREDITS);
 		return -EINVAL;
 	}
+	return 0;
+}
+
+/* VDEV-only notes:
+ * This function first unmaps all memory mappings and closes the
+ * domain's file descriptor, which causes the driver to reset the
+ * scheduling domain. Once that completes (when close() returns), we
+ * can safely free the dynamically allocated memory used by the
+ * scheduling domain.
+ *
+ * PF-only notes:
+ * We will maintain a use count and use it to determine when a reset
+ * is required. In PF mode, we never mmap or munmap device memory,
+ * and we own the entire physical PCI device.
+ */
+
+static void
+dlb_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	enum dlb_configuration_state config_state;
+	int i, j;
+
+	/* Close and reset the domain */
+	dlb_iface_domain_close(dlb);
+
+	/* Free all dynamically allocated port memory */
+	for (i = 0; i < dlb->num_ports; i++)
+		dlb_free_qe_mem(&dlb->ev_ports[i].qm_port);
+
+	/* If reconfiguring, mark the device's queues and ports as "previously
+	 * configured." If the user doesn't reconfigure them, the PMD will
+	 * reapply their previous configuration when the device is started.
+	 */
+	config_state = (reconfig) ? DLB_PREV_CONFIGURED : DLB_NOT_CONFIGURED;
+
+	for (i = 0; i < dlb->num_ports; i++) {
+		dlb->ev_ports[i].qm_port.config_state = config_state;
+		/* Reset setup_done so ports can be reconfigured */
+		dlb->ev_ports[i].setup_done = false;
+		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+			dlb->ev_ports[i].link[j].mapped = false;
+	}
+
+	for (i = 0; i < dlb->num_queues; i++)
+		dlb->ev_queues[i].qm_queue.config_state = config_state;
+
+	for (i = 0; i < DLB_MAX_NUM_QUEUES; i++)
+		dlb->ev_queues[i].setup_done = false;
+
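+	/* Zero the resource counts; the next configure sets them again */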
+	dlb->num_ports = 0;
+	dlb->num_ldb_ports = 0;
+	dlb->num_dir_ports = 0;
+	dlb->num_queues = 0;
+	dlb->num_ldb_queues = 0;
+	dlb->num_dir_queues = 0;
+	dlb->configured = false;
+}
+
+static int
+dlb_ldb_credit_pool_create(struct dlb_hw_dev *handle)
+{
+	struct dlb_create_ldb_pool_args cfg;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	if (handle == NULL)
+		return -EINVAL;
+
+	if (!handle->cfg.resources.num_ldb_credits) {
+		handle->cfg.ldb_credit_pool_id = 0;
+		handle->cfg.num_ldb_credits = 0;
+		return 0;
+	}
+
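+	/* The iface layer returns status and the pool id via 'response' */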
+	cfg.response = (uintptr_t)&response;
+	cfg.num_ldb_credits = handle->cfg.resources.num_ldb_credits;
+
+	ret = dlb_iface_ldb_credit_pool_create(handle, &cfg);
+	if (ret < 0)
+		DLB_LOG_ERR("dlb: ldb_credit_pool_create ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+
+	handle->cfg.ldb_credit_pool_id = response.id;
+	handle->cfg.num_ldb_credits = cfg.num_ldb_credits;
+
+	return ret;
+}
+
+static int
+dlb_dir_credit_pool_create(struct dlb_hw_dev *handle)
+{
+	struct dlb_create_dir_pool_args cfg;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	if (handle == NULL)
+		return -EINVAL;
+
+	if (!handle->cfg.resources.num_dir_credits) {
+		handle->cfg.dir_credit_pool_id = 0;
+		handle->cfg.num_dir_credits = 0;
+		return 0;
+	}
+
+	cfg.response = (uintptr_t)&response;
+	cfg.num_dir_credits = handle->cfg.resources.num_dir_credits;
+
+	ret = dlb_iface_dir_credit_pool_create(handle, &cfg);
+	if (ret < 0)
+		DLB_LOG_ERR("dlb: dir_credit_pool_create ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+
+	handle->cfg.dir_credit_pool_id = response.id;
+	handle->cfg.num_dir_credits = cfg.num_dir_credits;
+
+	return ret;
+}
+
+static int
+dlb_hw_create_sched_domain(struct dlb_hw_dev *handle,
+			   struct dlb_eventdev *dlb,
+			   const struct dlb_hw_rsrcs *resources_asked)
+{
+	int ret = 0;
+	struct dlb_create_sched_domain_args *config_params;
+	struct dlb_cmd_response response = {0};
+
+	if (resources_asked == NULL) {
+		DLB_LOG_ERR("dlb: dlb_create NULL parameter\n");
+		ret = EINVAL;
+		goto error_exit;
+	}
+
+	/* Map generic qm resources to dlb resources */
+	config_params = &handle->cfg.resources;
+
+	config_params->response = (uintptr_t)&response;
+
+	/* DIR ports and queues */
+
+	config_params->num_dir_ports =
+		resources_asked->num_dir_ports;
+
+	config_params->num_dir_credits =
+		resources_asked->num_dir_credits;
+
+	/* LDB ports and queues */
+
+	config_params->num_ldb_queues =
+		resources_asked->num_ldb_queues;
+
+	config_params->num_ldb_ports =
+		resources_asked->num_ldb_ports;
+
+	config_params->num_ldb_credits =
+		resources_asked->num_ldb_credits;
+
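+	/* Atomic inflights are provisioned per load-balanced queue, and
+	 * history list entries per load-balanced port
+	 */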
+	config_params->num_atomic_inflights =
+		dlb->num_atm_inflights_per_queue *
+		config_params->num_ldb_queues;
+
+	config_params->num_hist_list_entries = config_params->num_ldb_ports *
+		DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
+
+	/* dlb limited to 1 credit pool per queue type */
+	config_params->num_ldb_credit_pools = 1;
+	config_params->num_dir_credit_pools = 1;
+
+	DLB_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d, ldb_cred_pools=%d, dir-credit_pools=%d\n",
+		    config_params->num_ldb_queues,
+		    config_params->num_ldb_ports,
+		    config_params->num_dir_ports,
+		    config_params->num_atomic_inflights,
+		    config_params->num_hist_list_entries,
+		    config_params->num_ldb_credits,
+		    config_params->num_dir_credits,
+		    config_params->num_ldb_credit_pools,
+		    config_params->num_dir_credit_pools);
+
+	/* Configure the QM */
+
+	ret = dlb_iface_sched_domain_create(handle, config_params);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: domain create failed, device_id = %d, (driver ret = %d, extra status: %s)\n",
+			    handle->device_id,
+			    ret,
+			    dlb_error_strings[response.status]);
+		goto error_exit;
+	}
+
+	handle->domain_id = response.id;
+	handle->domain_id_valid = 1;
+
+	config_params->response = 0;
+
+	ret = dlb_ldb_credit_pool_create(handle);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: create ldb credit pool failed\n");
+		goto error_exit2;
+	}
+
+	ret = dlb_dir_credit_pool_create(handle);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: create dir credit pool failed\n");
+		goto error_exit2;
+	}
+
+	handle->cfg.configured = true;
+
+	return 0;
+
+error_exit2:
+	dlb_iface_domain_close(dlb);
+
+error_exit:
+
+	return ret;
+}
+
+/* End HW specific */
+static void
+dlb_eventdev_info_get(struct rte_eventdev *dev,
+		      struct rte_event_dev_info *dev_info)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	int ret;
+
+	ret = dlb_hw_query_resources(dlb);
+	if (ret) {
+		const struct rte_eventdev_data *data = dev->data;
+
+		DLB_LOG_ERR("get resources err=%d, devid=%d\n",
+			    ret, data->dev_id);
+		/* This function is void, so fall through and return the
+		 * values set up during probe.
+		 */
+	}
+
+	/* Add the number of resources currently owned by this domain. These
+	 * would become available if the scheduling domain were reset due to
+	 * the application calling eventdev_configure again to *reconfigure*
+	 * the domain.
+	 */
+	evdev_dlb_default_info.max_event_ports += dlb->num_ldb_ports;
+	evdev_dlb_default_info.max_event_queues += dlb->num_ldb_queues;
+	evdev_dlb_default_info.max_num_events += dlb->num_ldb_credits;
+
+	/* In DLB A-stepping hardware, applications are limited to 128
+	 * configured ports (load-balanced or directed). The reported number of
+	 * available ports must reflect this.
+	 */
+	if (dlb->revision < DLB_REV_B0) {
+		int used_ports;
+
+		used_ports = DLB_MAX_NUM_LDB_PORTS + DLB_MAX_NUM_DIR_PORTS -
+			dlb->hw_rsrc_query_results.num_ldb_ports -
+			dlb->hw_rsrc_query_results.num_dir_ports;
+
+		evdev_dlb_default_info.max_event_ports =
+			RTE_MIN(evdev_dlb_default_info.max_event_ports,
+				128 - used_ports);
+	}
+
+	evdev_dlb_default_info.max_event_queues =
+		RTE_MIN(evdev_dlb_default_info.max_event_queues,
+			RTE_EVENT_MAX_QUEUES_PER_DEV);
+
+	evdev_dlb_default_info.max_num_events =
+		RTE_MIN(evdev_dlb_default_info.max_num_events,
+			dlb->max_num_events_override);
+
+	*dev_info = evdev_dlb_default_info;
+}
+
+/* Note: 1 QM instance per QM device, QM instance/device == event device */
+static int
+dlb_eventdev_configure(const struct rte_eventdev *dev)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
+	const struct rte_eventdev_data *data = dev->data;
+	const struct rte_event_dev_config *config = &data->dev_conf;
+	int ret;
+
+	/* If this eventdev is already configured, we must release the current
+	 * scheduling domain before attempting to configure a new one.
+	 */
+	if (dlb->configured) {
+		dlb_hw_reset_sched_domain(dev, true);
+
+		ret = dlb_hw_query_resources(dlb);
+		if (ret) {
+			DLB_LOG_ERR("get resources err=%d, devid=%d\n",
+				    ret, data->dev_id);
+			return ret;
+		}
+	}
+
+	if (config->nb_event_queues > rsrcs->num_queues) {
+		DLB_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
+			    config->nb_event_queues,
+			    rsrcs->num_queues);
+		return -EINVAL;
+	}
+	if (config->nb_event_ports > (rsrcs->num_ldb_ports
+			+ rsrcs->num_dir_ports)) {
+		DLB_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
+			    config->nb_event_ports,
+			    (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
+		return -EINVAL;
+	}
+	if (config->nb_events_limit > rsrcs->nb_events_limit) {
+		DLB_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
+			    config->nb_events_limit,
+			    rsrcs->nb_events_limit);
+		return -EINVAL;
+	}
+
+	if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+		dlb->global_dequeue_wait = false;
+	} else {
+		uint32_t timeout32;
+
+		dlb->global_dequeue_wait = true;
+
+		timeout32 = config->dequeue_timeout_ns;
+
+		/* PF PMD does not support interrupts, and
+		 * UMONITOR wait is temporarily disabled.
+		 */
+
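+		/* Convert the dequeue timeout from ns to rte timer ticks */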
+		dlb->global_dequeue_wait_ticks =
+			timeout32 * (rte_get_timer_hz() / 1E9);
+	}
+
+	/* Does this platform support umonitor/umwait? */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
+		if (RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 0 &&
+		    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 1) {
+			DLB_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE must be 0 or 1.\n",
+				    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE);
+			return -EINVAL;
+		}
+		dlb->umwait_allowed = true;
+	}
+
+	rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
+	rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
+	/* 1 dir queue per dir port */
+	rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
+
+	/* Scale down nb_events_limit by 4 for directed credits, since there
+	 * are 4x as many load-balanced credits.
+	 */
+	rsrcs->num_ldb_credits = 0;
+	rsrcs->num_dir_credits = 0;
+
+	if (rsrcs->num_ldb_queues)
+		rsrcs->num_ldb_credits = config->nb_events_limit;
+	if (rsrcs->num_dir_ports)
+		rsrcs->num_dir_credits = config->nb_events_limit / 4;
+	if (dlb->num_dir_credits_override != -1)
+		rsrcs->num_dir_credits = dlb->num_dir_credits_override;
+
+	if (dlb_hw_create_sched_domain(handle, dlb, rsrcs) < 0) {
+		DLB_LOG_ERR("dlb_hw_create_sched_domain failed\n");
+		return -ENODEV;
+	}
+
+	dlb->new_event_limit = config->nb_events_limit;
+	rte_atomic32_set(&dlb->inflights, 0);
+
+	/* Save number of ports/queues for this event dev */
+	dlb->num_ports = config->nb_event_ports;
+	dlb->num_queues = config->nb_event_queues;
+	dlb->num_dir_ports = rsrcs->num_dir_ports;
+	dlb->num_ldb_ports = dlb->num_ports - dlb->num_dir_ports;
+	dlb->num_ldb_queues = dlb->num_queues - dlb->num_dir_ports;
+	dlb->num_dir_queues = dlb->num_dir_ports;
+	dlb->num_ldb_credits = rsrcs->num_ldb_credits;
+	dlb->num_dir_credits = rsrcs->num_dir_credits;
+
+	dlb->configured = true;
 
 	return 0;
 }
@@ -292,6 +681,18 @@ int dlb_string_to_int(int *result, const char *str)
 	return 0;
 }
 
+void
+dlb_entry_points_init(struct rte_eventdev *dev)
+{
+	static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
+		.dev_infos_get    = dlb_eventdev_info_get,
+		.dev_configure    = dlb_eventdev_configure,
+	};
+
+	/* Expose PMD's eventdev interface */
+	dev->dev_ops = &dlb_eventdev_entry_ops;
+}
 
 static void
 dlb_qm_mmio_fn_init(void)
-- 
1.7.10

