From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>
To: jerinj@marvell.com
Cc: dev@dpdk.org, gage.eads@intel.com, harry.van.haaren@intel.com
Date: Fri, 12 Jun 2020 16:24:23 -0500
Message-Id: <20200612212434.6852-17-timothy.mcdaniel@intel.com>
X-Mailer: git-send-email 2.13.6
In-Reply-To: <20200612212434.6852-1-timothy.mcdaniel@intel.com>
References: <20200612212434.6852-1-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH 16/27] event/dlb: add infos_get and configure

Change-Id: I749ae914852bc3516601301c2b1fb338ba7508f7
Signed-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/dlb.c | 401 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 401 insertions(+)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index 124b86a1d..c21ebe7e2 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -220,6 +220,394 @@ set_num_dir_credits(const char *key __rte_unused,
 			    DLB_MAX_NUM_DIR_CREDITS);
 		return -EINVAL;
 	}
+	return 0;
+}
+
+/* VDEV-only notes:
+ * This function first unmaps all memory mappings and closes the
+ * domain's file descriptor, which causes the driver to reset the
+ * scheduling domain. Once that completes (when close() returns), we
+ * can safely free the dynamically allocated memory used by the
+ * scheduling domain.
+ *
+ * PF-only notes:
+ * We will maintain a use count and use that to determine when
+ * a reset is required. In PF mode, we never mmap or munmap
+ * device memory, and we own the entire physical PCI device.
+ */
+
+static void
+dlb_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	enum dlb_configuration_state config_state;
+	int i, j;
+
+	/* Close and reset the domain */
+	dlb_iface_domain_close(dlb);
+
+	/* Free all dynamically allocated port memory */
+	for (i = 0; i < dlb->num_ports; i++)
+		dlb_free_qe_mem(&dlb->ev_ports[i].qm_port);
+
+	/* If reconfiguring, mark the device's queues and ports as "previously
+	 * configured." If the user doesn't reconfigure them, the PMD will
+	 * reapply their previous configuration when the device is started.
+	 */
+	config_state = (reconfig) ? DLB_PREV_CONFIGURED : DLB_NOT_CONFIGURED;
+
+	for (i = 0; i < dlb->num_ports; i++) {
+		dlb->ev_ports[i].qm_port.config_state = config_state;
+		/* Reset setup_done so ports can be reconfigured */
+		dlb->ev_ports[i].setup_done = false;
+		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+			dlb->ev_ports[i].link[j].mapped = false;
+	}
+
+	for (i = 0; i < dlb->num_queues; i++)
+		dlb->ev_queues[i].qm_queue.config_state = config_state;
+
+	for (i = 0; i < DLB_MAX_NUM_QUEUES; i++)
+		dlb->ev_queues[i].setup_done = false;
+
+	dlb->num_ports = 0;
+	dlb->num_ldb_ports = 0;
+	dlb->num_dir_ports = 0;
+	dlb->num_queues = 0;
+	dlb->num_ldb_queues = 0;
+	dlb->num_dir_queues = 0;
+	dlb->configured = false;
+}
+
+static int
+dlb_ldb_credit_pool_create(struct dlb_hw_dev *handle)
+{
+	struct dlb_create_ldb_pool_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	if (handle == NULL)
+		return -EINVAL;
+
+	if (!handle->cfg.resources.num_ldb_credits) {
+		handle->cfg.ldb_credit_pool_id = 0;
+		handle->cfg.num_ldb_credits = 0;
+		return 0;
+	}
+
+	cfg.response = (uintptr_t)&response;
+	cfg.num_ldb_credits = handle->cfg.resources.num_ldb_credits;
+
+	ret = dlb_iface_ldb_credit_pool_create(handle,
+					       &cfg);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: ldb_credit_pool_create ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+	}
+
+	handle->cfg.ldb_credit_pool_id = response.id;
+	handle->cfg.num_ldb_credits = cfg.num_ldb_credits;
+
+	return ret;
+}
+
+static int
+dlb_dir_credit_pool_create(struct dlb_hw_dev *handle)
+{
+	struct dlb_create_dir_pool_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	if (handle == NULL)
+		return -EINVAL;
+
+	if (!handle->cfg.resources.num_dir_credits) {
+		handle->cfg.dir_credit_pool_id = 0;
+		handle->cfg.num_dir_credits = 0;
+		return 0;
+	}
+
+	cfg.response = (uintptr_t)&response;
+	cfg.num_dir_credits = handle->cfg.resources.num_dir_credits;
+
+	ret = dlb_iface_dir_credit_pool_create(handle,
+					       &cfg);
+	if (ret < 0)
+		DLB_LOG_ERR("dlb: dir_credit_pool_create ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+
+	handle->cfg.dir_credit_pool_id = response.id;
+	handle->cfg.num_dir_credits = cfg.num_dir_credits;
+
+	return ret;
+}
+
+static int
+dlb_hw_create_sched_domain(struct dlb_hw_dev *handle,
+			   struct dlb_eventdev *dlb,
+			   const struct dlb_hw_rsrcs *resources_asked)
+{
+	int ret = 0;
+	struct dlb_create_sched_domain_args *config_params;
+	struct dlb_cmd_response response;
+
+	if (resources_asked == NULL) {
+		DLB_LOG_ERR("dlb: dlb_create NULL parameter\n");
+		ret = -EINVAL;
+		goto error_exit;
+	}
+
+	/* Map generic qm resources to dlb resources */
+	config_params = &handle->cfg.resources;
+
+	config_params->response = (uintptr_t)&response;
+
+	/* DIR ports and queues */
+
+	config_params->num_dir_ports =
+		resources_asked->num_dir_ports;
+
+	config_params->num_dir_credits =
+		resources_asked->num_dir_credits;
+
+	/* LDB ports and queues */
+
+	config_params->num_ldb_queues =
+		resources_asked->num_ldb_queues;
+
+	config_params->num_ldb_ports =
+		resources_asked->num_ldb_ports;
+
+	config_params->num_ldb_credits =
+		resources_asked->num_ldb_credits;
+
+	config_params->num_atomic_inflights =
+		dlb->num_atm_inflights_per_queue *
+		config_params->num_ldb_queues;
+
+	config_params->num_hist_list_entries = config_params->num_ldb_ports *
+		DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
+
+	/* dlb limited to 1 credit pool per queue type */
+	config_params->num_ldb_credit_pools = 1;
+	config_params->num_dir_credit_pools = 1;
+
+	DLB_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d, ldb_cred_pools=%d, dir_credit_pools=%d\n",
+		    config_params->num_ldb_queues,
+		    config_params->num_ldb_ports,
+		    config_params->num_dir_ports,
+		    config_params->num_atomic_inflights,
+		    config_params->num_hist_list_entries,
+		    config_params->num_ldb_credits,
+		    config_params->num_dir_credits,
+		    config_params->num_ldb_credit_pools,
+		    config_params->num_dir_credit_pools);
+
+	/* Configure the QM */
+
+	ret = dlb_iface_sched_domain_create(handle, config_params);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: domain create failed, device_id = %d, (driver ret = %d, extra status: %s)\n",
+			    handle->device_id,
+			    ret,
+			    dlb_error_strings[response.status]);
+		goto error_exit;
+	}
+
+	handle->domain_id = response.id;
+	handle->domain_id_valid = 1;
+
+	config_params->response = 0;
+
+	ret = dlb_ldb_credit_pool_create(handle);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: create ldb credit pool failed\n");
+		goto error_exit2;
+	}
+
+	ret = dlb_dir_credit_pool_create(handle);
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: create dir credit pool failed\n");
+		goto error_exit2;
+	}
+
+	handle->cfg.configured = true;
+
+	return 0;
+
+error_exit2:
+	dlb_iface_domain_close(dlb);
+
+error_exit:
+
+	return ret;
+}
+
+/* End HW specific */
+static void
+dlb_eventdev_info_get(struct rte_eventdev *dev,
+		      struct rte_event_dev_info *dev_info)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	int ret;
+
+	ret = dlb_hw_query_resources(dlb);
+	if (ret) {
+		const struct rte_eventdev_data *data = dev->data;
+
+		DLB_LOG_ERR("get resources err=%d, devid=%d\n",
+			    ret, data->dev_id);
+		/* fn is void, so fall through and return values set up in
+		 * probe
+		 */
+	}
+
+	/* Add num resources currently owned by this domain.
+	 * These would become available if the scheduling domain were reset due
+	 * to the application recalling eventdev_configure to *reconfigure* the
+	 * domain.
+	 */
+	evdev_dlb_default_info.max_event_ports += dlb->num_ldb_ports;
+	evdev_dlb_default_info.max_event_queues += dlb->num_ldb_queues;
+	evdev_dlb_default_info.max_num_events += dlb->num_ldb_credits;
+
+	/* In DLB A-stepping hardware, applications are limited to 128
+	 * configured ports (load-balanced or directed). The reported number of
+	 * available ports must reflect this.
+	 */
+	if (dlb->revision < DLB_REV_B0) {
+		int used_ports;
+
+		used_ports = DLB_MAX_NUM_LDB_PORTS + DLB_MAX_NUM_DIR_PORTS -
+			dlb->hw_rsrc_query_results.num_ldb_ports -
+			dlb->hw_rsrc_query_results.num_dir_ports;
+
+		evdev_dlb_default_info.max_event_ports =
+			RTE_MIN(evdev_dlb_default_info.max_event_ports,
+				128 - used_ports);
+	}
+
+	evdev_dlb_default_info.max_event_queues =
+		RTE_MIN(evdev_dlb_default_info.max_event_queues,
+			RTE_EVENT_MAX_QUEUES_PER_DEV);
+
+	evdev_dlb_default_info.max_num_events =
+		RTE_MIN(evdev_dlb_default_info.max_num_events,
+			dlb->max_num_events_override);
+
+	*dev_info = evdev_dlb_default_info;
+}
+
+/* Note: 1 QM instance per QM device, QM instance/device == event device */
+static int
+dlb_eventdev_configure(const struct rte_eventdev *dev)
+{
+	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+	struct dlb_hw_dev *handle = &dlb->qm_instance;
+	struct dlb_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
+	const struct rte_eventdev_data *data = dev->data;
+	const struct rte_event_dev_config *config = &data->dev_conf;
+	int ret;
+
+	/* If this eventdev is already configured, we must release the current
+	 * scheduling domain before attempting to configure a new one.
+	 */
+	if (dlb->configured) {
+		dlb_hw_reset_sched_domain(dev, true);
+
+		ret = dlb_hw_query_resources(dlb);
+		if (ret) {
+			DLB_LOG_ERR("get resources err=%d, devid=%d\n",
+				    ret, data->dev_id);
+			return ret;
+		}
+	}
+
+	if (config->nb_event_queues > rsrcs->num_queues) {
+		DLB_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
+			    config->nb_event_queues,
+			    rsrcs->num_queues);
+		return -EINVAL;
+	}
+	if (config->nb_event_ports > (rsrcs->num_ldb_ports +
+				      rsrcs->num_dir_ports)) {
+		DLB_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
+			    config->nb_event_ports,
+			    (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
+		return -EINVAL;
+	}
+	if (config->nb_events_limit > rsrcs->nb_events_limit) {
+		DLB_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
+			    config->nb_events_limit,
+			    rsrcs->nb_events_limit);
+		return -EINVAL;
+	}
+
+	if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+		dlb->global_dequeue_wait = false;
+	} else {
+		uint32_t timeout32;
+
+		dlb->global_dequeue_wait = true;
+
+		timeout32 = config->dequeue_timeout_ns;
+
+		/* PF PMD does not support interrupts, and
+		 * UMONITOR wait is temporarily disabled.
+		 */
+
+		dlb->global_dequeue_wait_ticks =
+			timeout32 * (rte_get_timer_hz() / 1E9);
+	}
+
+	/* Does this platform support umonitor/umwait? */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_UMWAIT)) {
+		if (RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 0 &&
+		    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE != 1) {
+			DLB_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE, must be 0 or 1.\n",
+				    RTE_LIBRTE_PMD_DLB_UMWAIT_CTL_STATE);
+			return -EINVAL;
+		}
+		dlb->umwait_allowed = true;
+	}
+
+	rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
+	rsrcs->num_ldb_ports = config->nb_event_ports - rsrcs->num_dir_ports;
+	/* 1 dir queue per dir port */
+	rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
+
+	/* Scale down nb_events_limit by 4 for directed credits, since there
+	 * are 4x as many load-balanced credits.
+	 */
+	rsrcs->num_ldb_credits = 0;
+	rsrcs->num_dir_credits = 0;
+
+	if (rsrcs->num_ldb_queues)
+		rsrcs->num_ldb_credits = config->nb_events_limit;
+	if (rsrcs->num_dir_ports)
+		rsrcs->num_dir_credits = config->nb_events_limit / 4;
+	if (dlb->num_dir_credits_override != -1)
+		rsrcs->num_dir_credits = dlb->num_dir_credits_override;
+
+	if (dlb_hw_create_sched_domain(handle, dlb, rsrcs) < 0) {
+		DLB_LOG_ERR("dlb_hw_create_sched_domain failed\n");
+		return -ENODEV;
+	}
+
+	dlb->new_event_limit = config->nb_events_limit;
+	rte_atomic32_set(&dlb->inflights, 0);
+
+	/* Save number of ports/queues for this event dev */
+	dlb->num_ports = config->nb_event_ports;
+	dlb->num_queues = config->nb_event_queues;
+	dlb->num_dir_ports = rsrcs->num_dir_ports;
+	dlb->num_ldb_ports = dlb->num_ports - dlb->num_dir_ports;
+	dlb->num_ldb_queues = dlb->num_queues - dlb->num_dir_ports;
+	dlb->num_dir_queues = dlb->num_dir_ports;
+	dlb->num_ldb_credits = rsrcs->num_ldb_credits;
+	dlb->num_dir_credits = rsrcs->num_dir_credits;
+
+	dlb->configured = true;
 	return 0;
 }
 
@@ -294,6 +682,19 @@ set_num_atm_inflights(const char *key __rte_unused,
 	return 0;
 }
 
+void
+dlb_entry_points_init(struct rte_eventdev *dev)
+{
+	static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
+		.dev_infos_get = dlb_eventdev_info_get,
+		.dev_configure = dlb_eventdev_configure,
+	};
+
+	/* Expose PMD's eventdev interface */
+
+	dev->dev_ops = &dlb_eventdev_entry_ops;
+
+}
 
 static void
 dlb_qm_mmio_fn_init(void)
-- 
2.13.6
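
For reference, below is an illustrative, out-of-tree sketch (not part of the patch) of how an application would exercise the two ops added here through the public eventdev API: rte_event_dev_info_get() reaches dlb_eventdev_info_get(), and rte_event_dev_configure() reaches dlb_eventdev_configure(). The device id, the lack of EAL setup, the setup_dlb_eventdev() helper name, and the choice of one directed port/queue are assumptions made for the example only.

#include <rte_common.h>
#include <rte_eventdev.h>

/* Illustrative sketch: query the PMD's reported limits, then configure the
 * device within them. Calling rte_event_dev_configure() again later would
 * exercise the reset-and-reconfigure path (dlb_hw_reset_sched_domain) above.
 */
static int
setup_dlb_eventdev(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config conf = {0};
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);	/* -> dev_infos_get */
	if (ret < 0)
		return ret;

	/* Stay within the limits the PMD just reported */
	conf.nb_event_queues = info.max_event_queues;
	conf.nb_event_ports = info.max_event_ports;
	conf.nb_events_limit = info.max_num_events;
	conf.nb_event_queue_flows = info.max_event_queue_flows;
	conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	conf.dequeue_timeout_ns = info.max_dequeue_timeout_ns;
	conf.nb_single_link_event_port_queues = 1; /* one directed port/queue */

	return rte_event_dev_configure(dev_id, &conf);	/* -> dev_configure */
}

Because RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT is not set in event_dev_cfg, this configuration takes the global dequeue-wait branch in dlb_eventdev_configure().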