From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>
To: jerinj@marvell.com
Cc: dev@dpdk.org, gage.eads@intel.com, harry.van.haaren@intel.com
Date: Fri, 12 Jun 2020 16:24:22 -0500
Message-Id: <20200612212434.6852-16-timothy.mcdaniel@intel.com>
X-Mailer: git-send-email 2.13.6
In-Reply-To: <20200612212434.6852-1-timothy.mcdaniel@intel.com>
References: <20200612212434.6852-1-timothy.mcdaniel@intel.com>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH 15/27] event/dlb: add probe

Add the eventdev probe entry points for primary and secondary
processes. Probe opens the DLB device interface, queries the resources
provisioned by the DLB kernel driver, and parses the PMD's devargs.

Signed-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/dlb.c | 519 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 519 insertions(+)
 create mode 100644 drivers/event/dlb/dlb.c

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
new file mode 100644
index 000000000..124b86a1d
--- /dev/null
+++ b/drivers/event/dlb/dlb.c
@@ -0,0 +1,519 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#include <errno.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cpuflags.h>
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_kvargs.h>
+#include <rte_log.h>
+#include <rte_memcpy.h>
+#include <rte_spinlock.h>
+
+#include "dlb_priv.h"
+#include "dlb_iface.h"
+#include "dlb_inline_fns.h"
+
+/*
+ * Resources exposed to eventdev. Some values are overridden at runtime
+ * using values returned by the DLB kernel driver.
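+ * (For example, dlb_hw_query_resources() overwrites max_event_queues,
+ * max_event_ports, and max_num_events with the provisioned counts.)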
+ */ +#if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX) +#error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues" +#endif +static struct rte_event_dev_info evdev_dlb_default_info = { + .driver_name = "", /* probe will set */ + .min_dequeue_timeout_ns = DLB_MIN_DEQUEUE_TIMEOUT_NS, + .max_dequeue_timeout_ns = DLB_MAX_DEQUEUE_TIMEOUT_NS, +#if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB_MAX_NUM_LDB_QUEUES) + .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV, +#else + .max_event_queues = DLB_MAX_NUM_LDB_QUEUES, +#endif + .max_event_queue_flows = DLB_MAX_NUM_FLOWS, + .max_event_queue_priority_levels = DLB_QID_PRIORITIES, + .max_event_priority_levels = DLB_QID_PRIORITIES, + .max_event_ports = DLB_MAX_NUM_LDB_PORTS, + .max_event_port_dequeue_depth = DLB_MAX_CQ_DEPTH, + .max_event_port_enqueue_depth = DLB_MAX_ENQUEUE_DEPTH, + .max_event_port_links = DLB_MAX_NUM_QIDS_PER_LDB_CQ, + .max_num_events = DLB_MAX_NUM_LDB_CREDITS, + .max_single_link_event_port_queue_pairs = DLB_MAX_NUM_DIR_PORTS, + .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS | + RTE_EVENT_DEV_CAP_EVENT_QOS | + RTE_EVENT_DEV_CAP_BURST_MODE | + RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED | + RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE | + RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES), +}; + +/* These functions will vary based on processor capabilities */ +static struct dlb_port_low_level_io_functions qm_mmio_fns; + +static int +dlb_hw_query_resources(struct dlb_eventdev *dlb) +{ + struct dlb_hw_dev *handle = &dlb->qm_instance; + struct dlb_hw_resource_info *dlb_info = &handle->info; + int ret; + + /* Query driver resources provisioned for this VF */ + + ret = dlb_iface_get_num_resources(handle, + &dlb->hw_rsrc_query_results); + if (ret) { + DLB_LOG_ERR("ioctl get dlb num resources, err=%d\n", + ret); + return ret; + } + + /* Complete filling in device resource info returned to evdev app, + * overriding any default values. + * The capabilities (CAPs) were set at compile time. + */ + + evdev_dlb_default_info.max_event_queues = + dlb->hw_rsrc_query_results.num_ldb_queues; + + evdev_dlb_default_info.max_event_ports = + dlb->hw_rsrc_query_results.num_ldb_ports; + + evdev_dlb_default_info.max_num_events = + dlb->hw_rsrc_query_results.max_contiguous_ldb_credits; + + /* Save off values used when creating the scheduling domain. */ + + handle->info.num_sched_domains = + dlb->hw_rsrc_query_results.num_sched_domains; + + handle->info.hw_rsrc_max.nb_events_limit = + dlb->hw_rsrc_query_results.max_contiguous_ldb_credits; + + handle->info.hw_rsrc_max.num_queues = + dlb->hw_rsrc_query_results.num_ldb_queues + + dlb->hw_rsrc_query_results.num_dir_ports; + + handle->info.hw_rsrc_max.num_ldb_queues = + dlb->hw_rsrc_query_results.num_ldb_queues; + + handle->info.hw_rsrc_max.num_ldb_ports = + dlb->hw_rsrc_query_results.num_ldb_ports; + + handle->info.hw_rsrc_max.num_dir_ports = + dlb->hw_rsrc_query_results.num_dir_ports; + + handle->info.hw_rsrc_max.reorder_window_size = + dlb->hw_rsrc_query_results.num_hist_list_entries; + + rte_memcpy(dlb_info, &handle->info.hw_rsrc_max, sizeof(*dlb_info)); + + return 0; +} + +/* Wrapper for string to int conversion. Substituted for atoi(...), which is + * unsafe. 
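+ * Returns 0 on success, -errno if strtol() fails, and -EINVAL on NULL
+ * arguments, malformed input, or values outside the range of int.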
+ */
+#define DLB_BASE_10 10
+
+int
+dlb_string_to_int(int *result, const char *str)
+{
+	long ret;
+	char *endptr;
+
+	if (str == NULL || result == NULL)
+		return -EINVAL;
+
+	errno = 0;
+	ret = strtol(str, &endptr, DLB_BASE_10);
+	if (errno)
+		return -errno;
+
+	/* Reject empty strings and trailing non-numeric characters */
+	if (endptr == str || *endptr != '\0')
+		return -EINVAL;
+
+	/* long int and int may be different widths on some architectures */
+	if (ret < INT_MIN || ret > INT_MAX)
+		return -EINVAL;
+
+	*result = ret;
+	return 0;
+}
+
+int
+set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *socket_id = opaque;
+	int ret;
+
+	if (value == NULL || opaque == NULL) {
+		DLB_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb_string_to_int(socket_id, value);
+	if (ret < 0)
+		return ret;
+
+	/* Valid socket IDs are 0 .. RTE_MAX_NUMA_NODES - 1 */
+	if (*socket_id >= RTE_MAX_NUMA_NODES)
+		return -EINVAL;
+
+	return 0;
+}
+
+int
+set_max_num_events(const char *key __rte_unused,
+		   const char *value,
+		   void *opaque)
+{
+	int *max_num_events = opaque;
+	int ret;
+
+	if (value == NULL || opaque == NULL) {
+		DLB_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb_string_to_int(max_num_events, value);
+	if (ret < 0)
+		return ret;
+
+	if (*max_num_events < 0 || *max_num_events > DLB_MAX_NUM_LDB_CREDITS) {
+		DLB_LOG_ERR("dlb: max_num_events must be between 0 and %d\n",
+			    DLB_MAX_NUM_LDB_CREDITS);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int
+set_num_dir_credits(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int *num_dir_credits = opaque;
+	int ret;
+
+	if (value == NULL || opaque == NULL) {
+		DLB_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb_string_to_int(num_dir_credits, value);
+	if (ret < 0)
+		return ret;
+
+	if (*num_dir_credits < 0 ||
+	    *num_dir_credits > DLB_MAX_NUM_DIR_CREDITS) {
+		DLB_LOG_ERR("dlb: num_dir_credits must be between 0 and %d\n",
+			    DLB_MAX_NUM_DIR_CREDITS);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+set_dev_id(const char *key __rte_unused,
+	   const char *value,
+	   void *opaque)
+{
+	int *dev_id = opaque;
+	int ret;
+
+	if (value == NULL || opaque == NULL) {
+		DLB_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb_string_to_int(dev_id, value);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int
+set_defer_sched(const char *key __rte_unused,
+		const char *value,
+		void *opaque)
+{
+	int *defer_sched = opaque;
+
+	if (value == NULL || opaque == NULL) {
+		DLB_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	if (strcmp(value, "on") != 0) {
+		DLB_LOG_ERR("Invalid defer_sched argument \"%s\" (expected \"on\")\n",
+			    value);
+		return -EINVAL;
+	}
+
+	*defer_sched = 1;
+
+	return 0;
+}
+
+static int
+set_num_atm_inflights(const char *key __rte_unused,
+		      const char *value,
+		      void *opaque)
+{
+	int *num_atm_inflights = opaque;
+	int ret;
+
+	if (value == NULL || opaque == NULL) {
+		DLB_LOG_ERR("NULL pointer\n");
+		return -EINVAL;
+	}
+
+	ret = dlb_string_to_int(num_atm_inflights, value);
+	if (ret < 0)
+		return ret;
+
+	if (*num_atm_inflights < 0 ||
+	    *num_atm_inflights > DLB_MAX_NUM_ATM_INFLIGHTS) {
+		DLB_LOG_ERR("dlb: atm_inflights must be between 0 and %d\n",
+			    DLB_MAX_NUM_ATM_INFLIGHTS);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+dlb_qm_mmio_fn_init(void)
+{
+	/* Process-local function pointers for performing low-level port I/O */
+
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_MOVDIR64B))
+		qm_mmio_fns.pp_enqueue_four = dlb_movdir64b;
+	else
+		qm_mmio_fns.pp_enqueue_four = dlb_movntdq;
+}
+
+int
+dlb_primary_eventdev_probe(struct rte_eventdev *dev,
+			   const char *name,
+			   struct dlb_devargs *dlb_args,
+			   bool is_vdev)
+{
+	struct dlb_eventdev *dlb;
+	int err, i;
+
+	dlb = dev->data->dev_private;
+
+	dlb->event_dev = dev; /* backlink */
+
+	dlb->is_vdev = is_vdev; /* vdev or pf */
+
+	evdev_dlb_default_info.driver_name = name;
+
+	dlb->max_num_events_override = dlb_args->max_num_events;
+	dlb->num_dir_credits_override = dlb_args->num_dir_credits_override;
+	dlb->qm_instance.device_path_id = dlb_args->dev_id;
+	dlb->defer_sched = dlb_args->defer_sched;
+	dlb->num_atm_inflights_per_queue = dlb_args->num_atm_inflights;
+
+	/* Open the interface.
+	 * For vdev mode, this means open the dlb kernel module.
+	 */
+	err = dlb_iface_open(&dlb->qm_instance, name);
+	if (err < 0) {
+		DLB_LOG_ERR("could not open event hardware device, err=%d\n",
+			    err);
+		return err;
+	}
+
+	err = dlb_iface_get_device_version(&dlb->qm_instance, &dlb->revision);
+	if (err < 0) {
+		DLB_LOG_ERR("dlb: failed to get the device version, err=%d\n",
+			    err);
+		return err;
+	}
+
+	err = dlb_hw_query_resources(dlb);
+	if (err) {
+		DLB_LOG_ERR("get resources err=%d for %s\n",
+			    err, name);
+		return err;
+	}
+
+	err = dlb_iface_get_cq_poll_mode(&dlb->qm_instance, &dlb->poll_mode);
+	if (err < 0) {
+		DLB_LOG_ERR("dlb: failed to get the poll mode, err=%d\n",
+			    err);
+		return err;
+	}
+
+	/* Complete xstats runtime initialization */
+	err = dlb_xstats_init(dlb);
+	if (err) {
+		DLB_LOG_ERR("dlb: failed to init xstats, err=%d\n", err);
+		return err;
+	}
+
+	/* Initialize each port's token pop mode */
+	for (i = 0; i < DLB_MAX_NUM_PORTS; i++)
+		dlb->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
+
+	rte_spinlock_init(&dlb->qm_instance.resource_lock);
+
+	dlb_qm_mmio_fn_init();
+
+	dlb_iface_low_level_io_init(dlb);
+
+	dlb_entry_points_init(dev);
+
+	return 0;
+}
+
+int
+dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
+			     const char *name,
+			     bool is_vdev)
+{
+	struct dlb_eventdev *dlb;
+	int err;
+
+	RTE_SET_USED(is_vdev);
+
+	dlb = dev->data->dev_private;
+
+	evdev_dlb_default_info.driver_name = name;
+
+	err = dlb_iface_open(&dlb->qm_instance, name);
+	if (err < 0) {
+		DLB_LOG_ERR("could not open event hardware device, err=%d\n",
+			    err);
+		return err;
+	}
+
+	err = dlb_hw_query_resources(dlb);
+	if (err) {
+		DLB_LOG_ERR("get resources err=%d for %s\n",
+			    err, name);
+		return err;
+	}
+
+	dlb_qm_mmio_fn_init();
+
+	dlb_iface_low_level_io_init(dlb);
+
+	dlb_entry_points_init(dev);
+
+	return 0;
+}
+
+int
+dlb_parse_params(const char *params,
+		 const char *name,
+		 struct dlb_devargs *dlb_args)
+{
+	int ret = 0;
+	static const char * const args[] = { NUMA_NODE_ARG,
+					     DLB_MAX_NUM_EVENTS,
+					     DLB_NUM_DIR_CREDITS,
+					     DEV_ID_ARG,
+					     DLB_DEFER_SCHED_ARG,
+					     DLB_NUM_ATM_INFLIGHTS_ARG,
+					     NULL };
+
+	if (params != NULL && params[0] != '\0') {
+		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+		if (!kvlist) {
+			RTE_LOG(INFO, PMD,
+				"Ignoring unsupported parameters when creating device '%s'\n",
+				name);
+		} else {
+			ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
+						 set_numa_node,
+						 &dlb_args->socket_id);
+			if (ret != 0) {
+				DLB_LOG_ERR("%s: Error parsing numa node parameter\n",
+					    name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, DLB_MAX_NUM_EVENTS,
+						 set_max_num_events,
+						 &dlb_args->max_num_events);
+			if (ret != 0) {
+				DLB_LOG_ERR("%s: Error parsing max_num_events parameter\n",
+					    name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist,
+						 DLB_NUM_DIR_CREDITS,
+						 set_num_dir_credits,
+						 &dlb_args->num_dir_credits_override);
+			if (ret != 0) {
+				DLB_LOG_ERR("%s: Error parsing num_dir_credits parameter\n",
+					    name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
+						 set_dev_id,
+						 &dlb_args->dev_id);
+			if (ret != 0) {
+				DLB_LOG_ERR("%s: Error parsing dev_id parameter\n",
+					    name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, DLB_DEFER_SCHED_ARG,
+						 set_defer_sched,
+						 &dlb_args->defer_sched);
+			if (ret != 0) {
+				DLB_LOG_ERR("%s: Error parsing defer_sched parameter\n",
+					    name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist,
+						 DLB_NUM_ATM_INFLIGHTS_ARG,
+						 set_num_atm_inflights,
+						 &dlb_args->num_atm_inflights);
+			if (ret != 0) {
+				DLB_LOG_ERR("%s: Error parsing atm_inflights parameter\n",
+					    name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			rte_kvargs_free(kvlist);
+		}
+	}
+	return ret;
+}
+
+/* Declared extern in the header, for access from other .c files */
+int eventdev_dlb_log_level;
+
+RTE_INIT(evdev_dlb_init_log)
+{
+	eventdev_dlb_log_level = rte_log_register("pmd.event.dlb");
+	if (eventdev_dlb_log_level >= 0)
+		rte_log_set_level(eventdev_dlb_log_level, RTE_LOG_NOTICE);
+}
-- 
2.13.6
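
P.S. Below is a minimal usage sketch of dlb_parse_params() for reviewers.
It is illustrative only, not part of the patch: the key strings
("numa_node", "max_num_events", "defer_sched") are assumed expansions of
the NUMA_NODE_ARG, DLB_MAX_NUM_EVENTS, and DLB_DEFER_SCHED_ARG macros,
the device name "dlb_event" is likewise an assumption, and struct
dlb_devargs and the dlb_parse_params() prototype are assumed to be
declared in dlb_priv.h.

/* Hypothetical caller; key strings and device name are assumptions. */
#include <string.h>
#include "dlb_priv.h"

static int
dlb_parse_params_example(void)
{
	struct dlb_devargs dlb_args;

	memset(&dlb_args, 0, sizeof(dlb_args));

	/* On success (return 0), dlb_args.socket_id == 1,
	 * dlb_args.max_num_events == 2048, and dlb_args.defer_sched == 1.
	 * A params string with an unknown key makes rte_kvargs_parse()
	 * fail; dlb_parse_params() logs that, ignores the string, and
	 * still returns 0 with dlb_args left at its initial values.
	 */
	return dlb_parse_params("numa_node=1,max_num_events=2048,defer_sched=on",
				"dlb_event", &dlb_args);
}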