From: Timothy McDaniel
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
 harry.van.haaren@intel.com, jerinj@marvell.com, thomas@monjalon.net
Date: Sat, 31 Oct 2020 12:26:14 -0500
Message-Id: <1604165181-19929-17-git-send-email-timothy.mcdaniel@intel.com>
In-Reply-To: <1604165181-19929-1-git-send-email-timothy.mcdaniel@intel.com>
References: <1602958879-8558-2-git-send-email-timothy.mcdaniel@intel.com>
 <1604165181-19929-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH v9 16/23] event/dlb2: add eventdev start

Add support for the eventdev start entry point. We delay initializing
some resources until eventdev start, since the number of linked queues
is needed to determine whether we are dealing with a load-balanced (ldb)
or directed (dir) resource. If this is a device restart, the previous
configuration is reapplied.

Signed-off-by: Timothy McDaniel
Reviewed-by: Gage Eads
---
 drivers/event/dlb2/dlb2.c                  | 133 ++++++++++++++++++++++++++++
 drivers/event/dlb2/dlb2_iface.c            |   3 +
 drivers/event/dlb2/dlb2_iface.h            |   3 +
 drivers/event/dlb2/pf/base/dlb2_resource.c | 123 +++++++++++++++++++++++++++
 drivers/event/dlb2/pf/dlb2_main.c          |  10 +++
 drivers/event/dlb2/pf/dlb2_pf.c            |  25 ++++++
 6 files changed, 297 insertions(+)

diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index c221a61..051836d 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -1957,6 +1957,138 @@ dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
         return cfg.response.id;
 }
 
+static int
+dlb2_eventdev_reapply_configuration(struct rte_eventdev *dev)
+{
+        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
+        int ret, i;
+
+        /* If an event queue or port was previously configured, but hasn't been
+         * reconfigured, reapply its original configuration.
+         */
+        for (i = 0; i < dlb2->num_queues; i++) {
+                struct dlb2_eventdev_queue *ev_queue;
+
+                ev_queue = &dlb2->ev_queues[i];
+
+                if (ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED)
+                        continue;
+
+                ret = dlb2_eventdev_queue_setup(dev, i, &ev_queue->conf);
+                if (ret < 0) {
+                        DLB2_LOG_ERR("dlb2: failed to reconfigure queue %d", i);
+                        return ret;
+                }
+        }
+
+        for (i = 0; i < dlb2->num_ports; i++) {
+                struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
+
+                if (ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED)
+                        continue;
+
+                ret = dlb2_eventdev_port_setup(dev, i, &ev_port->conf);
+                if (ret < 0) {
+                        DLB2_LOG_ERR("dlb2: failed to reconfigure ev_port %d",
+                                     i);
+                        return ret;
+                }
+        }
+
+        return 0;
+}
+
+static int
+dlb2_eventdev_apply_port_links(struct rte_eventdev *dev)
+{
+        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
+        int i;
+
+        /* Perform requested port->queue links */
+        for (i = 0; i < dlb2->num_ports; i++) {
+                struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
+                int j;
+
+                for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+                        struct dlb2_eventdev_queue *ev_queue;
+                        uint8_t prio, queue_id;
+
+                        if (!ev_port->link[j].valid)
+                                continue;
+
+                        prio = ev_port->link[j].priority;
+                        queue_id = ev_port->link[j].queue_id;
+
+                        if (dlb2_validate_port_link(ev_port, queue_id, true, j))
+                                return -EINVAL;
+
+                        ev_queue = &dlb2->ev_queues[queue_id];
+
+                        if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
+                                return -EINVAL;
+                }
+        }
+
+        return 0;
+}
+
+static int
+dlb2_eventdev_start(struct rte_eventdev *dev)
+{
+        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
+        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
+        struct dlb2_start_domain_args cfg;
+        int ret, i;
+
+        rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
+        if (dlb2->run_state != DLB2_RUN_STATE_STOPPED) {
+                DLB2_LOG_ERR("bad state %d for dev_start\n",
+                             (int)dlb2->run_state);
+                rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
+                return -EINVAL;
+        }
+        dlb2->run_state = DLB2_RUN_STATE_STARTING;
+        rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
+
+        /* If the device was configured more than once, some event ports and/or
+         * queues may need to be reconfigured.
+         */
+        ret = dlb2_eventdev_reapply_configuration(dev);
+        if (ret)
+                return ret;
+
+        /* The DLB PMD delays port links until the device is started.
+         */
+        ret = dlb2_eventdev_apply_port_links(dev);
+        if (ret)
+                return ret;
+
+        for (i = 0; i < dlb2->num_ports; i++) {
+                if (!dlb2->ev_ports[i].setup_done) {
+                        DLB2_LOG_ERR("dlb2: port %d not setup", i);
+                        return -ESTALE;
+                }
+        }
+
+        for (i = 0; i < dlb2->num_queues; i++) {
+                if (dlb2->ev_queues[i].num_links == 0) {
+                        DLB2_LOG_ERR("dlb2: queue %d is not linked", i);
+                        return -ENOLINK;
+                }
+        }
+
+        ret = dlb2_iface_sched_domain_start(handle, &cfg);
+        if (ret < 0) {
+                DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)\n",
+                             ret, dlb2_error_strings[cfg.response.status]);
+                return ret;
+        }
+
+        dlb2->run_state = DLB2_RUN_STATE_STARTED;
+        DLB2_LOG_DBG("dlb2: sched_domain_start completed OK\n");
+
+        return 0;
+}
+
 static void
 dlb2_entry_points_init(struct rte_eventdev *dev)
 {
@@ -1964,6 +2096,7 @@ dlb2_entry_points_init(struct rte_eventdev *dev)
         static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
                 .dev_infos_get = dlb2_eventdev_info_get,
                 .dev_configure = dlb2_eventdev_configure,
+                .dev_start = dlb2_eventdev_start,
                 .queue_def_conf = dlb2_eventdev_queue_default_conf_get,
                 .queue_setup = dlb2_eventdev_queue_setup,
                 .port_def_conf = dlb2_eventdev_port_default_conf_get,
diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c
index bd241f3..a86191d 100644
--- a/drivers/event/dlb2/dlb2_iface.c
+++ b/drivers/event/dlb2/dlb2_iface.c
@@ -63,3 +63,6 @@ int (*dlb2_iface_unmap_qid)(struct dlb2_hw_dev *handle,
 
 int (*dlb2_iface_pending_port_unmaps)(struct dlb2_hw_dev *handle,
                                       struct dlb2_pending_port_unmaps_args *args);
+
+int (*dlb2_iface_sched_domain_start)(struct dlb2_hw_dev *handle,
+                                     struct dlb2_start_domain_args *cfg);
diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h
index e892036..7ead374 100644
--- a/drivers/event/dlb2/dlb2_iface.h
+++ b/drivers/event/dlb2/dlb2_iface.h
@@ -62,4 +62,7 @@ extern int (*dlb2_iface_unmap_qid)(struct dlb2_hw_dev *handle,
 
 extern int (*dlb2_iface_pending_port_unmaps)(struct dlb2_hw_dev *handle,
                                              struct dlb2_pending_port_unmaps_args *args);
+
+extern int (*dlb2_iface_sched_domain_start)(struct dlb2_hw_dev *handle,
+                                            struct dlb2_start_domain_args *cfg);
 #endif /* _DLB2_IFACE_H_ */
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c
index 4a3d96d..c14a2f3 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c
@@ -5811,3 +5811,126 @@ int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
 
         return 0;
 }
+
+static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
+                                         u32 domain_id,
+                                         struct dlb2_cmd_response *resp,
+                                         bool vdev_req,
+                                         unsigned int vdev_id)
+{
+        struct dlb2_hw_domain *domain;
+
+        domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+
+        if (domain == NULL) {
+                resp->status = DLB2_ST_INVALID_DOMAIN_ID;
+                return -EINVAL;
+        }
+
+        if (!domain->configured) {
+                resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
+                return -EINVAL;
+        }
+
+        if (domain->started) {
+                resp->status = DLB2_ST_DOMAIN_STARTED;
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+static void dlb2_log_start_domain(struct dlb2_hw *hw,
+                                  u32 domain_id,
+                                  bool vdev_req,
+                                  unsigned int vdev_id)
+{
+        DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
+        if (vdev_req)
+                DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
+        DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
+}
+
+/**
+ * dlb2_hw_start_domain() - Lock the domain configuration
+ * @hw: Contains the current state of the DLB2 hardware.
+ * @domain_id: Domain ID
+ * @arg: User-provided arguments (unused, here for ioctl callback template).
+ * @resp: Response to user.
+ * @vdev_req: Request came from a virtual device.
+ * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
+ *
+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
+ * satisfy a request, resp->status will be set accordingly.
+ */
+int
+dlb2_hw_start_domain(struct dlb2_hw *hw,
+                     u32 domain_id,
+                     __attribute((unused)) struct dlb2_start_domain_args *arg,
+                     struct dlb2_cmd_response *resp,
+                     bool vdev_req,
+                     unsigned int vdev_id)
+{
+        struct dlb2_list_entry *iter;
+        struct dlb2_dir_pq_pair *dir_queue;
+        struct dlb2_ldb_queue *ldb_queue;
+        struct dlb2_hw_domain *domain;
+        int ret;
+        RTE_SET_USED(arg);
+        RTE_SET_USED(iter);
+
+        dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);
+
+        ret = dlb2_verify_start_domain_args(hw,
+                                            domain_id,
+                                            resp,
+                                            vdev_req,
+                                            vdev_id);
+        if (ret)
+                return ret;
+
+        domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+        if (domain == NULL) {
+                DLB2_HW_ERR(hw,
+                            "[%s():%d] Internal error: domain not found\n",
+                            __func__, __LINE__);
+                return -EFAULT;
+        }
+
+        /*
+         * Enable load-balanced and directed queue write permissions for the
+         * queues this domain owns. Without this, the DLB2 will drop all
+         * incoming traffic to those queues.
+         */
+        DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
+                union dlb2_sys_ldb_vasqid_v r0 = { {0} };
+                unsigned int offs;
+
+                r0.field.vasqid_v = 1;
+
+                offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
+                        ldb_queue->id.phys_id;
+
+                DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r0.val);
+        }
+
+        DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
+                union dlb2_sys_dir_vasqid_v r0 = { {0} };
+                unsigned int offs;
+
+                r0.field.vasqid_v = 1;
+
+                offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS +
+                        dir_queue->id.phys_id;
+
+                DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
+        }
+
+        dlb2_flush_csr(hw);
+
+        domain->started = true;
+
+        resp->status = 0;
+
+        return 0;
+}
diff --git a/drivers/event/dlb2/pf/dlb2_main.c b/drivers/event/dlb2/pf/dlb2_main.c
index a010e25..aee8336 100644
--- a/drivers/event/dlb2/pf/dlb2_main.c
+++ b/drivers/event/dlb2/pf/dlb2_main.c
@@ -661,3 +661,13 @@ dlb2_pf_create_dir_queue(struct dlb2_hw *hw,
         return dlb2_hw_create_dir_queue(hw, id, args, resp, NOT_VF_REQ,
                                         PF_ID_ZERO);
 }
+
+int
+dlb2_pf_start_domain(struct dlb2_hw *hw,
+                     u32 id,
+                     struct dlb2_start_domain_args *args,
+                     struct dlb2_cmd_response *resp)
+{
+        return dlb2_hw_start_domain(hw, id, args, resp, NOT_VF_REQ,
+                                    PF_ID_ZERO);
+}
diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c
index 33ea73e..fff2e57 100644
--- a/drivers/event/dlb2/pf/dlb2_pf.c
+++ b/drivers/event/dlb2/pf/dlb2_pf.c
@@ -503,6 +503,30 @@ dlb2_pf_pending_port_unmaps(struct dlb2_hw_dev *handle,
         return ret;
 }
 
+
+static int
+dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,
+                           struct dlb2_start_domain_args *cfg)
+{
+        struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
+        struct dlb2_cmd_response response = {0};
+        int ret;
+
+        DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);
+
+        ret = dlb2_pf_start_domain(&dlb2_dev->hw,
+                                   handle->domain_id,
+                                   cfg,
+                                   &response);
+
+        cfg->response = response;
+
+        DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
+                  __func__, ret);
+
+        return ret;
+}
+
 static void
 dlb2_pf_iface_fn_ptrs_init(void)
 {
@@ -520,6 +544,7 @@ dlb2_pf_iface_fn_ptrs_init(void)
         dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
         dlb2_iface_map_qid = dlb2_pf_map_qid;
         dlb2_iface_unmap_qid = dlb2_pf_unmap_qid;
+        dlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;
         dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;
         dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
         dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;
-- 
2.6.4
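
[Editor's note, not part of the patch] For readers unfamiliar with the
eventdev API flow: the sketch below shows the application-side call sequence
that reaches the new dev_start entry point. Queues and ports are set up and
linked first; rte_event_dev_start() is where the DLB2 PMD applies the
deferred links and starts the scheduling domain. The device ID, the
single-queue/single-port configuration values, and the helper name are
assumptions chosen for illustration only.

    /* Illustrative sketch only (not part of the patch). Assumes event
     * device 0 is the DLB2 device and that the PMD's default queue/port
     * configurations are acceptable.
     */
    #include <rte_eventdev.h>

    static int
    start_single_port_eventdev(void)        /* hypothetical helper */
    {
            const uint8_t dev_id = 0;        /* assumed device ID */
            struct rte_event_dev_info info;
            struct rte_event_dev_config cfg = {0};
            int ret;

            ret = rte_event_dev_info_get(dev_id, &info);
            if (ret < 0)
                    return ret;

            /* One queue and one port; limits taken from the device info. */
            cfg.nb_event_queues = 1;
            cfg.nb_event_ports = 1;
            cfg.nb_events_limit = info.max_num_events;
            cfg.nb_event_queue_flows = info.max_event_queue_flows;
            cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
            cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
            cfg.dequeue_timeout_ns = info.max_dequeue_timeout_ns;

            ret = rte_event_dev_configure(dev_id, &cfg);
            if (ret < 0)
                    return ret;

            /* NULL selects the PMD's default queue/port configuration. */
            ret = rte_event_queue_setup(dev_id, 0, NULL);
            if (ret < 0)
                    return ret;

            ret = rte_event_port_setup(dev_id, 0, NULL);
            if (ret < 0)
                    return ret;

            /* A NULL queue list links port 0 to every configured queue; the
             * DLB2 PMD records the link and applies it at start time.
             */
            ret = rte_event_port_link(dev_id, 0, NULL, NULL, 0);
            if (ret < 0)
                    return ret;

            /* Invokes the PMD's dev_start callback (dlb2_eventdev_start). */
            return rte_event_dev_start(dev_id);
    }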