From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 83C78A04DD; Fri, 23 Oct 2020 20:34:39 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 13E4272F2; Fri, 23 Oct 2020 20:29:10 +0200 (CEST) Received: from mga07.intel.com (mga07.intel.com [134.134.136.100]) by dpdk.org (Postfix) with ESMTP id 539435AB5 for ; Fri, 23 Oct 2020 20:28:48 +0200 (CEST) IronPort-SDR: fESoW7jx8KD+kF2G6VJ+hB9jS84Rst0C0zdwqJSSbclmL45+GJBArk0PPoV7hi71LMl95wpJWM xwGRMYcgRKDA== X-IronPort-AV: E=McAfee;i="6000,8403,9783"; a="231907482" X-IronPort-AV: E=Sophos;i="5.77,409,1596524400"; d="scan'208";a="231907482" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga005.fm.intel.com ([10.253.24.32]) by orsmga105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Oct 2020 11:28:47 -0700 IronPort-SDR: XBp6VGupijlyvJuAtuxS33zdaRbPqRFEV/HvckZTz9w+6aD1vvqSiZ8lWL6nnG4HJ3IHLxatEW n6ByqSlf6Q5A== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.77,409,1596524400"; d="scan'208";a="524764073" Received: from txasoft-yocto.an.intel.com ([10.123.72.192]) by fmsmga005.fm.intel.com with ESMTP; 23 Oct 2020 11:28:46 -0700 From: Timothy McDaniel To: Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com, harry.van.haaren@intel.com, jerinj@marvell.com Date: Fri, 23 Oct 2020 13:30:18 -0500 Message-Id: <1603477826-31374-16-git-send-email-timothy.mcdaniel@intel.com> X-Mailer: git-send-email 1.7.10 In-Reply-To: <1603477826-31374-1-git-send-email-timothy.mcdaniel@intel.com> References: <1599855987-25976-2-git-send-email-timothy.mcdaniel@intel.com> <1603477826-31374-1-git-send-email-timothy.mcdaniel@intel.com> Subject: [dpdk-dev] [PATCH v3 15/23] event/dlb2: add port unlink and port unlinks in progress X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , 
List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Add support for the port unlink(s) eventdev entry points. The unlink operation is an asynchronous operation executed by a control thread, and the unlinks-in-progress function reads a counter shared with the control thread. Signed-off-by: Timothy McDaniel Reviewed-by: Gage Eads --- drivers/event/dlb2/dlb2.c | 163 +++++++++++++++++ drivers/event/dlb2/dlb2_iface.c | 6 + drivers/event/dlb2/dlb2_iface.h | 6 + drivers/event/dlb2/pf/base/dlb2_resource.c | 283 +++++++++++++++++++++++++++++ drivers/event/dlb2/pf/dlb2_pf.c | 51 ++++++ 5 files changed, 509 insertions(+) diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c index c6593c7..4ef7b6d 100644 --- a/drivers/event/dlb2/dlb2.c +++ b/drivers/event/dlb2/dlb2.c @@ -1797,6 +1797,166 @@ dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port, return i; } +static int16_t +dlb2_hw_unmap_ldb_qid_from_port(struct dlb2_hw_dev *handle, + uint32_t qm_port_id, + uint16_t qm_qid) +{ + struct dlb2_unmap_qid_args cfg; + int32_t ret; + + if (handle == NULL) + return -EINVAL; + + cfg.port_id = qm_port_id; + cfg.qid = qm_qid; + + ret = dlb2_iface_unmap_qid(handle, &cfg); + if (ret < 0) + DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)\n", + ret, dlb2_error_strings[cfg.response.status]); + + return ret; +} + +static int +dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2, + struct dlb2_eventdev_port *ev_port, + struct dlb2_eventdev_queue *ev_queue) +{ + int ret, i; + + /* Don't unlink until start time. */ + if (dlb2->run_state == DLB2_RUN_STATE_STOPPED) + return 0; + + for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) { + if (ev_port->link[i].valid && + ev_port->link[i].queue_id == ev_queue->id) + break; /* found */ + } + + /* This is expected with eventdev API! + * It blindly attempts to unmap all queues. 
+ */ + if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) { + DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n", + ev_queue->qm_queue.id, + ev_port->qm_port.id); + return 0; + } + + ret = dlb2_hw_unmap_ldb_qid_from_port(&dlb2->qm_instance, + ev_port->qm_port.id, + ev_queue->qm_queue.id); + if (!ret) + ev_port->link[i].mapped = false; + + return ret; +} + +static int +dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port, + uint8_t queues[], uint16_t nb_unlinks) +{ + struct dlb2_eventdev_port *ev_port = event_port; + struct dlb2_eventdev *dlb2; + int i; + + RTE_SET_USED(dev); + + if (!ev_port->setup_done) { + DLB2_LOG_ERR("dlb2: evport %d is not configured\n", + ev_port->id); + rte_errno = -EINVAL; + return 0; + } + + if (queues == NULL || nb_unlinks == 0) { + DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0\n"); + return 0; /* Ignore and return success */ + } + + /* FIXME: How to handle unlink on a directed port? */ + if (ev_port->qm_port.is_directed) { + DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d\n", + ev_port->id); + rte_errno = 0; + return nb_unlinks; /* as if success */ + } + + dlb2 = ev_port->dlb2; + + for (i = 0; i < nb_unlinks; i++) { + struct dlb2_eventdev_queue *ev_queue; + int ret, j; + + if (queues[i] >= dlb2->num_queues) { + DLB2_LOG_ERR("dlb2: invalid queue id %d\n", queues[i]); + rte_errno = -EINVAL; + return i; /* return index of offending queue */ + } + + ev_queue = &dlb2->ev_queues[queues[i]]; + + /* Does a link exist? 
*/ + for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) + if (ev_port->link[j].queue_id == queues[i] && + ev_port->link[j].valid) + break; + + if (j == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) + continue; + + ret = dlb2_event_queue_detach_ldb(dlb2, ev_port, ev_queue); + if (ret) { + DLB2_LOG_ERR("unlink err=%d for port %d queue %d\n", + ret, ev_port->id, queues[i]); + rte_errno = -ENOENT; + return i; /* return index of offending queue */ + } + + ev_port->link[j].valid = false; + ev_port->num_links--; + ev_queue->num_links--; + } + + return nb_unlinks; +} + +static int +dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev, + void *event_port) +{ + struct dlb2_eventdev_port *ev_port = event_port; + struct dlb2_eventdev *dlb2; + struct dlb2_hw_dev *handle; + struct dlb2_pending_port_unmaps_args cfg; + int ret; + + RTE_SET_USED(dev); + + if (!ev_port->setup_done) { + DLB2_LOG_ERR("dlb2: evport %d is not configured\n", + ev_port->id); + rte_errno = -EINVAL; + return 0; + } + + cfg.port_id = ev_port->qm_port.id; + dlb2 = ev_port->dlb2; + handle = &dlb2->qm_instance; + ret = dlb2_iface_pending_port_unmaps(handle, &cfg); + + if (ret < 0) { + DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)\n", + ret, dlb2_error_strings[cfg.response.status]); + return ret; + } + + return cfg.response.id; +} + static void dlb2_entry_points_init(struct rte_eventdev *dev) { @@ -1809,6 +1969,9 @@ dlb2_entry_points_init(struct rte_eventdev *dev) .port_def_conf = dlb2_eventdev_port_default_conf_get, .port_setup = dlb2_eventdev_port_setup, .port_link = dlb2_eventdev_port_link, + .port_unlink = dlb2_eventdev_port_unlink, + .port_unlinks_in_progress = + dlb2_eventdev_port_unlinks_in_progress, .dump = dlb2_eventdev_dump, .xstats_get = dlb2_eventdev_xstats_get, .xstats_get_names = dlb2_eventdev_xstats_get_names, diff --git a/drivers/event/dlb2/dlb2_iface.c b/drivers/event/dlb2/dlb2_iface.c index 7d1ba73..bd241f3 100644 --- a/drivers/event/dlb2/dlb2_iface.c +++ 
b/drivers/event/dlb2/dlb2_iface.c @@ -57,3 +57,9 @@ int (*dlb2_iface_dir_queue_create)(struct dlb2_hw_dev *handle, int (*dlb2_iface_map_qid)(struct dlb2_hw_dev *handle, struct dlb2_map_qid_args *cfg); + +int (*dlb2_iface_unmap_qid)(struct dlb2_hw_dev *handle, + struct dlb2_unmap_qid_args *cfg); + +int (*dlb2_iface_pending_port_unmaps)(struct dlb2_hw_dev *handle, + struct dlb2_pending_port_unmaps_args *args); diff --git a/drivers/event/dlb2/dlb2_iface.h b/drivers/event/dlb2/dlb2_iface.h index 4eddd14..e892036 100644 --- a/drivers/event/dlb2/dlb2_iface.h +++ b/drivers/event/dlb2/dlb2_iface.h @@ -56,4 +56,10 @@ extern int (*dlb2_iface_dir_queue_create)(struct dlb2_hw_dev *handle, extern int (*dlb2_iface_map_qid)(struct dlb2_hw_dev *handle, struct dlb2_map_qid_args *cfg); + +extern int (*dlb2_iface_unmap_qid)(struct dlb2_hw_dev *handle, + struct dlb2_unmap_qid_args *cfg); + +extern int (*dlb2_iface_pending_port_unmaps)(struct dlb2_hw_dev *handle, + struct dlb2_pending_port_unmaps_args *args); #endif /* _DLB2_IFACE_H_ */ diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c index 14ad262..4a3d96d 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.c +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c @@ -5528,3 +5528,286 @@ int dlb2_hw_map_qid(struct dlb2_hw *hw, return 0; } + +static void dlb2_log_unmap_qid(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_unmap_qid_args *args, + bool vdev_req, + unsigned int vdev_id) +{ + DLB2_HW_DBG(hw, "DLB2 unmap QID arguments:\n"); + if (vdev_req) + DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id); + DLB2_HW_DBG(hw, "\tDomain ID: %d\n", + domain_id); + DLB2_HW_DBG(hw, "\tPort ID: %d\n", + args->port_id); + DLB2_HW_DBG(hw, "\tQueue ID: %d\n", + args->qid); + if (args->qid < DLB2_MAX_NUM_LDB_QUEUES) + DLB2_HW_DBG(hw, "\tQueue's num mappings: %d\n", + hw->rsrcs.ldb_queues[args->qid].num_mappings); +} + +static int dlb2_verify_unmap_qid_args(struct dlb2_hw *hw, + u32 domain_id, + 
struct dlb2_unmap_qid_args *args, + struct dlb2_cmd_response *resp, + bool vdev_req, + unsigned int vdev_id) +{ + enum dlb2_qid_map_state state; + struct dlb2_hw_domain *domain; + struct dlb2_ldb_queue *queue; + struct dlb2_ldb_port *port; + int slot; + int id; + + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); + + if (domain == NULL) { + resp->status = DLB2_ST_INVALID_DOMAIN_ID; + return -EINVAL; + } + + if (!domain->configured) { + resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED; + return -EINVAL; + } + + id = args->port_id; + + port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain); + + if (port == NULL || !port->configured) { + resp->status = DLB2_ST_INVALID_PORT_ID; + return -EINVAL; + } + + if (port->domain_id.phys_id != domain->id.phys_id) { + resp->status = DLB2_ST_INVALID_PORT_ID; + return -EINVAL; + } + + queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain); + + if (queue == NULL || !queue->configured) { + DLB2_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n", + __func__, args->qid); + resp->status = DLB2_ST_INVALID_QID; + return -EINVAL; + } + + /* + * Verify that the port has the queue mapped. From the application's + * perspective a queue is mapped if it is actually mapped, the map is + * in progress, or the map is blocked pending an unmap. 
+ */ + state = DLB2_QUEUE_MAPPED; + if (dlb2_port_find_slot_queue(port, state, queue, &slot)) + return 0; + + state = DLB2_QUEUE_MAP_IN_PROG; + if (dlb2_port_find_slot_queue(port, state, queue, &slot)) + return 0; + + if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &slot)) + return 0; + + resp->status = DLB2_ST_INVALID_QID; + return -EINVAL; +} + +int dlb2_hw_unmap_qid(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_unmap_qid_args *args, + struct dlb2_cmd_response *resp, + bool vdev_req, + unsigned int vdev_id) +{ + struct dlb2_hw_domain *domain; + struct dlb2_ldb_queue *queue; + enum dlb2_qid_map_state st; + struct dlb2_ldb_port *port; + bool unmap_complete; + int i, ret, id; + + dlb2_log_unmap_qid(hw, domain_id, args, vdev_req, vdev_id); + + /* + * Verify that hardware resources are available before attempting to + * satisfy the request. This simplifies the error unwinding code. + */ + ret = dlb2_verify_unmap_qid_args(hw, + domain_id, + args, + resp, + vdev_req, + vdev_id); + if (ret) + return ret; + + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); + if (domain == NULL) { + DLB2_HW_ERR(hw, + "[%s():%d] Internal error: domain not found\n", + __func__, __LINE__); + return -EFAULT; + } + + id = args->port_id; + + port = dlb2_get_domain_used_ldb_port(id, vdev_req, domain); + if (port == NULL) { + DLB2_HW_ERR(hw, + "[%s():%d] Internal error: port not found\n", + __func__, __LINE__); + return -EFAULT; + } + + queue = dlb2_get_domain_ldb_queue(args->qid, vdev_req, domain); + if (queue == NULL) { + DLB2_HW_ERR(hw, + "[%s():%d] Internal error: queue not found\n", + __func__, __LINE__); + return -EFAULT; + } + + /* + * If the queue hasn't been mapped yet, we need to update the slot's + * state and re-enable the queue's inflights. 
+ */ + st = DLB2_QUEUE_MAP_IN_PROG; + if (dlb2_port_find_slot_queue(port, st, queue, &i)) { + if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) { + DLB2_HW_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + /* + * Since the in-progress map was aborted, re-enable the QID's + * inflights. + */ + if (queue->num_pending_additions == 0) + dlb2_ldb_queue_set_inflight_limit(hw, queue); + + st = DLB2_QUEUE_UNMAPPED; + ret = dlb2_port_slot_state_transition(hw, port, queue, i, st); + if (ret) + return ret; + + goto unmap_qid_done; + } + + /* + * If the queue mapping is on hold pending an unmap, we simply need to + * update the slot's state. + */ + if (dlb2_port_find_slot_with_pending_map_queue(port, queue, &i)) { + if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) { + DLB2_HW_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + st = DLB2_QUEUE_UNMAP_IN_PROG; + ret = dlb2_port_slot_state_transition(hw, port, queue, i, st); + if (ret) + return ret; + + goto unmap_qid_done; + } + + st = DLB2_QUEUE_MAPPED; + if (!dlb2_port_find_slot_queue(port, st, queue, &i)) { + DLB2_HW_ERR(hw, + "[%s()] Internal error: no available CQ slots\n", + __func__); + return -EFAULT; + } + + if (i >= DLB2_MAX_NUM_QIDS_PER_LDB_CQ) { + DLB2_HW_ERR(hw, + "[%s():%d] Internal error: port slot tracking failed\n", + __func__, __LINE__); + return -EFAULT; + } + + /* + * QID->CQ mapping removal is an asynchronous procedure. It requires + * stopping the DLB2 from scheduling this CQ, draining all inflights + * from the CQ, then unmapping the queue from the CQ. This function + * simply marks the port as needing the queue unmapped, and (if + * necessary) starts the unmapping worker thread. 
+ */ + dlb2_ldb_port_cq_disable(hw, port); + + st = DLB2_QUEUE_UNMAP_IN_PROG; + ret = dlb2_port_slot_state_transition(hw, port, queue, i, st); + if (ret) + return ret; + + /* + * Attempt to finish the unmapping now, in case the port has no + * outstanding inflights. If that's not the case, this will fail and + * the unmapping will be completed at a later time. + */ + unmap_complete = dlb2_domain_finish_unmap_port(hw, domain, port); + + /* + * If the unmapping couldn't complete immediately, launch the worker + * thread (if it isn't already launched) to finish it later. + */ + if (!unmap_complete && !os_worker_active(hw)) + os_schedule_work(hw); + +unmap_qid_done: + resp->status = 0; + + return 0; +} + +static void +dlb2_log_pending_port_unmaps_args(struct dlb2_hw *hw, + struct dlb2_pending_port_unmaps_args *args, + bool vdev_req, + unsigned int vdev_id) +{ + DLB2_HW_DBG(hw, "DLB unmaps in progress arguments:\n"); + if (vdev_req) + DLB2_HW_DBG(hw, "(Request from VF %d)\n", vdev_id); + DLB2_HW_DBG(hw, "\tPort ID: %d\n", args->port_id); +} + +int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_pending_port_unmaps_args *args, + struct dlb2_cmd_response *resp, + bool vdev_req, + unsigned int vdev_id) +{ + struct dlb2_hw_domain *domain; + struct dlb2_ldb_port *port; + + dlb2_log_pending_port_unmaps_args(hw, args, vdev_req, vdev_id); + + domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); + + if (domain == NULL) { + resp->status = DLB2_ST_INVALID_DOMAIN_ID; + return -EINVAL; + } + + port = dlb2_get_domain_used_ldb_port(args->port_id, vdev_req, domain); + if (port == NULL || !port->configured) { + resp->status = DLB2_ST_INVALID_PORT_ID; + return -EINVAL; + } + + resp->id = port->num_pending_removals; + + return 0; +} diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c index 0f54b8d..1ea5fa5 100644 --- a/drivers/event/dlb2/pf/dlb2_pf.c +++ b/drivers/event/dlb2/pf/dlb2_pf.c @@ -454,6 +454,55 @@ 
dlb2_pf_map_qid(struct dlb2_hw_dev *handle, return ret; } +static int +dlb2_pf_unmap_qid(struct dlb2_hw_dev *handle, + struct dlb2_unmap_qid_args *cfg) +{ + struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev; + struct dlb2_cmd_response response = {0}; + int ret; + + DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__); + + ret = dlb2_hw_unmap_qid(&dlb2_dev->hw, + handle->domain_id, + cfg, + &response, + false, + 0); + + cfg->response = response; + + DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n", + __func__, ret); + + return ret; +} + +static int +dlb2_pf_pending_port_unmaps(struct dlb2_hw_dev *handle, + struct dlb2_pending_port_unmaps_args *args) +{ + struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev; + struct dlb2_cmd_response response = {0}; + int ret; + + DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__); + + ret = dlb2_hw_pending_port_unmaps(&dlb2_dev->hw, + handle->domain_id, + args, + &response, + false, + 0); + + args->response = response; + + DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n", + __func__, ret); + + return ret; +} static void dlb2_pf_iface_fn_ptrs_init(void) { @@ -470,6 +519,8 @@ dlb2_pf_iface_fn_ptrs_init(void) dlb2_iface_dir_queue_create = dlb2_pf_dir_queue_create; dlb2_iface_dir_port_create = dlb2_pf_dir_port_create; dlb2_iface_map_qid = dlb2_pf_map_qid; + dlb2_iface_unmap_qid = dlb2_pf_unmap_qid; + dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps; dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation; dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation; dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy; -- 2.6.4