From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>
To: jerinj@marvell.com
Cc: dev@dpdk.org, gage.eads@intel.com, harry.van.haaren@intel.com
Date: Fri, 12 Jun 2020 16:24:29 -0500
Message-Id: <20200612212434.6852-23-timothy.mcdaniel@intel.com>
In-Reply-To: <20200612212434.6852-1-timothy.mcdaniel@intel.com>
References: <20200612212434.6852-1-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH 22/27] event/dlb: add port_unlink and port_unlinks_in_progress

Change-Id: I06d4006786440a0454ce883dba0ce14640dfff92
Signed-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/dlb.c | 166 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 166 insertions(+)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index b1103ea95..2d7f4479a 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -862,6 +862,30 @@ dlb_hw_map_ldb_qid_to_port(struct dlb_hw_dev *handle,
 	return ret;
 }
 
+static int16_t
+dlb_hw_unmap_ldb_qid_from_port(struct dlb_hw_dev *handle,
+			       uint32_t qm_port_id,
+			       uint16_t qm_qid)
+{
+	struct dlb_unmap_qid_args cfg;
+	struct dlb_cmd_response response;
+	int32_t ret;
+
+	if (handle == NULL)
+		return -EINVAL;
+
+	cfg.response = (uintptr_t)&response;
+	cfg.port_id = qm_port_id;
+	cfg.qid = qm_qid;
+
+	ret = dlb_iface_unmap_qid(handle, &cfg);
+	if (ret < 0)
+		DLB_LOG_ERR("dlb: unmap qid error, ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+
+	return ret;
+}
+
 /* VDEV-only notes:
  * This function first unmaps all memory mappings and closes the
  * domain's file descriptor, which causes the driver to reset the
@@ -1905,6 +1929,42 @@ dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
 }
 
 static int
+dlb_event_queue_detach_ldb(struct dlb_eventdev *dlb,
+			   struct dlb_eventdev_port *ev_port,
+			   struct dlb_eventdev_queue *ev_queue)
+{
+	int ret, i;
+
+	/* Don't unlink until start time. */
+	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
+		return 0;
+
+	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+		if (ev_port->link[i].valid &&
+		    ev_port->link[i].queue_id == ev_queue->id)
+			break; /* found */
+	}
+
+	/* This is expected with the eventdev API!
+	 * It blindly attempts to unmap all queues.
+	 */
+	if (i == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
+		DLB_LOG_DBG("dlb: ignoring LB QID %d not mapped for qm_port %d.\n",
+			    ev_queue->qm_queue.id,
+			    ev_port->qm_port.id);
+		return 0;
+	}
+
+	ret = dlb_hw_unmap_ldb_qid_from_port(&dlb->qm_instance,
+					     ev_port->qm_port.id,
+					     ev_queue->qm_queue.id);
+	if (!ret)
+		ev_port->link[i].mapped = false;
+
+	return ret;
+}
+
+static int
 dlb_do_port_link(struct rte_eventdev *dev,
 		 struct dlb_eventdev_queue *ev_queue,
 		 struct dlb_eventdev_port *ev_port,
@@ -2477,6 +2537,109 @@ dlb_eventdev_port_setup(struct rte_eventdev *dev,
 	return 0;
 }
 
+static int
+dlb_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
+			 uint8_t queues[], uint16_t nb_unlinks)
+{
+	struct dlb_eventdev_port *ev_port = event_port;
+	struct dlb_eventdev *dlb;
+	int i;
+
+	RTE_SET_USED(dev);
+
+	if (!ev_port->setup_done) {
+		DLB_LOG_ERR("dlb: evport %d is not configured\n",
+			    ev_port->id);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	if (!queues || nb_unlinks == 0) {
+		DLB_LOG_DBG("dlb: queues is NULL or nb_unlinks is 0\n");
+		return 0; /* Ignore and return success */
+	}
+
+	if (ev_port->qm_port.is_directed) {
+		DLB_LOG_DBG("dlb: ignore unlink from dir port %d\n",
+			    ev_port->id);
+		rte_errno = 0;
+		return nb_unlinks; /* as if success */
+	}
+
+	dlb = ev_port->dlb;
+
+	for (i = 0; i < nb_unlinks; i++) {
+		struct dlb_eventdev_queue *ev_queue;
+		int ret, j;
+
+		if (queues[i] >= dlb->num_queues) {
+			DLB_LOG_ERR("dlb: invalid queue id %d\n", queues[i]);
+			rte_errno = -EINVAL;
+			return i; /* return index of offending queue */
+		}
+
+		ev_queue = &dlb->ev_queues[queues[i]];
+
+		/* Does a link exist? */
+		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+			if (ev_port->link[j].queue_id == queues[i] &&
+			    ev_port->link[j].valid)
+				break;
+
+		if (j == DLB_MAX_NUM_QIDS_PER_LDB_CQ)
+			continue;
+
+		ret = dlb_event_queue_detach_ldb(dlb, ev_port, ev_queue);
+		if (ret) {
+			DLB_LOG_ERR("unlink err=%d for port %d queue %d\n",
+				    ret, ev_port->id, queues[i]);
+			rte_errno = -ENOENT;
+			return i; /* return index of offending queue */
+		}
+
+		ev_port->link[j].valid = false;
+		ev_port->num_links--;
+		ev_queue->num_links--;
+	}
+
+	return nb_unlinks;
+}
+
+static int
+dlb_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
+				      void *event_port)
+{
+	struct dlb_eventdev_port *ev_port = event_port;
+	struct dlb_eventdev *dlb;
+	struct dlb_hw_dev *handle;
+	struct dlb_pending_port_unmaps_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	RTE_SET_USED(dev);
+
+	if (!ev_port->setup_done) {
+		DLB_LOG_ERR("dlb: evport %d is not configured\n",
+			    ev_port->id);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	cfg.port_id = ev_port->qm_port.id;
+	cfg.response = (uintptr_t)&response;
+	dlb = ev_port->dlb;
+	handle = &dlb->qm_instance;
+	ret = dlb_iface_pending_port_unmaps(handle, &cfg);
+
+	if (ret < 0) {
+		DLB_LOG_ERR("dlb: num_unlinks_in_progress ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	return response.id;
+}
+
 static void
 dlb_eventdev_port_release(void *port)
 {
@@ -2583,6 +2746,9 @@ dlb_entry_points_init(struct rte_eventdev *dev)
 		.port_setup = dlb_eventdev_port_setup,
 		.port_release = dlb_eventdev_port_release,
 		.port_link = dlb_eventdev_port_link,
+		.port_unlink = dlb_eventdev_port_unlink,
+		.port_unlinks_in_progress =
+			dlb_eventdev_port_unlinks_in_progress,
 	};
 
 	/* Expose PMD's eventdev interface */
-- 
2.13.6
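
For reference, the sketch below shows one way an application might reach these new ops through the public eventdev API, rte_event_port_unlink() and rte_event_port_unlinks_in_progress(). It is illustrative only and not part of the patch: the device and port identifiers and the busy-wait loop are assumptions made for the example, and a real application would use the IDs from its own setup and its own completion strategy.

#include <stdint.h>

#include <rte_errno.h>
#include <rte_eventdev.h>
#include <rte_pause.h>

/* Illustrative identifiers; a real application discovers these during
 * its own eventdev configuration.
 */
#define EV_DEV_ID  0
#define EV_PORT_ID 0

/* Unlink one queue from an event port and wait until the PMD reports
 * that no unmap operations remain in flight.
 */
static int
unlink_queue_sync(uint8_t queue_id)
{
	uint8_t queues[1] = { queue_id };

	/* rte_event_port_unlink() reaches the PMD's port_unlink op
	 * (dlb_eventdev_port_unlink in this patch) and returns the number
	 * of unlinks it accepted.
	 */
	if (rte_event_port_unlink(EV_DEV_ID, EV_PORT_ID, queues, 1) != 1)
		return -1; /* rte_errno holds the PMD's failure reason */

	/* Unlinks can complete asynchronously, so poll the PMD's
	 * port_unlinks_in_progress op until it reports zero outstanding
	 * unmaps.
	 */
	while (rte_event_port_unlinks_in_progress(EV_DEV_ID, EV_PORT_ID) > 0)
		rte_pause();

	return 0;
}

The second call is the reason port_unlinks_in_progress exists: the driver can finish queue unmaps in the background, and an application should see zero pending unmaps before it re-links or reconfigures the queue.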