From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 8578DA04DF;
	Fri, 30 Oct 2020 10:44:31 +0100 (CET)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id 8422BC806;
	Fri, 30 Oct 2020 10:40:11 +0100 (CET)
Received: from mga06.intel.com (mga06.intel.com [134.134.136.31])
 by dpdk.org (Postfix) with ESMTP id 542EDBE4B
 for <dev@dpdk.org>; Fri, 30 Oct 2020 10:39:38 +0100 (CET)
IronPort-SDR: X/zOLFKZb9g3vDg/3rcaZ7ykuVoFcjs5537SLiTgphtg1Blc9vMR3qGkI8+eOXsWLcv6Z1ynj5
 S8n7YSbnpJsA==
X-IronPort-AV: E=McAfee;i="6000,8403,9789"; a="230219322"
X-IronPort-AV: E=Sophos;i="5.77,432,1596524400"; d="scan'208";a="230219322"
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from fmsmga005.fm.intel.com ([10.253.24.32])
 by orsmga104.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;
 30 Oct 2020 02:39:37 -0700
IronPort-SDR: p6/tspvPXipIpYuWDkYCt50PGUseJv4nsk/AuRyydLPFQGaP+FmYxnRxowfX545xvc0fozgQIl
 5XtnFQeiCc5Q==
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.77,432,1596524400"; d="scan'208";a="527062110"
Received: from txasoft-yocto.an.intel.com ([10.123.72.192])
 by fmsmga005.fm.intel.com with ESMTP; 30 Oct 2020 02:39:37 -0700
From: Timothy McDaniel <timothy.mcdaniel@intel.com>
To: 
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
 harry.van.haaren@intel.com, jerinj@marvell.com, thomas@monjalon.net
Date: Fri, 30 Oct 2020 04:41:03 -0500
Message-Id: <1604050872-24997-16-git-send-email-timothy.mcdaniel@intel.com>
X-Mailer: git-send-email 1.7.10
In-Reply-To: <1604050872-24997-1-git-send-email-timothy.mcdaniel@intel.com>
References: <20200612212434.6852-2-timothy.mcdaniel@intel.com>
 <1604050872-24997-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH v8 15/23] event/dlb: add port unlink and port
	unlinks in progress
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

Add support for the port unlink(s) eventdev entry points.
The unlink operation is an asynchronous operation executed by
a control thread, and the unlinks-in-progress function reads
a counter shared with the control thread. Port QE and memzone
memory is freed here.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
Reviewed-by: Gage Eads <gage.eads@intel.com>
---
 drivers/event/dlb/dlb.c | 166 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 166 insertions(+)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index 2ad195d..c64f559 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -693,6 +693,169 @@ dlb_eventdev_configure(const struct rte_eventdev *dev)
 	return 0;
 }
 
+/* Request that the hardware interface layer unmap (unlink) load-balanced
+ * queue @qm_qid from port @qm_port_id. Returns 0 on success, -EINVAL if
+ * @handle is NULL, or the negative status from dlb_iface_unmap_qid().
+ */
+static int16_t
+dlb_hw_unmap_ldb_qid_from_port(struct dlb_hw_dev *handle,
+			       uint32_t qm_port_id,
+			       uint16_t qm_qid)
+{
+	struct dlb_unmap_qid_args cfg;
+	struct dlb_cmd_response response;
+	int32_t ret;
+
+	if (handle == NULL)
+		return -EINVAL;
+
+	/* The interface layer writes its detailed status into @response. */
+	cfg.response = (uintptr_t)&response;
+	cfg.port_id = qm_port_id;
+	cfg.qid = qm_qid;
+
+	ret = dlb_iface_unmap_qid(handle, &cfg);
+	if (ret < 0)
+		/* NOTE(review): response.status is only meaningful if the
+		 * iface call got far enough to populate it — confirm it is
+		 * always initialized on the error path.
+		 */
+		DLB_LOG_ERR("dlb: unmap qid error, ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+
+	return ret;
+}
+
+/* Detach load-balanced queue @ev_queue from @ev_port: locate the matching
+ * entry in the port's link[] table and ask the hardware to unmap it.
+ * A queue that was never linked is not an error (the eventdev API unlinks
+ * blindly), and nothing is done before the device has been started.
+ * Returns 0 on success or the negative status of the hardware unmap.
+ */
+static int
+dlb_event_queue_detach_ldb(struct dlb_eventdev *dlb,
+			   struct dlb_eventdev_port *ev_port,
+			   struct dlb_eventdev_queue *ev_queue)
+{
+	int ret, i;
+
+	/* Don't unlink until start time. */
+	if (dlb->run_state == DLB_RUN_STATE_STOPPED)
+		return 0;
+
+	/* Find this queue's slot in the port's link table. */
+	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+		if (ev_port->link[i].valid &&
+		    ev_port->link[i].queue_id == ev_queue->id)
+			break; /* found */
+	}
+
+	/* This is expected with eventdev API!
+	 * It blindly attempts to unmap all queues.
+	 */
+	if (i == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
+		DLB_LOG_DBG("dlb: ignoring LB QID %d not mapped for qm_port %d.\n",
+			    ev_queue->qm_queue.id,
+			    ev_port->qm_port.id);
+		return 0;
+	}
+
+	ret = dlb_hw_unmap_ldb_qid_from_port(&dlb->qm_instance,
+					     ev_port->qm_port.id,
+					     ev_queue->qm_queue.id);
+	/* Only mark the link unmapped if the hardware accepted the request. */
+	if (!ret)
+		ev_port->link[i].mapped = false;
+
+	return ret;
+}
+
+/* Eventdev port_unlink entry point: unlink up to @nb_unlinks queues (given
+ * by id in @queues[]) from @event_port. Returns the number of queues
+ * processed on success. On failure, returns the index of the offending
+ * queue and sets rte_errno; a count of 0 with rte_errno set means the port
+ * itself was unusable. Queues that are not currently linked are skipped
+ * and still counted, matching the blind-unlink eventdev semantics.
+ */
+static int
+dlb_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
+			 uint8_t queues[], uint16_t nb_unlinks)
+{
+	struct dlb_eventdev_port *ev_port = event_port;
+	struct dlb_eventdev *dlb;
+	int i;
+
+	RTE_SET_USED(dev);
+
+	/* The port must have been set up before any link bookkeeping exists. */
+	if (!ev_port->setup_done) {
+		DLB_LOG_ERR("dlb: evport %d is not configured\n",
+			    ev_port->id);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	if (queues == NULL || nb_unlinks == 0) {
+		DLB_LOG_DBG("dlb: queues is NULL or nb_unlinks is 0\n");
+		return 0; /* Ignore and return success */
+	}
+
+	/* Directed ports have no load-balanced links to remove; report the
+	 * whole request as satisfied.
+	 */
+	if (ev_port->qm_port.is_directed) {
+		DLB_LOG_DBG("dlb: ignore unlink from dir port %d\n",
+			    ev_port->id);
+		rte_errno = 0;
+		return nb_unlinks; /* as if success */
+	}
+
+	dlb = ev_port->dlb;
+
+	for (i = 0; i < nb_unlinks; i++) {
+		struct dlb_eventdev_queue *ev_queue;
+		int ret, j;
+
+		if (queues[i] >= dlb->num_queues) {
+			DLB_LOG_ERR("dlb: invalid queue id %d\n", queues[i]);
+			rte_errno = -EINVAL;
+			return i; /* return index of offending queue */
+		}
+
+		ev_queue = &dlb->ev_queues[queues[i]];
+
+		/* Does a link exist? */
+		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
+			if (ev_port->link[j].queue_id == queues[i] &&
+			    ev_port->link[j].valid)
+				break;
+
+		/* Not linked: nothing to undo for this queue. */
+		if (j == DLB_MAX_NUM_QIDS_PER_LDB_CQ)
+			continue;
+
+		ret = dlb_event_queue_detach_ldb(dlb, ev_port, ev_queue);
+		if (ret) {
+			DLB_LOG_ERR("unlink err=%d for port %d queue %d\n",
+				    ret, ev_port->id, queues[i]);
+			rte_errno = -ENOENT;
+			return i; /* return index of offending queue */
+		}
+
+		/* Hardware unmap succeeded: update software link counts. */
+		ev_port->link[j].valid = false;
+		ev_port->num_links--;
+		ev_queue->num_links--;
+	}
+
+	return nb_unlinks;
+}
+
+/* Eventdev port_unlinks_in_progress entry point: query the hardware
+ * interface for the number of unmap (unlink) operations still pending on
+ * @event_port. Unlinks are completed asynchronously by a control thread,
+ * so callers poll this until it reaches zero. Returns the pending count
+ * (response.id) on success; on failure returns the negative iface status,
+ * or 0 with rte_errno set if the port is not configured.
+ */
+static int
+dlb_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
+				      void *event_port)
+{
+	struct dlb_eventdev_port *ev_port = event_port;
+	struct dlb_eventdev *dlb;
+	struct dlb_hw_dev *handle;
+	struct dlb_pending_port_unmaps_args cfg;
+	struct dlb_cmd_response response;
+	int ret;
+
+	RTE_SET_USED(dev);
+
+	if (!ev_port->setup_done) {
+		DLB_LOG_ERR("dlb: evport %d is not configured\n",
+			    ev_port->id);
+		rte_errno = -EINVAL;
+		return 0;
+	}
+
+	/* The interface layer reports the pending count via @response. */
+	cfg.port_id = ev_port->qm_port.id;
+	cfg.response = (uintptr_t)&response;
+	dlb = ev_port->dlb;
+	handle = &dlb->qm_instance;
+	ret = dlb_iface_pending_port_unmaps(handle, &cfg);
+
+	if (ret < 0) {
+		/* NOTE(review): response.status is assumed valid on the
+		 * error path — confirm the iface layer always sets it.
+		 */
+		DLB_LOG_ERR("dlb: num_unlinks_in_progress ret=%d (driver status: %s)\n",
+			    ret, dlb_error_strings[response.status]);
+		return ret;
+	}
+
+	/* response.id carries the number of unmaps still in flight. */
+	return response.id;
+}
+
 static void
 dlb_eventdev_port_default_conf_get(struct rte_eventdev *dev,
 				   uint8_t port_id,
@@ -1848,6 +2011,9 @@ dlb_entry_points_init(struct rte_eventdev *dev)
 		.queue_setup      = dlb_eventdev_queue_setup,
 		.port_setup       = dlb_eventdev_port_setup,
 		.port_link        = dlb_eventdev_port_link,
+		.port_unlink      = dlb_eventdev_port_unlink,
+		.port_unlinks_in_progress =
+				    dlb_eventdev_port_unlinks_in_progress,
 		.dump             = dlb_eventdev_dump,
 		.xstats_get       = dlb_eventdev_xstats_get,
 		.xstats_get_names = dlb_eventdev_xstats_get_names,
-- 
2.6.4