From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 33E4CA0548;
	Mon, 26 Apr 2021 19:46:23 +0200 (CEST)
Received: from [217.70.189.124] (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id 51E5841225;
	Mon, 26 Apr 2021 19:45:38 +0200 (CEST)
Received: from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com
 [67.231.148.174])
 by mails.dpdk.org (Postfix) with ESMTP id AF3CF41104
 for <dev@dpdk.org>; Mon, 26 Apr 2021 19:45:35 +0200 (CEST)
Received: from pps.filterd (m0045849.ppops.net [127.0.0.1])
 by mx0a-0016f401.pphosted.com (8.16.0.43/8.16.0.43) with SMTP id
 13QHjY7l009116 for <dev@dpdk.org>; Mon, 26 Apr 2021 10:45:35 -0700
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;
 h=from : to : cc :
 subject : date : message-id : in-reply-to : references : mime-version :
 content-transfer-encoding : content-type; s=pfpt0220;
 bh=ZpoeXjySFaUkuFBki6HH6/VLUkzclxPkLSKVAWLzJt8=;
 b=O4XLbUkJdXSoatpK4YWARPqIdO5FHjjXgiT9bksNYQpklObVOXWJnQS7g5eCnp0VOlYE
 RKj9vJMexd5RzIwozxGn2MTsJazvKbb8FsNyZA9REimmi4bFSXhwIOmJDBeEa8oN9i0v
 KhjpW0oxFqrVeE02gmegPa/uE4TO0RDkJfa6r9ZPXXs4LwtskBpwRTHhCbJWKcom8rcP
 iWOBC7M+IS9QHfsPf150piHX5o0qtnGanIVguOlhvC4gWT+KHHFygxmZmkxv+MP8s6wM
 P7gZJDYA6qvNhn6AcT1mg6UeRtNsYHB6gdmYAQ0tZoOoLfd4TDbAX8NR6jm0QK+vyTTq WA== 
Received: from dc5-exch01.marvell.com ([199.233.59.181])
 by mx0a-0016f401.pphosted.com with ESMTP id 385hfr2tkn-1
 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)
 for <dev@dpdk.org>; Mon, 26 Apr 2021 10:45:34 -0700
Received: from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH01.marvell.com
 (10.69.176.38) with Microsoft SMTP Server (TLS) id 15.0.1497.2;
 Mon, 26 Apr 2021 10:45:26 -0700
Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com
 (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.2 via Frontend
 Transport; Mon, 26 Apr 2021 10:45:26 -0700
Received: from BG-LT7430.marvell.com (BG-LT7430.marvell.com [10.28.177.176])
 by maili.marvell.com (Postfix) with ESMTP id 0C17D5B6C96;
 Mon, 26 Apr 2021 10:45:24 -0700 (PDT)
From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>, "Shijith
 Thotton" <sthotton@marvell.com>
CC: <dev@dpdk.org>
Date: Mon, 26 Apr 2021 23:14:17 +0530
Message-ID: <20210426174441.2302-11-pbhagavatula@marvell.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20210426174441.2302-1-pbhagavatula@marvell.com>
References: <20210306162942.6845-1-pbhagavatula@marvell.com>
 <20210426174441.2302-1-pbhagavatula@marvell.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Content-Type: text/plain
X-Proofpoint-ORIG-GUID: fsZQIoAUhNRtAehbGAo0XHun93dG64Zp
X-Proofpoint-GUID: fsZQIoAUhNRtAehbGAo0XHun93dG64Zp
X-Proofpoint-Virus-Version: vendor=fsecure engine=2.50.10434:6.0.391, 18.0.761
 definitions=2021-04-26_09:2021-04-26,
 2021-04-26 signatures=0
Subject: [dpdk-dev] [PATCH v2 10/33] event/cnxk: add port config functions
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

From: Shijith Thotton <sthotton@marvell.com>

Add SSO HWS aka event port setup and release functions.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c | 121 +++++++++++++++++++++++
 drivers/event/cnxk/cn9k_eventdev.c  | 147 ++++++++++++++++++++++++++++
 drivers/event/cnxk/cnxk_eventdev.c  |  65 ++++++++++++
 drivers/event/cnxk/cnxk_eventdev.h  |  91 +++++++++++++++++
 4 files changed, 424 insertions(+)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 47eb8898b..c60df7f7b 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -4,6 +4,91 @@
 
 #include "cnxk_eventdev.h"
 
+static void
+cn10k_init_hws_ops(struct cn10k_sso_hws *ws, uintptr_t base)
+{
+	ws->tag_wqe_op = base + SSOW_LF_GWS_WQE0;
+	ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
+	ws->updt_wqe_op = base + SSOW_LF_GWS_OP_UPD_WQP_GRP1;
+	ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
+	ws->swtag_untag_op = base + SSOW_LF_GWS_OP_SWTAG_UNTAG;
+	ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
+	ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
+}
+
+static uint32_t
+cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
+{
+	uint32_t wdata = BIT(16) | 1;
+
+	switch (dev->gw_mode) {
+	case CN10K_GW_MODE_NONE:
+	default:
+		break;
+	case CN10K_GW_MODE_PREF:
+		wdata |= BIT(19);
+		break;
+	case CN10K_GW_MODE_PREF_WFE:
+		wdata |= BIT(20) | BIT(19);
+		break;
+	}
+
+	return wdata;
+}
+
+static void *
+cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
+{
+	struct cnxk_sso_evdev *dev = arg;
+	struct cn10k_sso_hws *ws;
+
+	/* Allocate event port memory */
+	ws = rte_zmalloc("cn10k_ws",
+			 sizeof(struct cn10k_sso_hws) + RTE_CACHE_LINE_SIZE,
+			 RTE_CACHE_LINE_SIZE);
+	if (ws == NULL) {
+		plt_err("Failed to alloc memory for port=%d", port_id);
+		return NULL;
+	}
+
+	/* First cache line is reserved for cookie */
+	ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
+	ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
+	cn10k_init_hws_ops(ws, ws->base);
+	ws->hws_id = port_id;
+	ws->swtag_req = 0;
+	ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
+	ws->lmt_base = dev->sso.lmt_base;
+
+	return ws;
+}
+
+static void
+cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
+{
+	struct cnxk_sso_evdev *dev = arg;
+	struct cn10k_sso_hws *ws = hws;
+	uint64_t val;
+
+	rte_memcpy(ws->grps_base, grps_base,
+		   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
+	ws->fc_mem = dev->fc_mem;
+	ws->xaq_lmt = dev->xaq_lmt;
+
+	/* Set get_work timeout for HWS */
+	val = NSEC2USEC(dev->deq_tmo_ns) - 1;
+	plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
+}
+
+static void
+cn10k_sso_hws_release(void *arg, void *hws)
+{
+	struct cn10k_sso_hws *ws = hws;
+
+	RTE_SET_USED(arg);
+	memset(ws, 0, sizeof(*ws));
+}
+
 static void
 cn10k_sso_set_rsrc(void *arg)
 {
@@ -59,12 +144,46 @@ cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
 	if (rc < 0)
 		goto cnxk_rsrc_fini;
 
+	rc = cnxk_setup_event_ports(event_dev, cn10k_sso_init_hws_mem,
+				    cn10k_sso_hws_setup);
+	if (rc < 0)
+		goto cnxk_rsrc_fini;
+
 	return 0;
 cnxk_rsrc_fini:
 	roc_sso_rsrc_fini(&dev->sso);
+	dev->nb_event_ports = 0;
 	return rc;
 }
 
+static int
+cn10k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
+		     const struct rte_event_port_conf *port_conf)
+{
+
+	RTE_SET_USED(port_conf);
+	return cnxk_sso_port_setup(event_dev, port_id, cn10k_sso_hws_setup);
+}
+
+static void
+cn10k_sso_port_release(void *port)
+{
+	struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
+	struct cnxk_sso_evdev *dev;
+
+	if (port == NULL)
+		return;
+
+	dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
+	if (!gws_cookie->configured)
+		goto free;
+
+	cn10k_sso_hws_release(dev, port);
+	memset(gws_cookie, 0, sizeof(*gws_cookie));
+free:
+	rte_free(gws_cookie);
+}
+
 static struct rte_eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -72,6 +191,8 @@ static struct rte_eventdev_ops cn10k_sso_dev_ops = {
 	.queue_setup = cnxk_sso_queue_setup,
 	.queue_release = cnxk_sso_queue_release,
 	.port_def_conf = cnxk_sso_port_def_conf,
+	.port_setup = cn10k_sso_port_setup,
+	.port_release = cn10k_sso_port_release,
 };
 
 static int
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 43c045d43..116f5bdab 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -7,6 +7,63 @@
 #define CN9K_DUAL_WS_NB_WS	    2
 #define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
 
+static void
+cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
+{
+	ws->tag_op = base + SSOW_LF_GWS_TAG;
+	ws->wqp_op = base + SSOW_LF_GWS_WQP;
+	ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
+	ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
+	ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
+	ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
+}
+
+static void
+cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
+{
+	struct cnxk_sso_evdev *dev = arg;
+	struct cn9k_sso_hws_dual *dws;
+	struct cn9k_sso_hws *ws;
+	uint64_t val;
+
+	/* Set get_work tmo for HWS */
+	val = NSEC2USEC(dev->deq_tmo_ns) - 1;
+	if (dev->dual_ws) {
+		dws = hws;
+		rte_memcpy(dws->grps_base, grps_base,
+			   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
+		dws->fc_mem = dev->fc_mem;
+		dws->xaq_lmt = dev->xaq_lmt;
+
+		plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
+		plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
+	} else {
+		ws = hws;
+		rte_memcpy(ws->grps_base, grps_base,
+			   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
+		ws->fc_mem = dev->fc_mem;
+		ws->xaq_lmt = dev->xaq_lmt;
+
+		plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
+	}
+}
+
+static void
+cn9k_sso_hws_release(void *arg, void *hws)
+{
+	struct cnxk_sso_evdev *dev = arg;
+	struct cn9k_sso_hws_dual *dws;
+	struct cn9k_sso_hws *ws;
+
+	if (dev->dual_ws) {
+		dws = hws;
+		memset(dws, 0, sizeof(*dws));
+	} else {
+		ws = hws;
+		memset(ws, 0, sizeof(*ws));
+	}
+}
+
 static void
 cn9k_sso_set_rsrc(void *arg)
 {
@@ -33,6 +90,60 @@ cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
 	return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
 }
 
+static void *
+cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
+{
+	struct cnxk_sso_evdev *dev = arg;
+	struct cn9k_sso_hws_dual *dws;
+	struct cn9k_sso_hws *ws;
+	void *data;
+
+	if (dev->dual_ws) {
+		dws = rte_zmalloc("cn9k_dual_ws",
+				  sizeof(struct cn9k_sso_hws_dual) +
+					  RTE_CACHE_LINE_SIZE,
+				  RTE_CACHE_LINE_SIZE);
+		if (dws == NULL) {
+			plt_err("Failed to alloc memory for port=%d", port_id);
+			return NULL;
+		}
+
+		dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
+		dws->base[0] = roc_sso_hws_base_get(
+			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
+		dws->base[1] = roc_sso_hws_base_get(
+			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
+		cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
+		cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
+		dws->hws_id = port_id;
+		dws->swtag_req = 0;
+		dws->vws = 0;
+
+		data = dws;
+	} else {
+		/* Allocate event port memory */
+		ws = rte_zmalloc("cn9k_ws",
+				 sizeof(struct cn9k_sso_hws) +
+					 RTE_CACHE_LINE_SIZE,
+				 RTE_CACHE_LINE_SIZE);
+		if (ws == NULL) {
+			plt_err("Failed to alloc memory for port=%d", port_id);
+			return NULL;
+		}
+
+		/* First cache line is reserved for cookie */
+		ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
+		ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
+		cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
+		ws->hws_id = port_id;
+		ws->swtag_req = 0;
+
+		data = ws;
+	}
+
+	return data;
+}
+
 static void
 cn9k_sso_info_get(struct rte_eventdev *event_dev,
 		  struct rte_event_dev_info *dev_info)
@@ -67,12 +178,46 @@ cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
 	if (rc < 0)
 		goto cnxk_rsrc_fini;
 
+	rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
+				    cn9k_sso_hws_setup);
+	if (rc < 0)
+		goto cnxk_rsrc_fini;
+
 	return 0;
 cnxk_rsrc_fini:
 	roc_sso_rsrc_fini(&dev->sso);
+	dev->nb_event_ports = 0;
 	return rc;
 }
 
+static int
+cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
+		    const struct rte_event_port_conf *port_conf)
+{
+
+	RTE_SET_USED(port_conf);
+	return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
+}
+
+static void
+cn9k_sso_port_release(void *port)
+{
+	struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
+	struct cnxk_sso_evdev *dev;
+
+	if (port == NULL)
+		return;
+
+	dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
+	if (!gws_cookie->configured)
+		goto free;
+
+	cn9k_sso_hws_release(dev, port);
+	memset(gws_cookie, 0, sizeof(*gws_cookie));
+free:
+	rte_free(gws_cookie);
+}
+
 static struct rte_eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
@@ -80,6 +225,8 @@ static struct rte_eventdev_ops cn9k_sso_dev_ops = {
 	.queue_setup = cnxk_sso_queue_setup,
 	.queue_release = cnxk_sso_queue_release,
 	.port_def_conf = cnxk_sso_port_def_conf,
+	.port_setup = cn9k_sso_port_setup,
+	.port_release = cn9k_sso_port_release,
 };
 
 static int
diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index 4cb5359a8..9d455c93d 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -125,6 +125,42 @@ cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
 	return rc;
 }
 
+int
+cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
+		       cnxk_sso_init_hws_mem_t init_hws_fn,
+		       cnxk_sso_hws_setup_t setup_hws_fn)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	int i;
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		struct cnxk_sso_hws_cookie *ws_cookie;
+		void *ws;
+
+		/* Reuse already-allocated port memory on reconfigure */
+		if (event_dev->data->ports[i] != NULL)
+			ws = event_dev->data->ports[i];
+		else
+			ws = init_hws_fn(dev, i);
+		if (ws == NULL)
+			goto hws_fini;
+		ws_cookie = cnxk_sso_hws_get_cookie(ws);
+		ws_cookie->event_dev = event_dev;
+		ws_cookie->configured = 1;
+		event_dev->data->ports[i] = ws;
+		cnxk_sso_port_setup((struct rte_eventdev *)(uintptr_t)event_dev,
+				    i, setup_hws_fn);
+	}
+
+	return 0;
+hws_fini:
+	for (i = i - 1; i >= 0; i--) {
+		rte_free(cnxk_sso_hws_get_cookie(event_dev->data->ports[i]));
+		event_dev->data->ports[i] = NULL;
+	}
+	return -ENOMEM;
+}
+
 int
 cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
 {
@@ -225,6 +261,35 @@ cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
 	port_conf->enqueue_depth = 1;
 }
 
+int
+cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
+		    cnxk_sso_hws_setup_t hws_setup_fn)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP] = {0};
+	uint16_t q;
+
+	plt_sso_dbg("Port=%d", port_id);
+	if (event_dev->data->ports[port_id] == NULL) {
+		plt_err("Invalid port Id %d", port_id);
+		return -EINVAL;
+	}
+
+	for (q = 0; q < dev->nb_event_queues; q++) {
+		grps_base[q] = roc_sso_hwgrp_base_get(&dev->sso, q);
+		if (grps_base[q] == 0) {
+			plt_err("Failed to get grp[%d] base addr", q);
+			return -EINVAL;
+		}
+	}
+
+	hws_setup_fn(dev, event_dev->data->ports[port_id], grps_base);
+	plt_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
+	rte_mb();
+
+	return 0;
+}
+
 static void
 parse_queue_param(char *value, void *opaque)
 {
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 4a2fa73fe..0e8457f02 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -17,13 +17,23 @@
 #define CNXK_SSO_XAE_CNT  "xae_cnt"
 #define CNXK_SSO_GGRP_QOS "qos"
 
+#define NSEC2USEC(__ns) ((__ns) / 1E3)
 #define USEC2NSEC(__us) ((__us)*1E3)
 
+#define CNXK_SSO_MAX_HWGRP     (RTE_EVENT_MAX_QUEUES_PER_DEV + 1)
 #define CNXK_SSO_FC_NAME       "cnxk_evdev_xaq_fc"
 #define CNXK_SSO_MZ_NAME       "cnxk_evdev_mz"
 #define CNXK_SSO_XAQ_CACHE_CNT (0x7)
 #define CNXK_SSO_XAQ_SLACK     (8)
 
+#define CN10K_GW_MODE_NONE     0
+#define CN10K_GW_MODE_PREF     1
+#define CN10K_GW_MODE_PREF_WFE 2
+
+typedef void *(*cnxk_sso_init_hws_mem_t)(void *dev, uint8_t port_id);
+typedef void (*cnxk_sso_hws_setup_t)(void *dev, void *ws, uintptr_t *grp_base);
+typedef void (*cnxk_sso_hws_release_t)(void *dev, void *ws);
+
 struct cnxk_sso_qos {
 	uint16_t queue;
 	uint8_t xaq_prcnt;
@@ -53,6 +63,76 @@ struct cnxk_sso_evdev {
 	struct cnxk_sso_qos *qos_parse_data;
 	/* CN9K */
 	uint8_t dual_ws;
+	/* CN10K */
+	uint8_t gw_mode;
+} __rte_cache_aligned;
+
+/* CN10K HWS ops */
+#define CN10K_SSO_HWS_OPS                                                      \
+	uintptr_t swtag_desched_op;                                            \
+	uintptr_t swtag_flush_op;                                              \
+	uintptr_t swtag_untag_op;                                              \
+	uintptr_t swtag_norm_op;                                               \
+	uintptr_t updt_wqe_op;                                                 \
+	uintptr_t tag_wqe_op;                                                  \
+	uintptr_t getwrk_op
+
+struct cn10k_sso_hws {
+	/* Get Work Fastpath data */
+	CN10K_SSO_HWS_OPS;
+	uint32_t gw_wdata;
+	uint8_t swtag_req;
+	uint8_t hws_id;
+	/* Add Work Fastpath data */
+	uint64_t xaq_lmt __rte_cache_aligned;
+	uint64_t *fc_mem;
+	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
+	uint64_t base;
+	uintptr_t lmt_base;
+} __rte_cache_aligned;
+
+/* CN9K HWS ops */
+#define CN9K_SSO_HWS_OPS                                                       \
+	uintptr_t swtag_desched_op;                                            \
+	uintptr_t swtag_flush_op;                                              \
+	uintptr_t swtag_norm_op;                                               \
+	uintptr_t getwrk_op;                                                   \
+	uintptr_t tag_op;                                                      \
+	uintptr_t wqp_op
+
+/* Event port aka GWS */
+struct cn9k_sso_hws {
+	/* Get Work Fastpath data */
+	CN9K_SSO_HWS_OPS;
+	uint8_t swtag_req;
+	uint8_t hws_id;
+	/* Add Work Fastpath data */
+	uint64_t xaq_lmt __rte_cache_aligned;
+	uint64_t *fc_mem;
+	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
+	uint64_t base;
+} __rte_cache_aligned;
+
+struct cn9k_sso_hws_state {
+	CN9K_SSO_HWS_OPS;
+};
+
+struct cn9k_sso_hws_dual {
+	/* Get Work Fastpath data */
+	struct cn9k_sso_hws_state ws_state[2]; /* Ping and Pong */
+	uint8_t swtag_req;
+	uint8_t vws; /* Ping pong bit */
+	uint8_t hws_id;
+	/* Add Work Fastpath data */
+	uint64_t xaq_lmt __rte_cache_aligned;
+	uint64_t *fc_mem;
+	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];
+	uint64_t base[2];
+} __rte_cache_aligned;
+
+struct cnxk_sso_hws_cookie {
+	const struct rte_eventdev *event_dev;
+	bool configured;
 } __rte_cache_aligned;
 
 static inline int
@@ -70,6 +150,12 @@ cnxk_sso_pmd_priv(const struct rte_eventdev *event_dev)
 	return event_dev->data->dev_private;
 }
 
+static inline struct cnxk_sso_hws_cookie *
+cnxk_sso_hws_get_cookie(void *ws)
+{
+	return RTE_PTR_SUB(ws, sizeof(struct cnxk_sso_hws_cookie));
+}
+
 /* Configuration functions */
 int cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev);
 
@@ -80,6 +166,9 @@ int cnxk_sso_remove(struct rte_pci_device *pci_dev);
 void cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
 		       struct rte_event_dev_info *dev_info);
 int cnxk_sso_dev_validate(const struct rte_eventdev *event_dev);
+int cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
+			   cnxk_sso_init_hws_mem_t init_hws_mem,
+			   cnxk_sso_hws_setup_t hws_setup);
 void cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
 			     struct rte_event_queue_conf *queue_conf);
 int cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
@@ -87,5 +176,7 @@ int cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
 void cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id);
 void cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
 			    struct rte_event_port_conf *port_conf);
+int cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
+			cnxk_sso_hws_setup_t hws_setup_fn);
 
 #endif /* __CNXK_EVENTDEV_H__ */
-- 
2.17.1