From mboxrd@z Thu Jan  1 00:00:00 1970
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Nithin Dabilpuram <ndabilpuram@marvell.com>, Kiran Kumar K
 <kirankumark@marvell.com>, Sunil Kumar Kori <skori@marvell.com>, Satha Rao
 <skoteshwar@marvell.com>
CC: <jerinj@marvell.com>, <dev@dpdk.org>
Subject: [PATCH v2 10/31] common/cnxk: update attributes to pools used by NIX
Date: Mon, 5 Sep 2022 19:02:07 +0530
Message-ID: <20220905133228.818616-10-ndabilpuram@marvell.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20220905133228.818616-1-ndabilpuram@marvell.com>
References: <20220809184908.24030-1-ndabilpuram@marvell.com>
 <20220905133228.818616-1-ndabilpuram@marvell.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Content-Type: text/plain

Update buffer-type attributes of the pools used by NIX so that we
can later identify which mempools are packet pools and which back
an inline IPsec enabled ethdev.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix_queue.c | 112 +++++++++++++++++++++++++++-
 1 file changed, 110 insertions(+), 2 deletions(-)
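
As a quick illustration of what the new tagging enables, below is a minimal
sketch (not part of the patch) of the tag/untag lifecycle that
nix_rq_aura_buf_type_update() implements for the LPB aura:
roc_npa_buf_type_update() adjusts a per-aura, per-buffer-type count, so an
RQ adds +1 for the matching type when it starts using an aura and -1 on
modify or teardown. The helper name rq_aura_tag_lpb() and the umbrella
include are illustrative assumptions; only ROC symbols already used in this
series are relied on.

#include "roc_api.h"

/* Illustrative only: tag or untag the LPB aura of an RQ.
 * count is +1 when the RQ starts using the aura, -1 when it stops.
 */
static void
rq_aura_tag_lpb(uint64_t lpb_aura, bool inl_inb_ena, int count)
{
	/* Inline-inbound IPsec ethdevs get a distinct buf type so their
	 * mempools can be told apart from plain packet pools later on.
	 */
	if (inl_inb_ena)
		roc_npa_buf_type_update(lpb_aura,
					ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
	else
		roc_npa_buf_type_update(lpb_aura,
					ROC_NPA_BUF_TYPE_PACKET, count);
}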

diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 70b4516eca..98b9fb45f5 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -140,6 +140,96 @@ roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid)
 	return sso_enable ? true : false;
 }
 
+static int
+nix_rq_aura_buf_type_update(struct roc_nix_rq *rq, bool set)
+{
+	struct roc_nix *roc_nix = rq->roc_nix;
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	bool inl_inb_ena = roc_nix_inl_inb_is_enabled(roc_nix);
+	uint64_t lpb_aura = 0, vwqe_aura = 0, spb_aura = 0;
+	struct mbox *mbox = nix->dev.mbox;
+	uint64_t aura_base;
+	int rc, count;
+
+	count = set ? 1 : -1;
+	/* For buf type set, use info from RQ context */
+	if (set) {
+		lpb_aura = rq->aura_handle;
+		spb_aura = rq->spb_ena ? rq->spb_aura_handle : 0;
+		vwqe_aura = rq->vwqe_ena ? rq->vwqe_aura_handle : 0;
+		goto skip_ctx_read;
+	}
+
+	aura_base = roc_npa_aura_handle_to_base(rq->aura_handle);
+	if (roc_model_is_cn9k()) {
+		struct nix_aq_enq_rsp *rsp;
+		struct nix_aq_enq_req *aq;
+
+		aq = mbox_alloc_msg_nix_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
+		aq->qidx = rq->qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_READ;
+		rc = mbox_process_msg(mbox, (void *)&rsp);
+		if (rc)
+			return rc;
+
+		/* Rebuild the aura handle from the aura id in the RQ context */
+		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
+		if (rsp->rq.spb_ena)
+			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
+	} else {
+		struct nix_cn10k_aq_enq_rsp *rsp;
+		struct nix_cn10k_aq_enq_req *aq;
+
+		aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+		if (!aq)
+			return -ENOSPC;
+
+		aq->qidx = rq->qid;
+		aq->ctype = NIX_AQ_CTYPE_RQ;
+		aq->op = NIX_AQ_INSTOP_READ;
+
+		rc = mbox_process_msg(mbox, (void *)&rsp);
+		if (rc)
+			return rc;
+
+		/* Rebuild the aura handle from the aura id in the RQ context */
+		lpb_aura = roc_npa_aura_handle_gen(rsp->rq.lpb_aura, aura_base);
+		if (rsp->rq.spb_ena)
+			spb_aura = roc_npa_aura_handle_gen(rsp->rq.spb_aura, aura_base);
+		if (rsp->rq.vwqe_ena)
+			vwqe_aura = roc_npa_aura_handle_gen(rsp->rq.wqe_aura, aura_base);
+	}
+
+skip_ctx_read:
+	/* Update attributes for LPB aura */
+	if (inl_inb_ena)
+		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
+	else
+		roc_npa_buf_type_update(lpb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
+
+	/* Update attributes for SPB aura */
+	if (spb_aura) {
+		if (inl_inb_ena)
+			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET_IPSEC, count);
+		else
+			roc_npa_buf_type_update(spb_aura, ROC_NPA_BUF_TYPE_PACKET, count);
+	}
+
+	/* Update attributes for VWQE aura */
+	if (vwqe_aura) {
+		if (inl_inb_ena)
+			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE_IPSEC, count);
+		else
+			roc_npa_buf_type_update(vwqe_aura, ROC_NPA_BUF_TYPE_VWQE, count);
+	}
+
+	return 0;
+}
+
 int
 nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 		bool cfg, bool ena)
@@ -292,7 +382,7 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 			/* Maximal Vector size is (2^(MAX_VSIZE_EXP+2)) */
 			aq->rq.max_vsize_exp = rq->vwqe_max_sz_exp - 2;
 			aq->rq.vtime_wait = rq->vwqe_wait_tmo;
-			aq->rq.wqe_aura = rq->vwqe_aura_handle;
+			aq->rq.wqe_aura = roc_npa_aura_handle_to_aura(rq->vwqe_aura_handle);
 		}
 	} else {
 		/* CQ mode */
@@ -463,6 +553,9 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rc)
 		return rc;
 
+	/* Update aura buf type to indicate its use */
+	nix_rq_aura_buf_type_update(rq, true);
+
 	return nix_tel_node_add_rq(rq);
 }
 
@@ -481,6 +574,9 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rq->qid >= nix->nb_rx_queues)
 		return NIX_ERR_QUEUE_INVALID_RANGE;
 
+	/* Clear attributes for existing auras */
+	nix_rq_aura_buf_type_update(rq, false);
+
 	rq->roc_nix = roc_nix;
 
 	if (is_cn9k)
@@ -495,14 +591,25 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	if (rc)
 		return rc;
 
+	/* Update aura attribute to indicate its use */
+	nix_rq_aura_buf_type_update(rq, true);
+
 	return nix_tel_node_add_rq(rq);
 }
 
 int
 roc_nix_rq_fini(struct roc_nix_rq *rq)
 {
+	int rc;
+
 	/* Disabling RQ is sufficient */
-	return roc_nix_rq_ena_dis(rq, false);
+	rc = roc_nix_rq_ena_dis(rq, false);
+	if (rc)
+		return rc;
+
+	/* Clear aura attributes as the RQ no longer uses them */
+	nix_rq_aura_buf_type_update(rq, false);
+	return 0;
 }
 
 int
@@ -717,6 +824,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct roc_nix_sq *sq)
 	if (rc)
 		goto fail;
 
+	roc_npa_buf_type_update(sq->aura_handle, ROC_NPA_BUF_TYPE_SQB, 1);
 	sq->sqe_mem = plt_zmalloc(blk_sz * nb_sqb_bufs, blk_sz);
 	if (sq->sqe_mem == NULL) {
 		rc = NIX_ERR_NO_MEM;
-- 
2.25.1