From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 0DF82A0C46;
	Fri, 18 Jun 2021 12:40:09 +0200 (CEST)
Received: from [217.70.189.124] (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id 04AD44111D;
	Fri, 18 Jun 2021 12:39:41 +0200 (CEST)
Received: from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com
 [67.231.156.173])
 by mails.dpdk.org (Postfix) with ESMTP id 46310410FE
 for <dev@dpdk.org>; Fri, 18 Jun 2021 12:39:39 +0200 (CEST)
Received: from pps.filterd (m0045851.ppops.net [127.0.0.1])
 by mx0b-0016f401.pphosted.com (8.16.0.43/8.16.0.43) with SMTP id
 15IAaS8O004719 for <dev@dpdk.org>; Fri, 18 Jun 2021 03:39:38 -0700
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;
 h=from : to : cc :
 subject : date : message-id : in-reply-to : references : mime-version :
 content-type; s=pfpt0220; bh=8A8F6AFh4+vs4IX/rX0heRPWnavtjgPQQgqR4j3AUf8=;
 b=EFgnub3Nfzm76o3ZeL4vJa6fqq1oyd09+MvaJtxr0Mj6mBvhtHqALBGQeWxbiun+KzKi
 DgtBLE8xDPZ2Di3cdV3OpjPEmrNQVE8bpOAXSp65GRli+8sVHJYY55qDaqC2dRzMpBds
 pHSz7LItaNn/TKSuqygK7Axl+ehIvwKa/4/RtJWYg+//uSiGkCRlJyfozdyfGfrEjYxf
 R3tNvrR7LgzDgwdEXq+VsoMrCBNnX69hwT+VqFRMozXfx7sb37R+VPrA60/FBKaIuSzY
 QliJg0mjX0KVCCtvRuAcq97wjJYdanGgYhou+vtanHCImv7ESTPdE/mhY1nLf2ORyo4F tA== 
Received: from dc5-exch02.marvell.com ([199.233.59.182])
 by mx0b-0016f401.pphosted.com with ESMTP id 397udry7d7-1
 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)
 for <dev@dpdk.org>; Fri, 18 Jun 2021 03:39:38 -0700
Received: from DC5-EXCH01.marvell.com (10.69.176.38) by DC5-EXCH02.marvell.com
 (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.18;
 Fri, 18 Jun 2021 03:39:36 -0700
Received: from maili.marvell.com (10.69.176.80) by DC5-EXCH01.marvell.com
 (10.69.176.38) with Microsoft SMTP Server id 15.0.1497.18 via Frontend
 Transport; Fri, 18 Jun 2021 03:39:36 -0700
Received: from hyd1588t430.marvell.com (unknown [10.29.52.204])
 by maili.marvell.com (Postfix) with ESMTP id 118735B6951;
 Fri, 18 Jun 2021 03:38:46 -0700 (PDT)
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: <dev@dpdk.org>
CC: <jerinj@marvell.com>, <skori@marvell.com>, <skoteshwar@marvell.com>,
 <pbhagavatula@marvell.com>, <kirankumark@marvell.com>,
 <psatheesh@marvell.com>, <asekhar@marvell.com>, <hkalra@marvell.com>
Date: Fri, 18 Jun 2021 16:06:45 +0530
Message-ID: <20210618103741.26526-7-ndabilpuram@marvell.com>
X-Mailer: git-send-email 2.8.4
In-Reply-To: <20210618103741.26526-1-ndabilpuram@marvell.com>
References: <20210306153404.10781-1-ndabilpuram@marvell.com>
 <20210618103741.26526-1-ndabilpuram@marvell.com>
MIME-Version: 1.0
Content-Type: text/plain
X-Proofpoint-ORIG-GUID: ALLyc1gHTR29FBLe5RdHl3A0xeWdQbQu
X-Proofpoint-GUID: ALLyc1gHTR29FBLe5RdHl3A0xeWdQbQu
X-Proofpoint-Virus-Version: vendor=fsecure engine=2.50.10434:6.0.391, 18.0.790
 definitions=2021-06-18_04:2021-06-18,
 2021-06-18 signatures=0
Subject: [dpdk-dev] [PATCH v3 06/62] common/cnxk: add provision to enable
 RED on RQ
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org
Sender: "dev" <dev-bounces@dpdk.org>

From: Satha Rao <skoteshwar@marvell.com>

Send RED pass/drop levels to the kernel based on the RQ configuration.
Also fix the aura and pool shift value calculation.
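
As a minimal usage sketch (illustration only; the threshold values below are
arbitrary), the new uint8_t fields can be filled in before RQ setup:

	struct roc_nix_rq rq;

	memset(&rq, 0, sizeof(rq));
	/* RED levels are programmed only when red_pass is non-zero and
	 * red_pass >= red_drop (see the guard in the hunks below).
	 */
	rq.red_pass = 192;     /* average LPB aura level pass threshold */
	rq.red_drop = 128;     /* average LPB aura level drop threshold */
	rq.spb_red_pass = 192; /* average SPB aura level pass threshold */
	rq.spb_red_drop = 128; /* average SPB aura level drop threshold */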

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix.h       |  8 ++++++
 drivers/common/cnxk/roc_nix_queue.c | 50 +++++++++++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_npa.c       |  8 ++++--
 drivers/common/cnxk/roc_npa.h       |  5 ++++
 4 files changed, 69 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 6d9ac10..bb69027 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -161,6 +161,14 @@ struct roc_nix_rq {
 	uint32_t vwqe_max_sz_exp;
 	uint64_t vwqe_wait_tmo;
 	uint64_t vwqe_aura_handle;
+	/* Average LPB aura level drop threshold for RED */
+	uint8_t red_drop;
+	/* Average LPB aura level pass threshold for RED */
+	uint8_t red_pass;
+	/* Average SPB aura level drop threshold for RED */
+	uint8_t spb_red_drop;
+	/* Average SPB aura level pass threshold for RED */
+	uint8_t spb_red_pass;
 	/* End of Input parameters */
 	struct roc_nix *roc_nix;
 };
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 1c62aa2..0604e7a 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -119,6 +119,15 @@ rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 	aq->rq.qint_idx = rq->qid % nix->qints;
 	aq->rq.xqe_drop_ena = 1;
 
+	/* If RED is enabled, fill in the pass/drop levels for all cases */
+	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
+		aq->rq.spb_aura_pass = rq->spb_red_pass;
+		aq->rq.lpb_aura_pass = rq->red_pass;
+
+		aq->rq.spb_aura_drop = rq->spb_red_drop;
+		aq->rq.lpb_aura_drop = rq->red_drop;
+	}
+
 	if (cfg) {
 		if (rq->sso_ena) {
 			/* SSO mode */
@@ -155,6 +164,14 @@ rq_cn9k_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
 		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
 		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
+
+		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
+			aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
+			aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;
+
+			aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
+			aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
+		}
 	}
 
 	return 0;
@@ -244,6 +261,23 @@ rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 	aq->rq.qint_idx = rq->qid % nix->qints;
 	aq->rq.xqe_drop_ena = 1;
 
+	/* If RED is enabled, fill in the pass/drop levels for all cases */
+	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
+		aq->rq.spb_pool_pass = rq->red_pass;
+		aq->rq.spb_aura_pass = rq->red_pass;
+		aq->rq.lpb_pool_pass = rq->red_pass;
+		aq->rq.lpb_aura_pass = rq->red_pass;
+		aq->rq.wqe_pool_pass = rq->red_pass;
+		aq->rq.xqe_pass = rq->red_pass;
+
+		aq->rq.spb_pool_drop = rq->red_drop;
+		aq->rq.spb_aura_drop = rq->red_drop;
+		aq->rq.lpb_pool_drop = rq->red_drop;
+		aq->rq.lpb_aura_drop = rq->red_drop;
+		aq->rq.wqe_pool_drop = rq->red_drop;
+		aq->rq.xqe_drop = rq->red_drop;
+	}
+
 	if (cfg) {
 		if (rq->sso_ena) {
 			/* SSO mode */
@@ -296,6 +330,22 @@ rq_cfg(struct nix *nix, struct roc_nix_rq *rq, bool cfg, bool ena)
 		aq->rq_mask.rq_int_ena = ~aq->rq_mask.rq_int_ena;
 		aq->rq_mask.qint_idx = ~aq->rq_mask.qint_idx;
 		aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
+
+		if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
+			aq->rq_mask.spb_pool_pass = ~aq->rq_mask.spb_pool_pass;
+			aq->rq_mask.spb_aura_pass = ~aq->rq_mask.spb_aura_pass;
+			aq->rq_mask.lpb_pool_pass = ~aq->rq_mask.lpb_pool_pass;
+			aq->rq_mask.lpb_aura_pass = ~aq->rq_mask.lpb_aura_pass;
+			aq->rq_mask.wqe_pool_pass = ~aq->rq_mask.wqe_pool_pass;
+			aq->rq_mask.xqe_pass = ~aq->rq_mask.xqe_pass;
+
+			aq->rq_mask.spb_pool_drop = ~aq->rq_mask.spb_pool_drop;
+			aq->rq_mask.spb_aura_drop = ~aq->rq_mask.spb_aura_drop;
+			aq->rq_mask.lpb_pool_drop = ~aq->rq_mask.lpb_pool_drop;
+			aq->rq_mask.lpb_aura_drop = ~aq->rq_mask.lpb_aura_drop;
+			aq->rq_mask.wqe_pool_drop = ~aq->rq_mask.wqe_pool_drop;
+			aq->rq_mask.xqe_drop = ~aq->rq_mask.xqe_drop;
+		}
 	}
 
 	return 0;
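
Both rq_cn9k_cfg() and rq_cfg() above gate the RED programming on the same
condition. A hypothetical helper, shown only to make the rule explicit, would
read:

	/* Hypothetical helper (not part of this patch): RED levels are
	 * programmed only when a non-zero pass level is at least the
	 * drop level.
	 */
	static inline bool
	roc_nix_rq_red_ena(const struct roc_nix_rq *rq)
	{
		return rq->red_pass != 0 && rq->red_pass >= rq->red_drop;
	}
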
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 5ba6e81..d064d12 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -278,13 +278,15 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 	/* Update aura fields */
 	aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
 	aura->ena = 1;
-	aura->shift = __builtin_clz(block_count) - 8;
+	aura->shift = plt_log2_u32(block_count);
+	aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
 	aura->limit = block_count;
 	aura->pool_caching = 1;
 	aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
 	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
 	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
 	aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
+	aura->avg_con = ROC_NPA_AVG_CONT;
 	/* Many to one reduction */
 	aura->err_qint_idx = aura_id % lf->qints;
 
@@ -293,13 +295,15 @@ npa_aura_pool_pair_alloc(struct npa_lf *lf, const uint32_t block_size,
 	pool->ena = 1;
 	pool->buf_size = block_size / ROC_ALIGN;
 	pool->stack_max_pages = stack_size;
-	pool->shift = __builtin_clz(block_count) - 8;
+	pool->shift = plt_log2_u32(block_count);
+	pool->shift = pool->shift < 8 ? 0 : pool->shift - 8;
 	pool->ptr_start = 0;
 	pool->ptr_end = ~0;
 	pool->stack_caching = 1;
 	pool->err_int_ena = BIT(NPA_POOL_ERR_INT_OVFLS);
 	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_RANGE);
 	pool->err_int_ena |= BIT(NPA_POOL_ERR_INT_PERR);
+	pool->avg_con = ROC_NPA_AVG_CONT;
 
 	/* Many to one reduction */
 	pool->err_qint_idx = pool_id % lf->qints;
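
On the shift fix above: __builtin_clz() counts leading zero bits, the
complement of the base-2 log, so the old formula produced an inverted shift
and could underflow for very large block counts. A worked example, assuming
plt_log2_u32() rounds the log up the way DPDK's rte_log2_u32() does:

	uint32_t block_count = 1024, shift;

	/* Old: __builtin_clz(1024) = 21 on a 32-bit value, giving
	 * shift = 21 - 8 = 13.
	 *
	 * New: log2(1024) = 10; block counts up to 256 clamp to shift 0,
	 * larger counts subtract 8, so shift = 2 here. This keeps the
	 * shifted level on the order of the 8-bit average-level range
	 * that the uint8_t RED pass/drop thresholds are compared against.
	 */
	shift = plt_log2_u32(block_count);
	shift = shift < 8 ? 0 : shift - 8;
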
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index 59d6223..3fc6192 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -12,6 +12,11 @@
 #define ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS 512
 #define ROC_CN10K_NPA_BATCH_FREE_MAX_PTRS  15
 
+/* This value controls how much of the present average resource level is used to
+ * calculate the new resource level.
+ */
+#define ROC_NPA_AVG_CONT 0xE0
+
 /* 16 CASP instructions can be outstanding in CN9k, but we use only 15
  * outstanding CASPs as we run out of registers.
  */
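
Reading the new comment as an exponentially weighted moving average, and
assuming AVG_CON is an 8-bit weight out of 256 applied to the previous
average (an interpretation of the comment, not something this patch states),
0xE0 works out to:

	avg_new = (0xE0 * avg_old + (0x100 - 0xE0) * level) / 0x100
	        = (224 * avg_old + 32 * level) / 256
	        = 7/8 * avg_old + 1/8 * level

i.e. roughly 87.5% of the previous average level is carried forward on each
update.
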
-- 
2.8.4