DPDK patches and discussions
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: <jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>,
	"Kiran Kumar K" <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>,
	Harman Kalra <hkalra@marvell.com>
Cc: <dev@dpdk.org>, Ashwin Sekhar T K <asekhar@marvell.com>
Subject: [PATCH 03/33] common/cnxk: use new NPA aq enq mbox for cn20k
Date: Tue, 10 Sep 2024 14:28:39 +0530	[thread overview]
Message-ID: <20240910085909.1514457-4-ndabilpuram@marvell.com> (raw)
In-Reply-To: <20240910085909.1514457-1-ndabilpuram@marvell.com>

From: Ashwin Sekhar T K <asekhar@marvell.com>

A new mbox, npa_cn20k_aq_enq_req, has been added for cn20k. Use this
mbox for NPA configuration.

Note that the new mbox request and response are the same size as the
older mboxes. The new cn20k contexts npa_cn20k_aura_s/npa_cn20k_pool_s
are likewise the same size as the older npa_aura_s/npa_pool_s. These
structures can therefore be typecast into each other in most cases;
only the fields whose width or position has changed need special
handling.
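
For example, the call sites converted by this patch follow the pattern
sketched below (illustrative only, not an additional change; the wrapper
name npa_aura_read() is made up for the example):

	static int
	npa_aura_read(struct mbox *mbox, uint32_t aura_id)
	{
		struct npa_cn20k_aq_enq_req *req_cn20k;
		struct npa_aq_enq_req *req;

		if (roc_model_is_cn20k()) {
			req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
			/* Same size and layout for the common fields, so the
			 * legacy request type can be reused from here on.
			 */
			req = (struct npa_aq_enq_req *)req_cn20k;
		} else {
			req = mbox_alloc_msg_npa_aq_enq(mbox);
		}
		if (req == NULL)
			return -ENOSPC;

		/* aura_id/ctype/op sit at the same offsets in both layouts */
		req->aura_id = aura_id;
		req->ctype = NPA_AQ_CTYPE_AURA;
		req->op = NPA_AQ_INSTOP_READ;

		return mbox_process(mbox);
	}

Fields whose width or position did change on cn20k (e.g. bp_ena) are
instead accessed through an explicit cast to the cn20k context
structure, as done in npa_aura_pool_fini() below.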

Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
 drivers/common/cnxk/hw/npa.h         | 164 ++++++++++++++++++++++++---
 drivers/common/cnxk/roc_mbox.h       |  32 ++++++
 drivers/common/cnxk/roc_nix_debug.c  |   9 +-
 drivers/common/cnxk/roc_nix_fc.c     |  54 ++++++---
 drivers/common/cnxk/roc_nix_tm_ops.c |  15 ++-
 drivers/common/cnxk/roc_npa.c        | 100 ++++++++++++++--
 drivers/common/cnxk/roc_npa_debug.c  |  17 ++-
 7 files changed, 339 insertions(+), 52 deletions(-)
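
Note: the new M(NPA_CN20K_AQ_ENQ, ...) entry added to the mbox message
table in roc_mbox.h is what makes the mbox_alloc_msg_npa_cn20k_aq_enq()
helper used by the C changes available. A simplified sketch of how such
a table entry is typically consumed is shown below; the actual generator
macro lives in the common mbox infrastructure and is not part of this
diff, so the helper name and signature used here are assumptions for
illustration only:

	/* Illustrative only: each M() entry is assumed to expand into a
	 * typed allocation helper roughly along these lines.
	 */
	#define M(_name, _id, _fn_name, _req_type, _rsp_type)            \
	static inline struct _req_type *                                 \
	mbox_alloc_msg_##_fn_name(struct mbox *mbox)                     \
	{                                                                \
		struct _req_type *req;                                   \
									 \
		req = (struct _req_type *)mbox_alloc_msg_rsp(            \
			mbox, 0, sizeof(struct _req_type),               \
			sizeof(struct _rsp_type));                       \
		if (req == NULL)                                         \
			return NULL;                                     \
		req->hdr.sig = MBOX_REQ_SIG;                             \
		req->hdr.id = _id;                                       \
		return req;                                              \
	}
	MBOX_MESSAGES
	#undef M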

diff --git a/drivers/common/cnxk/hw/npa.h b/drivers/common/cnxk/hw/npa.h
index 891a1b2b5f..4fd1f9a64b 100644
--- a/drivers/common/cnxk/hw/npa.h
+++ b/drivers/common/cnxk/hw/npa.h
@@ -216,10 +216,10 @@ struct npa_aura_op_wdata_s {
 	uint64_t drop : 1;
 };
 
-/* NPA aura context structure */
+/* NPA aura context structure [CN9K, CN10K] */
 struct npa_aura_s {
 	uint64_t pool_addr : 64; /* W0 */
-	uint64_t ena : 1;
+	uint64_t ena : 1; /* W1 */
 	uint64_t rsvd_66_65 : 2;
 	uint64_t pool_caching : 1;
 	uint64_t pool_way_mask : 16;
@@ -233,24 +233,24 @@ struct npa_aura_s {
 	uint64_t shift : 6;
 	uint64_t rsvd_119_118 : 2;
 	uint64_t avg_level : 8;
-	uint64_t count : 36;
+	uint64_t count : 36; /* W2 */
 	uint64_t rsvd_167_164 : 4;
 	uint64_t nix0_bpid : 9;
 	uint64_t rsvd_179_177 : 3;
 	uint64_t nix1_bpid : 9;
 	uint64_t rsvd_191_189 : 3;
-	uint64_t limit : 36;
+	uint64_t limit : 36; /* W3 */
 	uint64_t rsvd_231_228 : 4;
 	uint64_t bp : 8;
 	uint64_t rsvd_242_240 : 3;
-	uint64_t fc_be : 1; /* [CN10K, .) */
+	uint64_t fc_be : 1; /* [CN10K] */
 	uint64_t fc_ena : 1;
 	uint64_t fc_up_crossing : 1;
 	uint64_t fc_stype : 2;
 	uint64_t fc_hyst_bits : 4;
 	uint64_t rsvd_255_252 : 4;
 	uint64_t fc_addr : 64; /* W4 */
-	uint64_t pool_drop : 8;
+	uint64_t pool_drop : 8; /* W5 */
 	uint64_t update_time : 16;
 	uint64_t err_int : 8;
 	uint64_t err_int_ena : 8;
@@ -262,17 +262,17 @@ struct npa_aura_s {
 	uint64_t rsvd_371 : 1;
 	uint64_t err_qint_idx : 7;
 	uint64_t rsvd_383_379 : 5;
-	uint64_t thresh : 36;
+	uint64_t thresh : 36; /* W6 */
 	uint64_t rsvd_423_420 : 4;
-	uint64_t fc_msh_dst : 11; /* [CN10K, .) */
+	uint64_t fc_msh_dst : 11; /* [CN10K] */
 	uint64_t rsvd_447_435 : 13;
 	uint64_t rsvd_511_448 : 64; /* W7 */
 };
 
-/* NPA pool context structure */
+/* NPA pool context structure [CN9K, CN10K] */
 struct npa_pool_s {
 	uint64_t stack_base : 64; /* W0 */
-	uint64_t ena : 1;
+	uint64_t ena : 1; /* W1 */
 	uint64_t nat_align : 1;
 	uint64_t rsvd_67_66 : 2;
 	uint64_t stack_caching : 1;
@@ -282,11 +282,11 @@ struct npa_pool_s {
 	uint64_t rsvd_103_100 : 4;
 	uint64_t buf_size : 11;
 	uint64_t rsvd_127_115 : 13;
-	uint64_t stack_max_pages : 32;
+	uint64_t stack_max_pages : 32; /* W2 */
 	uint64_t stack_pages : 32;
-	uint64_t op_pc : 48;
+	uint64_t op_pc : 48; /* W3 */
 	uint64_t rsvd_255_240 : 16;
-	uint64_t stack_offset : 4;
+	uint64_t stack_offset : 4; /* W4 */
 	uint64_t rsvd_263_260 : 4;
 	uint64_t shift : 6;
 	uint64_t rsvd_271_270 : 2;
@@ -296,14 +296,14 @@ struct npa_pool_s {
 	uint64_t fc_stype : 2;
 	uint64_t fc_hyst_bits : 4;
 	uint64_t fc_up_crossing : 1;
-	uint64_t fc_be : 1; /* [CN10K, .) */
+	uint64_t fc_be : 1; /* [CN10K] */
 	uint64_t rsvd_299_298 : 2;
 	uint64_t update_time : 16;
 	uint64_t rsvd_319_316 : 4;
 	uint64_t fc_addr : 64;	 /* W5 */
 	uint64_t ptr_start : 64; /* W6 */
 	uint64_t ptr_end : 64;	 /* W7 */
-	uint64_t rsvd_535_512 : 24;
+	uint64_t rsvd_535_512 : 24; /* W8 */
 	uint64_t err_int : 8;
 	uint64_t err_int_ena : 8;
 	uint64_t thresh_int : 1;
@@ -314,9 +314,9 @@ struct npa_pool_s {
 	uint64_t rsvd_563 : 1;
 	uint64_t err_qint_idx : 7;
 	uint64_t rsvd_575_571 : 5;
-	uint64_t thresh : 36;
+	uint64_t thresh : 36; /* W9 */
 	uint64_t rsvd_615_612 : 4;
-	uint64_t fc_msh_dst : 11; /* [CN10K, .) */
+	uint64_t fc_msh_dst : 11; /* [CN10K] */
 	uint64_t rsvd_639_627 : 13;
 	uint64_t rsvd_703_640 : 64;  /* W10 */
 	uint64_t rsvd_767_704 : 64;  /* W11 */
@@ -326,6 +326,136 @@ struct npa_pool_s {
 	uint64_t rsvd_1023_960 : 64; /* W15 */
 };
 
+/* NPA aura context structure [CN20K] */
+struct npa_cn20k_aura_s {
+	uint64_t pool_addr : 64; /* W0 */
+	uint64_t ena : 1;   /* W1 */
+	uint64_t rsvd_66_65 : 2;
+	uint64_t pool_caching : 1;
+	uint64_t rsvd_68 : 16;
+	uint64_t avg_con : 9;
+	uint64_t rsvd_93 : 1;
+	uint64_t pool_drop_ena : 1;
+	uint64_t aura_drop_ena : 1;
+	uint64_t bp_ena : 1;
+	uint64_t rsvd_103_97 : 7;
+	uint64_t aura_drop : 8;
+	uint64_t shift : 6;
+	uint64_t rsvd_119_118 : 2;
+	uint64_t avg_level : 8;
+	uint64_t count : 36; /* W2 */
+	uint64_t rsvd_167_164 : 4;
+	uint64_t bpid : 12;
+	uint64_t rsvd_191_180 : 12;
+	uint64_t limit : 36; /* W3 */
+	uint64_t rsvd_231_228 : 4;
+	uint64_t bp : 7;
+	uint64_t rsvd_243_239 : 5;
+	uint64_t fc_ena : 1;
+	uint64_t fc_up_crossing : 1;
+	uint64_t fc_stype : 2;
+	uint64_t fc_hyst_bits : 4;
+	uint64_t rsvd_255_252 : 4;
+	uint64_t fc_addr : 64;  /* W4 */
+	uint64_t pool_drop : 8; /* W5 */
+	uint64_t update_time : 16;
+	uint64_t err_int : 8;
+	uint64_t err_int_ena : 8;
+	uint64_t thresh_int : 1;
+	uint64_t thresh_int_ena : 1;
+	uint64_t thresh_up : 1;
+	uint64_t rsvd_363 : 1;
+	uint64_t thresh_qint_idx : 7;
+	uint64_t rsvd_371 : 1;
+	uint64_t err_qint_idx : 7;
+	uint64_t rsvd_383_379 : 5;
+	uint64_t thresh : 36; /* W6 */
+	uint64_t rsvd_423_420 : 4;
+	uint64_t fc_msh_dst : 11;
+	uint64_t rsvd_438_435 : 4;
+	uint64_t op_dpc_ena : 1;
+	uint64_t op_dpc_set : 6;
+	uint64_t stream_ctx : 1;
+	uint64_t unified_ctx : 1;
+	uint64_t rsvd_511_448 : 64; /* W7 */
+};
+
+/* NPA pool context structure [CN20K] */
+struct npa_cn20k_pool_s {
+	uint64_t stack_base : 64; /* W0 */
+	uint64_t ena : 1; /* W1 */
+	uint64_t nat_align : 1;
+	uint64_t rsvd_67_66 : 2;
+	uint64_t stack_caching : 1;
+	uint64_t rsvd_87_69 : 19;
+	uint64_t buf_offset : 12;
+	uint64_t rsvd_103_100 : 4;
+	uint64_t buf_size : 12;
+	uint64_t rsvd_119_116 : 4;
+	uint64_t ref_cnt_prof : 3;
+	uint64_t rsvd_127_123 : 5;
+	uint64_t stack_max_pages : 32; /* W2 */
+	uint64_t stack_pages : 32;
+	uint64_t bp_0 : 7; /* W3 */
+	uint64_t bp_1 : 7;
+	uint64_t bp_2 : 7;
+	uint64_t bp_3 : 7;
+	uint64_t bp_4 : 7;
+	uint64_t bp_5 : 7;
+	uint64_t bp_6 : 7;
+	uint64_t bp_7 : 7;
+	uint64_t bp_ena_0 : 1;
+	uint64_t bp_ena_1 : 1;
+	uint64_t bp_ena_2 : 1;
+	uint64_t bp_ena_3 : 1;
+	uint64_t bp_ena_4 : 1;
+	uint64_t bp_ena_5 : 1;
+	uint64_t bp_ena_6 : 1;
+	uint64_t bp_ena_7 : 1;
+	uint64_t stack_offset : 4; /* W4 */
+	uint64_t rsvd_263_260 : 4;
+	uint64_t shift : 6;
+	uint64_t rsvd_271_270 : 2;
+	uint64_t avg_level : 8;
+	uint64_t avg_con : 9;
+	uint64_t fc_ena : 1;
+	uint64_t fc_stype : 2;
+	uint64_t fc_hyst_bits : 4;
+	uint64_t fc_up_crossing : 1;
+	uint64_t rsvd_299_297 : 3;
+	uint64_t update_time : 16;
+	uint64_t rsvd_319_316 : 4;
+	uint64_t fc_addr : 64;   /* W5 */
+	uint64_t ptr_start : 64; /* W6 */
+	uint64_t ptr_end : 64;   /* W7 */
+	uint64_t bpid_0 : 12; /* W8 */
+	uint64_t rsvd_535_524 : 12;
+	uint64_t err_int : 8;
+	uint64_t err_int_ena : 8;
+	uint64_t thresh_int : 1;
+	uint64_t thresh_int_ena : 1;
+	uint64_t thresh_up : 1;
+	uint64_t rsvd_555 : 1;
+	uint64_t thresh_qint_idx : 7;
+	uint64_t rsvd_563 : 1;
+	uint64_t err_qint_idx : 7;
+	uint64_t rsvd_575_571 : 5;
+	uint64_t thresh : 36; /* W9 */
+	uint64_t rsvd_615_612 : 4;
+	uint64_t fc_msh_dst : 11;
+	uint64_t rsvd_630_627 : 4;
+	uint64_t op_dpc_ena : 1;
+	uint64_t op_dpc_set : 6;
+	uint64_t stream_ctx : 1;
+	uint64_t rsvd_639 : 1;
+	uint64_t rsvd_703_640 : 64;  /* W10 */
+	uint64_t rsvd_767_704 : 64;  /* W11 */
+	uint64_t rsvd_831_768 : 64;  /* W12 */
+	uint64_t rsvd_895_832 : 64;  /* W13 */
+	uint64_t rsvd_959_896 : 64;  /* W14 */
+	uint64_t rsvd_1023_960 : 64; /* W15 */
+};
+
 /* NPA queue interrupt context hardware structure */
 struct npa_qint_hw_s {
 	uint32_t count : 22;
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index f1a3371ef9..9a9dcbdbda 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -119,6 +119,8 @@ struct mbox_msghdr {
 	M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp)       \
 	M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req,      \
 	  msg_rsp)                                                             \
+	M(NPA_CN20K_AQ_ENQ, 0x404, npa_cn20k_aq_enq, npa_cn20k_aq_enq_req,     \
+	  npa_cn20k_aq_enq_rsp)                                                \
 	/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */                          \
 	M(SSO_LF_ALLOC, 0x600, sso_lf_alloc, sso_lf_alloc_req,                 \
 	  sso_lf_alloc_rsp)                                                    \
@@ -1325,6 +1327,36 @@ struct npa_aq_enq_rsp {
 	};
 };
 
+struct npa_cn20k_aq_enq_req {
+	struct mbox_msghdr hdr;
+	uint32_t __io aura_id;
+	uint8_t __io ctype;
+	uint8_t __io op;
+	union {
+		/* Valid when op == WRITE/INIT and ctype == AURA */
+		__io struct npa_cn20k_aura_s aura;
+		/* Valid when op == WRITE/INIT and ctype == POOL */
+		__io struct npa_cn20k_pool_s pool;
+	};
+	/* Mask data when op == WRITE (1=write, 0=don't write) */
+	union {
+		/* Valid when op == WRITE and ctype == AURA */
+		__io struct npa_cn20k_aura_s aura_mask;
+		/* Valid when op == WRITE and ctype == POOL */
+		__io struct npa_cn20k_pool_s pool_mask;
+	};
+};
+
+struct npa_cn20k_aq_enq_rsp {
+	struct mbox_msghdr hdr;
+	union {
+		/* Valid when op == READ and ctype == AURA */
+		__io struct npa_cn20k_aura_s aura;
+		/* Valid when op == READ and ctype == POOL */
+		__io struct npa_cn20k_pool_s pool;
+	};
+};
+
 /* Disable all contexts of type 'ctype' */
 struct hwctx_disable_req {
 	struct mbox_msghdr hdr;
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index 26546f9297..2e91470c09 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -690,6 +690,7 @@ int
 roc_nix_queues_ctx_dump(struct roc_nix *roc_nix, FILE *file)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct npa_cn20k_aq_enq_req *npa_aq_cn20k;
 	int rc = -1, q, rq = nix->nb_rx_queues;
 	struct npa_aq_enq_rsp *npa_rsp;
 	struct npa_aq_enq_req *npa_aq;
@@ -772,8 +773,12 @@ roc_nix_queues_ctx_dump(struct roc_nix *roc_nix, FILE *file)
 			continue;
 		}
 
-		/* Dump SQB Aura minimal info */
-		npa_aq = mbox_alloc_msg_npa_aq_enq(mbox_get(npa_lf->mbox));
+		if (roc_model_is_cn20k()) {
+			npa_aq_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox_get(npa_lf->mbox));
+			npa_aq = (struct npa_aq_enq_req *)npa_aq_cn20k; /* Common fields */
+		} else {
+			npa_aq = mbox_alloc_msg_npa_aq_enq(mbox_get(npa_lf->mbox));
+		}
 		if (npa_aq == NULL) {
 			rc = -ENOSPC;
 			mbox_put(npa_lf->mbox);
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 12bfb9816b..2f72e67993 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -158,6 +158,8 @@ static int
 nix_fc_rq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 {
 	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct npa_cn20k_aq_enq_req *npa_req_cn20k;
+	struct npa_cn20k_aq_enq_rsp *npa_rsp_cn20k;
 	struct dev *dev = &nix->dev;
 	struct mbox *mbox = mbox_get(dev->mbox);
 	struct nix_aq_enq_rsp *rsp;
@@ -195,24 +197,44 @@ nix_fc_rq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
 	if (rc)
 		goto exit;
 
-	npa_req = mbox_alloc_msg_npa_aq_enq(mbox);
-	if (!npa_req) {
-		rc = -ENOSPC;
-		goto exit;
+	if (roc_model_is_cn20k()) {
+		npa_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+		if (!npa_req_cn20k) {
+			rc = -ENOSPC;
+			goto exit;
+		}
+
+		npa_req_cn20k->aura_id = rsp->rq.lpb_aura;
+		npa_req_cn20k->ctype = NPA_AQ_CTYPE_AURA;
+		npa_req_cn20k->op = NPA_AQ_INSTOP_READ;
+
+		rc = mbox_process_msg(mbox, (void *)&npa_rsp_cn20k);
+		if (rc)
+			goto exit;
+
+		fc_cfg->cq_cfg.cq_drop = npa_rsp_cn20k->aura.bp;
+		fc_cfg->cq_cfg.enable = npa_rsp_cn20k->aura.bp_ena;
+		fc_cfg->type = ROC_NIX_FC_RQ_CFG;
+	} else {
+		npa_req = mbox_alloc_msg_npa_aq_enq(mbox);
+		if (!npa_req) {
+			rc = -ENOSPC;
+			goto exit;
+		}
+
+		npa_req->aura_id = rsp->rq.lpb_aura;
+		npa_req->ctype = NPA_AQ_CTYPE_AURA;
+		npa_req->op = NPA_AQ_INSTOP_READ;
+
+		rc = mbox_process_msg(mbox, (void *)&npa_rsp);
+		if (rc)
+			goto exit;
+
+		fc_cfg->cq_cfg.cq_drop = npa_rsp->aura.bp;
+		fc_cfg->cq_cfg.enable = npa_rsp->aura.bp_ena;
+		fc_cfg->type = ROC_NIX_FC_RQ_CFG;
 	}
 
-	npa_req->aura_id = rsp->rq.lpb_aura;
-	npa_req->ctype = NPA_AQ_CTYPE_AURA;
-	npa_req->op = NPA_AQ_INSTOP_READ;
-
-	rc = mbox_process_msg(mbox, (void *)&npa_rsp);
-	if (rc)
-		goto exit;
-
-	fc_cfg->cq_cfg.cq_drop = npa_rsp->aura.bp;
-	fc_cfg->cq_cfg.enable = npa_rsp->aura.bp_ena;
-	fc_cfg->type = ROC_NIX_FC_RQ_CFG;
-
 exit:
 	mbox_put(mbox);
 	return rc;
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 9f3870a311..8144675f89 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -8,6 +8,7 @@
 int
 roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
 {
+	struct npa_cn20k_aq_enq_req *req_cn20k;
 	struct npa_aq_enq_req *req;
 	struct npa_aq_enq_rsp *rsp;
 	uint64_t aura_handle;
@@ -25,7 +26,12 @@ roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
 	mbox = mbox_get(lf->mbox);
 	/* Set/clear sqb aura fc_ena */
 	aura_handle = sq->aura_handle;
-	req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (roc_model_is_cn20k()) {
+		req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+		req = (struct npa_aq_enq_req *)req_cn20k;
+	} else {
+		req = mbox_alloc_msg_npa_aq_enq(mbox);
+	}
 	if (req == NULL)
 		goto exit;
 
@@ -52,7 +58,12 @@ roc_nix_tm_sq_aura_fc(struct roc_nix_sq *sq, bool enable)
 
 	/* Read back npa aura ctx */
 	if (enable) {
-		req = mbox_alloc_msg_npa_aq_enq(mbox);
+		if (roc_model_is_cn20k()) {
+			req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+			req = (struct npa_aq_enq_req *)req_cn20k;
+		} else {
+			req = mbox_alloc_msg_npa_aq_enq(mbox);
+		}
 		if (req == NULL) {
 			rc = -ENOSPC;
 			goto exit;
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 6c14c49901..934d7361a9 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -76,6 +76,7 @@ static int
 npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura,
 		   struct npa_pool_s *pool)
 {
+	struct npa_cn20k_aq_enq_req *aura_init_req_cn20k, *pool_init_req_cn20k;
 	struct npa_aq_enq_req *aura_init_req, *pool_init_req;
 	struct npa_aq_enq_rsp *aura_init_rsp, *pool_init_rsp;
 	struct mbox_dev *mdev = &m_box->dev[0];
@@ -83,7 +84,12 @@ npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura
 	struct mbox *mbox;
 
 	mbox = mbox_get(m_box);
-	aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (roc_model_is_cn20k()) {
+		aura_init_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+		aura_init_req = (struct npa_aq_enq_req *)aura_init_req_cn20k;
+	} else {
+		aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	}
 	if (aura_init_req == NULL)
 		goto exit;
 	aura_init_req->aura_id = aura_id;
@@ -91,6 +97,11 @@ npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura
 	aura_init_req->op = NPA_AQ_INSTOP_INIT;
 	mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));
 
-	pool_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (roc_model_is_cn20k()) {
+		pool_init_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+		pool_init_req = (struct npa_aq_enq_req *)pool_init_req_cn20k;
+	} else {
+		pool_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	}
 	if (pool_init_req == NULL)
 		goto exit;
@@ -121,13 +133,19 @@ npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura
 static int
 npa_aura_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura)
 {
+	struct npa_cn20k_aq_enq_req *aura_init_req_cn20k;
 	struct npa_aq_enq_req *aura_init_req;
 	struct npa_aq_enq_rsp *aura_init_rsp;
 	struct mbox *mbox;
 	int rc = -ENOSPC;
 
 	mbox = mbox_get(m_box);
-	aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (roc_model_is_cn20k()) {
+		aura_init_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+		aura_init_req = (struct npa_aq_enq_req *)aura_init_req_cn20k;
+	} else {
+		aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	}
 	if (aura_init_req == NULL)
 		goto exit;
 	aura_init_req->aura_id = aura_id;
@@ -151,6 +169,7 @@ npa_aura_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura)
 static int
 npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
 {
+	struct npa_cn20k_aq_enq_req *aura_req_cn20k, *pool_req_cn20k;
 	struct npa_aq_enq_req *aura_req, *pool_req;
 	struct npa_aq_enq_rsp *aura_rsp, *pool_rsp;
 	struct mbox_dev *mdev = &m_box->dev[0];
@@ -168,7 +187,12 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
 	} while (ptr);
 
 	mbox = mbox_get(m_box);
-	pool_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (roc_model_is_cn20k()) {
+		pool_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+		pool_req = (struct npa_aq_enq_req *)pool_req_cn20k;
+	} else {
+		pool_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	}
 	if (pool_req == NULL)
 		goto exit;
 	pool_req->aura_id = aura_id;
@@ -177,7 +201,12 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
 	pool_req->pool.ena = 0;
 	pool_req->pool_mask.ena = ~pool_req->pool_mask.ena;
 
-	aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (roc_model_is_cn20k()) {
+		aura_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+		aura_req = (struct npa_aq_enq_req *)aura_req_cn20k;
+	} else {
+		aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	}
 	if (aura_req == NULL)
 		goto exit;
 	aura_req->aura_id = aura_id;
@@ -185,8 +214,18 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
 	aura_req->op = NPA_AQ_INSTOP_WRITE;
 	aura_req->aura.ena = 0;
 	aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;
-	aura_req->aura.bp_ena = 0;
-	aura_req->aura_mask.bp_ena = ~aura_req->aura_mask.bp_ena;
+	if (roc_model_is_cn20k()) {
+		__io struct npa_cn20k_aura_s *aura_cn20k, *aura_mask_cn20k;
+
+		/* The bit positions/width of bp_ena have changed in cn20k */
+		aura_cn20k = (__io struct npa_cn20k_aura_s *)&aura_req->aura;
+		aura_cn20k->bp_ena = 0;
+		aura_mask_cn20k = (__io struct npa_cn20k_aura_s *)&aura_req->aura_mask;
+		aura_mask_cn20k->bp_ena = ~aura_mask_cn20k->bp_ena;
+	} else {
+		aura_req->aura.bp_ena = 0;
+		aura_req->aura_mask.bp_ena = ~aura_req->aura_mask.bp_ena;
+	}
 
 	rc = mbox_process(mbox);
 	if (rc < 0)
@@ -204,6 +243,12 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
 		goto exit;
 	}
 
+	if (roc_model_is_cn20k()) {
+		/* In cn20k, NPA does not use NDC */
+		rc = 0;
+		goto exit;
+	}
+
 	/* Sync NDC-NPA for LF */
 	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
 	if (ndc_req == NULL) {
@@ -226,6 +271,7 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
 static int
 npa_aura_fini(struct mbox *m_box, uint32_t aura_id)
 {
+	struct npa_cn20k_aq_enq_req *aura_req_cn20k;
 	struct npa_aq_enq_req *aura_req;
 	struct npa_aq_enq_rsp *aura_rsp;
 	struct ndc_sync_op *ndc_req;
@@ -236,7 +282,12 @@ npa_aura_fini(struct mbox *m_box, uint32_t aura_id)
 	plt_delay_us(10);
 
 	mbox = mbox_get(m_box);
-	aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (roc_model_is_cn20k()) {
+		aura_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+		aura_req = (struct npa_aq_enq_req *)aura_req_cn20k;
+	} else {
+		aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	}
 	if (aura_req == NULL)
 		goto exit;
 	aura_req->aura_id = aura_id;
@@ -254,6 +305,12 @@ npa_aura_fini(struct mbox *m_box, uint32_t aura_id)
 		goto exit;
 	}
 
+	if (roc_model_is_cn20k()) {
+		/* In cn20k, NPA does not use NDC */
+		rc = 0;
+		goto exit;
+	}
+
 	/* Sync NDC-NPA for LF */
 	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
 	if (ndc_req == NULL) {
@@ -335,6 +392,7 @@ roc_npa_pool_op_pc_reset(uint64_t aura_handle)
 int
 roc_npa_aura_drop_set(uint64_t aura_handle, uint64_t limit, bool ena)
 {
+	struct npa_cn20k_aq_enq_req *aura_req_cn20k;
 	struct npa_aq_enq_req *aura_req;
 	struct npa_lf *lf;
 	struct mbox *mbox;
@@ -344,7 +402,12 @@ roc_npa_aura_drop_set(uint64_t aura_handle, uint64_t limit, bool ena)
 	if (lf == NULL)
 		return NPA_ERR_DEVICE_NOT_BOUNDED;
 	mbox = mbox_get(lf->mbox);
-	aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (roc_model_is_cn20k()) {
+		aura_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+		aura_req = (struct npa_aq_enq_req *)aura_req_cn20k;
+	} else {
+		aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	}
 	if (aura_req == NULL) {
 		rc = -ENOMEM;
 		goto exit;
@@ -723,6 +786,7 @@ roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
 int
 roc_npa_aura_limit_modify(uint64_t aura_handle, uint16_t aura_limit)
 {
+	struct npa_cn20k_aq_enq_req *aura_req_cn20k;
 	struct npa_aq_enq_req *aura_req;
 	struct npa_lf *lf;
 	struct mbox *mbox;
@@ -733,7 +797,12 @@ roc_npa_aura_limit_modify(uint64_t aura_handle, uint16_t aura_limit)
 		return NPA_ERR_DEVICE_NOT_BOUNDED;
 
 	mbox = mbox_get(lf->mbox);
-	aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (roc_model_is_cn20k()) {
+		aura_req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+		aura_req = (struct npa_aq_enq_req *)aura_req_cn20k;
+	} else {
+		aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+	}
 	if (aura_req == NULL) {
 		rc = -ENOMEM;
 		goto exit;
@@ -834,12 +903,13 @@ int
 roc_npa_pool_range_update_check(uint64_t aura_handle)
 {
 	uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
-	struct npa_lf *lf;
-	struct npa_aura_lim *lim;
+	struct npa_cn20k_aq_enq_req *req_cn20k;
 	__io struct npa_pool_s *pool;
 	struct npa_aq_enq_req *req;
 	struct npa_aq_enq_rsp *rsp;
+	struct npa_aura_lim *lim;
 	struct mbox *mbox;
+	struct npa_lf *lf;
 	int rc;
 
 	lf = idev_npa_obj_get();
@@ -849,7 +919,12 @@ roc_npa_pool_range_update_check(uint64_t aura_handle)
 	lim = lf->aura_lim;
 
 	mbox = mbox_get(lf->mbox);
-	req = mbox_alloc_msg_npa_aq_enq(mbox);
+	if (roc_model_is_cn20k()) {
+		req_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+		req = (struct npa_aq_enq_req *)req_cn20k;
+	} else {
+		req = mbox_alloc_msg_npa_aq_enq(mbox);
+	}
 	if (req == NULL) {
 		rc = -ENOSPC;
 		goto exit;
@@ -903,6 +978,7 @@ int
 roc_npa_aura_bp_configure(uint64_t aura_handle, uint16_t bpid, uint8_t bp_intf, uint8_t bp_thresh,
 			  bool enable)
 {
+	/* TODO: Add support for CN20K */
 	uint32_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
 	struct npa_lf *lf = idev_npa_obj_get();
 	struct npa_aq_enq_req *req;
diff --git a/drivers/common/cnxk/roc_npa_debug.c b/drivers/common/cnxk/roc_npa_debug.c
index 173d32cd9b..9a16f481a8 100644
--- a/drivers/common/cnxk/roc_npa_debug.c
+++ b/drivers/common/cnxk/roc_npa_debug.c
@@ -89,8 +89,9 @@ npa_aura_dump(__io struct npa_aura_s *aura)
 int
 roc_npa_ctx_dump(void)
 {
-	struct npa_aq_enq_req *aq;
+	struct npa_cn20k_aq_enq_req *aq_cn20k;
 	struct npa_aq_enq_rsp *rsp;
+	struct npa_aq_enq_req *aq;
 	struct mbox *mbox;
 	struct npa_lf *lf;
 	uint32_t q;
@@ -106,7 +107,12 @@ roc_npa_ctx_dump(void)
 		if (plt_bitmap_get(lf->npa_bmp, q))
 			continue;
 
-		aq = mbox_alloc_msg_npa_aq_enq(mbox);
+		if (roc_model_is_cn20k()) {
+			aq_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+			aq = (struct npa_aq_enq_req *)aq_cn20k;
+		} else {
+			aq = mbox_alloc_msg_npa_aq_enq(mbox);
+		}
 		if (aq == NULL) {
 			rc = -ENOSPC;
 			goto exit;
@@ -129,7 +135,12 @@ roc_npa_ctx_dump(void)
 		if (plt_bitmap_get(lf->npa_bmp, q))
 			continue;
 
-		aq = mbox_alloc_msg_npa_aq_enq(mbox);
+		if (roc_model_is_cn20k()) {
+			aq_cn20k = mbox_alloc_msg_npa_cn20k_aq_enq(mbox);
+			aq = (struct npa_aq_enq_req *)aq_cn20k;
+		} else {
+			aq = mbox_alloc_msg_npa_aq_enq(mbox);
+		}
 		if (aq == NULL) {
 			rc = -ENOSPC;
 			goto exit;
-- 
2.34.1


Thread overview: 75+ messages
2024-09-10  8:58 [PATCH 00/33] add Marvell cn20k SOC support for mempool and net Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 01/33] mempool/cnxk: add cn20k PCI device ids Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 02/33] common/cnxk: accommodate change in aura field width Nithin Dabilpuram
2024-09-10  8:58 ` Nithin Dabilpuram [this message]
2024-09-10  8:58 ` [PATCH 04/33] mempool/cnxk: initialize mempool ops for cn20k Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 05/33] net/cnxk: added telemetry support do dump SA information Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 06/33] net/cnxk: handle timestamp correctly for VF Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 07/33] net/cnxk: update Rx offloads to handle timestamp Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 08/33] event/cnxk: handle timestamp for event mode Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 09/33] net/cnxk: update mbuf and rearm data for Rx inject packets Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 10/33] common/cnxk: remove restriction to clear RPM stats Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 11/33] common/cnxk: allow MAC address set/add with active VFs Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 12/33] net/cnxk: move PMD function defines to common code Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 13/33] common/cnxk: add cn20k NIX register definitions Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 14/33] common/cnxk: support NIX queue config for cn20k Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 15/33] common/cnxk: support bandwidth profile " Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 16/33] common/cnxk: support NIX debug " Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 17/33] common/cnxk: add RSS support " Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 18/33] net/cnxk: add cn20k base control path support Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 19/33] net/cnxk: support Rx function select for cn20k Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 20/33] net/cnxk: support Tx " Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 21/33] net/cnxk: support Rx burst scalar " Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 22/33] net/cnxk: support Rx burst vector " Nithin Dabilpuram
2024-09-10  8:58 ` [PATCH 23/33] net/cnxk: support Tx burst scalar " Nithin Dabilpuram
2024-09-10  8:59 ` [PATCH 24/33] net/cnxk: support Tx multi-seg in cn20k Nithin Dabilpuram
2024-09-10  8:59 ` [PATCH 25/33] net/cnxk: support Tx burst vector for cn20k Nithin Dabilpuram
2024-09-10  8:59 ` [PATCH 26/33] net/cnxk: support Tx multi-seg in " Nithin Dabilpuram
2024-09-10  8:59 ` [PATCH 27/33] common/cnxk: add flush wait after write of inline ctx Nithin Dabilpuram
2024-09-10  8:59 ` [PATCH 28/33] common/cnxk: fix CPT HW word size for outbound SA Nithin Dabilpuram
2024-09-10  8:59 ` [PATCH 29/33] net/cnxk: add PMD APIs for IPsec SA base and flush Nithin Dabilpuram
2024-09-10  8:59 ` [PATCH 30/33] net/cnxk: add PMD APIs to submit CPT instruction Nithin Dabilpuram
2024-09-10  8:59 ` [PATCH 31/33] net/cnxk: add PMD API to retrieve CPT queue statistics Nithin Dabilpuram
2024-09-10  8:59 ` [PATCH 32/33] net/cnxk: add option to enable custom inbound sa usage Nithin Dabilpuram
2024-09-10  8:59 ` [PATCH 33/33] net/cnxk: add PMD API to retrieve the model string Nithin Dabilpuram
2024-09-23 15:44 ` [PATCH 00/33] add Marvell cn20k SOC support for mempool and net Jerin Jacob
2024-09-26 16:01 ` [PATCH v2 00/18] " Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 01/18] mempool/cnxk: add cn20k PCI device ids Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 02/18] common/cnxk: accommodate change in aura field width Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 03/18] common/cnxk: use new NPA aq enq mbox for cn20k Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 04/18] mempool/cnxk: initialize mempool ops " Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 05/18] common/cnxk: add cn20k NIX register definitions Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 06/18] common/cnxk: support NIX queue config for cn20k Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 07/18] common/cnxk: support bandwidth profile " Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 08/18] common/cnxk: support NIX debug " Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 09/18] common/cnxk: add RSS support " Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 10/18] net/cnxk: add cn20k base control path support Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 11/18] net/cnxk: support Rx function select for cn20k Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 12/18] net/cnxk: support Tx " Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 13/18] net/cnxk: support Rx burst scalar " Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 14/18] net/cnxk: support Rx burst vector " Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 15/18] net/cnxk: support Tx burst scalar " Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 16/18] net/cnxk: support Tx multi-seg in cn20k Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 17/18] net/cnxk: support Tx burst vector for cn20k Nithin Dabilpuram
2024-09-26 16:01   ` [PATCH v2 18/18] net/cnxk: support Tx multi-seg in " Nithin Dabilpuram
2024-10-01 11:01   ` [PATCH v2 00/18] add Marvell cn20k SOC support for mempool and net Jerin Jacob
2024-10-01 12:40 ` [PATCH v3 " Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 01/18] mempool/cnxk: add cn20k PCI device ids Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 02/18] common/cnxk: accommodate change in aura field width Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 03/18] common/cnxk: use new NPA aq enq mbox for cn20k Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 04/18] mempool/cnxk: initialize mempool ops " Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 05/18] common/cnxk: add cn20k NIX register definitions Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 06/18] common/cnxk: support NIX queue config for cn20k Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 07/18] common/cnxk: support bandwidth profile " Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 08/18] common/cnxk: support NIX debug " Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 09/18] common/cnxk: add RSS support " Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 10/18] net/cnxk: add cn20k base control path support Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 11/18] net/cnxk: support Rx function select for cn20k Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 12/18] net/cnxk: support Tx " Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 13/18] net/cnxk: support Rx burst scalar " Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 14/18] net/cnxk: support Rx burst vector " Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 15/18] net/cnxk: support Tx burst scalar " Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 16/18] net/cnxk: support Tx multi-seg in cn20k Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 17/18] net/cnxk: support Tx burst vector for cn20k Nithin Dabilpuram
2024-10-01 12:40   ` [PATCH v3 18/18] net/cnxk: support Tx multi-seg in " Nithin Dabilpuram
2024-10-03 15:52   ` [PATCH v3 00/18] add Marvell cn20k SOC support for mempool and net Jerin Jacob
