DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines
@ 2024-02-08  8:59 Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 02/13] net/cnxk: add IPsec SA defines for PMD API Nithin Dabilpuram
                   ` (13 more replies)
  0 siblings, 14 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Since Inline IPsec on cn9k now uses the same opcode as LA, remove the
fast path opcode definitions.

Also fix devargs handling for ipsec_out_max_sa to allow 32-bit values.
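
With a 32-bit value, SA counts above 65535 can now be passed through the
existing devarg. A minimal usage sketch (the PCI BDF and SA count are
illustrative):

  dpdk-testpmd -a 0002:02:00.0,ipsec_out_max_sa=65536 -- -i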

Fixes: fe5846bcc076 ("net/cnxk: add devargs for min-max SPI")
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c    | 229 -------------------------
 drivers/common/cnxk/cnxk_security.h    |  12 --
 drivers/common/cnxk/roc_ie_on.h        |  60 -------
 drivers/common/cnxk/roc_nix_inl.h      |  50 +-----
 drivers/common/cnxk/version.map        |   4 -
 drivers/net/cnxk/cnxk_ethdev_devargs.c |   2 +-
 6 files changed, 3 insertions(+), 354 deletions(-)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index a8c3ba90cd..40685d0912 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -618,235 +618,6 @@ cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa)
 	return !!sa->w2.s.valid;
 }
 
-static inline int
-ipsec_xfrm_verify(struct rte_security_ipsec_xform *ipsec_xfrm,
-		  struct rte_crypto_sym_xform *crypto_xfrm)
-{
-	if (crypto_xfrm->next == NULL)
-		return -EINVAL;
-
-	if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
-		    crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
-			return -EINVAL;
-	} else {
-		if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
-		    crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_AUTH)
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int
-onf_ipsec_sa_common_param_fill(struct roc_ie_onf_sa_ctl *ctl, uint8_t *salt,
-			       uint8_t *cipher_key, uint8_t *hmac_opad_ipad,
-			       struct rte_security_ipsec_xform *ipsec_xfrm,
-			       struct rte_crypto_sym_xform *crypto_xfrm)
-{
-	struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
-	int rc, length, auth_key_len;
-	const uint8_t *key = NULL;
-	uint8_t ccm_flag = 0;
-
-	/* Set direction */
-	switch (ipsec_xfrm->direction) {
-	case RTE_SECURITY_IPSEC_SA_DIR_INGRESS:
-		ctl->direction = ROC_IE_SA_DIR_INBOUND;
-		auth_xfrm = crypto_xfrm;
-		cipher_xfrm = crypto_xfrm->next;
-		break;
-	case RTE_SECURITY_IPSEC_SA_DIR_EGRESS:
-		ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
-		cipher_xfrm = crypto_xfrm;
-		auth_xfrm = crypto_xfrm->next;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* Set protocol - ESP vs AH */
-	switch (ipsec_xfrm->proto) {
-	case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
-		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
-		break;
-	case RTE_SECURITY_IPSEC_SA_PROTO_AH:
-		return -ENOTSUP;
-	default:
-		return -EINVAL;
-	}
-
-	/* Set mode - transport vs tunnel */
-	switch (ipsec_xfrm->mode) {
-	case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
-		ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
-		break;
-	case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
-		ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* Set encryption algorithm */
-	if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		length = crypto_xfrm->aead.key.length;
-
-		switch (crypto_xfrm->aead.algo) {
-		case RTE_CRYPTO_AEAD_AES_GCM:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM;
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
-			memcpy(salt, &ipsec_xfrm->salt, 4);
-			key = crypto_xfrm->aead.key.data;
-			break;
-		case RTE_CRYPTO_AEAD_AES_CCM:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CCM;
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
-			ccm_flag = 0x07 & ~ROC_CPT_AES_CCM_CTR_LEN;
-			*salt = ccm_flag;
-			memcpy(PLT_PTR_ADD(salt, 1), &ipsec_xfrm->salt, 3);
-			key = crypto_xfrm->aead.key.data;
-			break;
-		default:
-			return -ENOTSUP;
-		}
-
-	} else {
-		rc = ipsec_xfrm_verify(ipsec_xfrm, crypto_xfrm);
-		if (rc)
-			return rc;
-
-		switch (cipher_xfrm->cipher.algo) {
-		case RTE_CRYPTO_CIPHER_AES_CBC:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
-			break;
-		case RTE_CRYPTO_CIPHER_AES_CTR:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR;
-			break;
-		default:
-			return -ENOTSUP;
-		}
-
-		switch (auth_xfrm->auth.algo) {
-		case RTE_CRYPTO_AUTH_SHA1_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1;
-			break;
-		default:
-			return -ENOTSUP;
-		}
-		auth_key_len = auth_xfrm->auth.key.length;
-		if (auth_key_len < 20 || auth_key_len > 64)
-			return -ENOTSUP;
-
-		key = cipher_xfrm->cipher.key.data;
-		length = cipher_xfrm->cipher.key.length;
-
-		ipsec_hmac_opad_ipad_gen(auth_xfrm, hmac_opad_ipad);
-	}
-
-	switch (length) {
-	case ROC_CPT_AES128_KEY_LEN:
-		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
-		break;
-	case ROC_CPT_AES192_KEY_LEN:
-		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
-		break;
-	case ROC_CPT_AES256_KEY_LEN:
-		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	memcpy(cipher_key, key, length);
-
-	if (ipsec_xfrm->options.esn)
-		ctl->esn_en = 1;
-
-	ctl->spi = rte_cpu_to_be_32(ipsec_xfrm->spi);
-	return 0;
-}
-
-int
-cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa,
-			   struct rte_security_ipsec_xform *ipsec_xfrm,
-			   struct rte_crypto_sym_xform *crypto_xfrm)
-{
-	struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;
-	int rc;
-
-	rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
-					    sa->hmac_key, ipsec_xfrm,
-					    crypto_xfrm);
-	if (rc)
-		return rc;
-
-	rte_wmb();
-
-	/* Enable SA */
-	ctl->valid = 1;
-	return 0;
-}
-
-int
-cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
-			    struct rte_security_ipsec_xform *ipsec_xfrm,
-			    struct rte_crypto_sym_xform *crypto_xfrm)
-{
-	struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
-	struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;
-	int rc;
-
-	/* Fill common params */
-	rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
-					    sa->hmac_key, ipsec_xfrm,
-					    crypto_xfrm);
-	if (rc)
-		return rc;
-
-	if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
-		goto skip_tunnel_info;
-
-	/* Tunnel header info */
-	switch (tunnel->type) {
-	case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
-		memcpy(&sa->ip_src, &tunnel->ipv4.src_ip,
-		       sizeof(struct in_addr));
-		memcpy(&sa->ip_dst, &tunnel->ipv4.dst_ip,
-		       sizeof(struct in_addr));
-		break;
-	case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
-		return -ENOTSUP;
-	default:
-		return -EINVAL;
-	}
-
-	/* Update udp encap ports */
-	if (ipsec_xfrm->options.udp_encap == 1) {
-		sa->udp_src = 4500;
-		sa->udp_dst = 4500;
-	}
-
-skip_tunnel_info:
-	rte_wmb();
-
-	/* Enable SA */
-	ctl->valid = 1;
-	return 0;
-}
-
-bool
-cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa)
-{
-	return !!sa->ctl.valid;
-}
-
-bool
-cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa)
-{
-	return !!sa->ctl.valid;
-}
-
 uint8_t
 cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
 		     enum rte_crypto_auth_algorithm a_algo,
diff --git a/drivers/common/cnxk/cnxk_security.h b/drivers/common/cnxk/cnxk_security.h
index 2277ce9144..72628ef3b8 100644
--- a/drivers/common/cnxk/cnxk_security.h
+++ b/drivers/common/cnxk/cnxk_security.h
@@ -48,18 +48,6 @@ cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
 bool __roc_api cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa);
 bool __roc_api cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa);
 
-/* [CN9K, CN10K) */
-int __roc_api
-cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa,
-			   struct rte_security_ipsec_xform *ipsec_xfrm,
-			   struct rte_crypto_sym_xform *crypto_xfrm);
-int __roc_api
-cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
-			    struct rte_security_ipsec_xform *ipsec_xfrm,
-			    struct rte_crypto_sym_xform *crypto_xfrm);
-bool __roc_api cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa);
-bool __roc_api cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa);
-
 /* [CN9K] */
 int __roc_api
 cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
diff --git a/drivers/common/cnxk/roc_ie_on.h b/drivers/common/cnxk/roc_ie_on.h
index 9933ffa148..11c995e9d1 100644
--- a/drivers/common/cnxk/roc_ie_on.h
+++ b/drivers/common/cnxk/roc_ie_on.h
@@ -269,66 +269,6 @@ struct roc_ie_on_inb_sa {
 #define ROC_IE_ON_UCC_L2_HDR_INFO_ERR	  0xCF
 #define ROC_IE_ON_UCC_L2_HDR_LEN_ERR	  0xE0
 
-struct roc_ie_onf_sa_ctl {
-	uint32_t spi;
-	uint64_t exp_proto_inter_frag : 8;
-	uint64_t rsvd_41_40 : 2;
-	/* Disable SPI, SEQ data in RPTR for Inbound inline */
-	uint64_t spi_seq_dis : 1;
-	uint64_t esn_en : 1;
-	uint64_t rsvd_44_45 : 2;
-	uint64_t encap_type : 2;
-	uint64_t enc_type : 3;
-	uint64_t rsvd_48 : 1;
-	uint64_t auth_type : 4;
-	uint64_t valid : 1;
-	uint64_t direction : 1;
-	uint64_t outer_ip_ver : 1;
-	uint64_t inner_ip_ver : 1;
-	uint64_t ipsec_mode : 1;
-	uint64_t ipsec_proto : 1;
-	uint64_t aes_key_len : 2;
-};
-
-struct roc_onf_ipsec_outb_sa {
-	/* w0 */
-	struct roc_ie_onf_sa_ctl ctl;
-
-	/* w1 */
-	uint8_t nonce[4];
-	uint16_t udp_src;
-	uint16_t udp_dst;
-
-	/* w2 */
-	uint32_t ip_src;
-	uint32_t ip_dst;
-
-	/* w3-w6 */
-	uint8_t cipher_key[32];
-
-	/* w7-w12 */
-	uint8_t hmac_key[48];
-};
-
-struct roc_onf_ipsec_inb_sa {
-	/* w0 */
-	struct roc_ie_onf_sa_ctl ctl;
-
-	/* w1 */
-	uint8_t nonce[4]; /* Only for AES-GCM */
-	uint32_t unused;
-
-	/* w2 */
-	uint32_t esn_hi;
-	uint32_t esn_low;
-
-	/* w3-w6 */
-	uint8_t cipher_key[32];
-
-	/* w7-w12 */
-	uint8_t hmac_key[48];
-};
-
 #define ROC_ONF_IPSEC_INB_MAX_L2_SZ	  32UL
 #define ROC_ONF_IPSEC_OUTB_MAX_L2_SZ	  30UL
 #define ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ (ROC_ONF_IPSEC_OUTB_MAX_L2_SZ + 2)
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index ab1e9c0f98..f5ce26f03f 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -4,24 +4,6 @@
 #ifndef _ROC_NIX_INL_H_
 #define _ROC_NIX_INL_H_
 
-/* ONF INB HW area */
-#define ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ                                        \
-	PLT_ALIGN(sizeof(struct roc_onf_ipsec_inb_sa), ROC_ALIGN)
-/* ONF INB SW reserved area */
-#define ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD 384
-#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ                                        \
-	(ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD)
-#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2 9
-
-/* ONF OUTB HW area */
-#define ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ                                       \
-	PLT_ALIGN(sizeof(struct roc_onf_ipsec_outb_sa), ROC_ALIGN)
-/* ONF OUTB SW reserved area */
-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD 128
-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ                                       \
-	(ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD)
-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2 8
-
 /* ON INB HW area */
 #define ROC_NIX_INL_ON_IPSEC_INB_HW_SZ                                         \
 	PLT_ALIGN(sizeof(struct roc_ie_on_inb_sa), ROC_ALIGN)
@@ -31,10 +13,10 @@
 	(ROC_NIX_INL_ON_IPSEC_INB_HW_SZ + ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD)
 #define ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2 10
 
-/* ONF OUTB HW area */
+/* ON OUTB HW area */
 #define ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ                                        \
 	PLT_ALIGN(sizeof(struct roc_ie_on_outb_sa), ROC_ALIGN)
-/* ONF OUTB SW reserved area */
+/* ON OUTB SW reserved area */
 #define ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD 256
 #define ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ                                        \
 	(ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD)
@@ -86,34 +68,6 @@ roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(void *sa)
 	return PLT_PTR_ADD(sa, ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ);
 }
 
-static inline struct roc_onf_ipsec_inb_sa *
-roc_nix_inl_onf_ipsec_inb_sa(uintptr_t base, uint64_t idx)
-{
-	uint64_t off = idx << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2;
-
-	return PLT_PTR_ADD(base, off);
-}
-
-static inline struct roc_onf_ipsec_outb_sa *
-roc_nix_inl_onf_ipsec_outb_sa(uintptr_t base, uint64_t idx)
-{
-	uint64_t off = idx << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2;
-
-	return PLT_PTR_ADD(base, off);
-}
-
-static inline void *
-roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(void *sa)
-{
-	return PLT_PTR_ADD(sa, ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ);
-}
-
-static inline void *
-roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(void *sa)
-{
-	return PLT_PTR_ADD(sa, ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ);
-}
-
 /* Inline device SSO Work callback */
 typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
 					  uint32_t soft_exp_event);
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 7b6afa63a9..2123ebf701 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -17,10 +17,6 @@ INTERNAL {
 	cnxk_logtype_sso;
 	cnxk_logtype_tim;
 	cnxk_logtype_tm;
-	cnxk_onf_ipsec_inb_sa_fill;
-	cnxk_onf_ipsec_outb_sa_fill;
-	cnxk_onf_ipsec_inb_sa_valid;
-	cnxk_onf_ipsec_outb_sa_valid;
 	cnxk_ot_ipsec_inb_sa_fill;
 	cnxk_ot_ipsec_outb_sa_fill;
 	cnxk_ot_ipsec_inb_sa_valid;
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 8e862be933..a0e9300cff 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -75,7 +75,7 @@ parse_ipsec_out_max_sa(const char *key, const char *value, void *extra_args)
 	if (errno)
 		val = 0;
 
-	*(uint16_t *)extra_args = val;
+	*(uint32_t *)extra_args = val;
 
 	return 0;
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH 02/13] net/cnxk: add IPsec SA defines for PMD API
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
@ 2024-02-08  8:59 ` Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 03/13] net/cnxk: add transport mode to security capability on cn9k Nithin Dabilpuram
                   ` (12 subsequent siblings)
  13 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Define inbound and outbound IPsec SA data types for the PMD APIs
rte_pmd_cnxk_hw_sa_read() and rte_pmd_cnxk_hw_sa_write().
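
A minimal usage sketch of the typed API (the device/session handles come from
the application, and a zero return on success is assumed; the union layout is
as defined in this patch):

  #include <stdio.h>
  #include <rte_security.h>
  #include <rte_pmd_cnxk.h>

  static void
  dump_inb_sa(void *device, struct rte_security_session *sess)
  {
      union rte_pmd_cnxk_ipsec_hw_sa sa;

      /* Read back the HW SA context backing this inline session */
      if (rte_pmd_cnxk_hw_sa_read(device, sess, &sa, sizeof(sa)) == 0)
          printf("inb SA: valid=%u spi=%u\n",
                 (unsigned int)sa.inb.w2.s.valid,
                 (unsigned int)sa.inb.w2.s.spi);
  }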

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cn10k_ethdev_sec.c |  18 +-
 drivers/net/cnxk/rte_pmd_cnxk.h     | 397 +++++++++++++++++++++++++++-
 2 files changed, 411 insertions(+), 4 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 575d0fabd5..05ec49d981 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -14,6 +14,20 @@
 #include <cnxk_security.h>
 #include <roc_priv.h>
 
+PLT_STATIC_ASSERT(offsetof(struct rte_pmd_cnxk_ipsec_inb_sa, ctx.ar_winbits) ==
+		  offsetof(struct roc_ot_ipsec_inb_sa, ctx.ar_winbits));
+
+PLT_STATIC_ASSERT(offsetof(struct rte_pmd_cnxk_ipsec_outb_sa, ctx.mib_pkts) ==
+		  offsetof(struct roc_ot_ipsec_outb_sa, ctx.mib_pkts));
+
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_CTX_MAX_CKEY_LEN == ROC_CTX_MAX_CKEY_LEN);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN == ROC_CTX_MAX_OPAD_IPAD_LEN);
+
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MIN == ROC_AR_WIN_SIZE_MIN);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MAX == ROC_AR_WIN_SIZE_MAX);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_LOG_MIN_AR_WIN_SIZE_M1 == ROC_LOG_MIN_AR_WIN_SIZE_M1);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WINBITS_SZ == ROC_AR_WINBITS_SZ);
+
 static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
 	{	/* AES GCM */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -1143,7 +1157,7 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
 
 int
 rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
-			void *data, uint32_t len)
+			union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
@@ -1166,7 +1180,7 @@ rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
 
 int
 rte_pmd_cnxk_hw_sa_write(void *device, struct rte_security_session *sess,
-			 void *data, uint32_t len)
+			 union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
diff --git a/drivers/net/cnxk/rte_pmd_cnxk.h b/drivers/net/cnxk/rte_pmd_cnxk.h
index 7827c33ac9..43f2a7ed9b 100644
--- a/drivers/net/cnxk/rte_pmd_cnxk.h
+++ b/drivers/net/cnxk/rte_pmd_cnxk.h
@@ -60,6 +60,399 @@ struct rte_pmd_cnxk_sec_action {
 	enum rte_pmd_cnxk_sec_action_alg alg;
 };
 
+#define RTE_PMD_CNXK_CTX_MAX_CKEY_LEN	   32
+#define RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN 128
+
+/** Anti replay window size supported */
+#define RTE_PMD_CNXK_AR_WIN_SIZE_MIN	    64
+#define RTE_PMD_CNXK_AR_WIN_SIZE_MAX	    4096
+#define RTE_PMD_CNXK_LOG_MIN_AR_WIN_SIZE_M1 5
+
+/** u64 array size to fit anti replay window bits */
+#define RTE_PMD_CNXK_AR_WINBITS_SZ (RTE_ALIGN_CEIL(RTE_PMD_CNXK_AR_WIN_SIZE_MAX, 64) / 64)
+
+/** Outer header info for Inbound or Outbound */
+union rte_pmd_cnxk_ipsec_outer_ip_hdr {
+	struct {
+		/** IPv4 destination */
+		uint32_t dst_addr;
+		/** IPv4 source */
+		uint32_t src_addr;
+	} ipv4;
+	struct {
+		/** IPv6 source */
+		uint8_t src_addr[16];
+		/** IPv6 destination */
+		uint8_t dst_addr[16];
+	} ipv6;
+};
+
+/** Inbound IPsec context update region */
+struct rte_pmd_cnxk_ipsec_inb_ctx_update_reg {
+	/** Highest sequence number received */
+	uint64_t ar_base;
+	/** Valid bit for 64-bit words of replay window */
+	uint64_t ar_valid_mask;
+	/** Hard life for SA */
+	uint64_t hard_life;
+	/** Soft life for SA */
+	uint64_t soft_life;
+	/** MIB octets */
+	uint64_t mib_octs;
+	/** MIB packets */
+	uint64_t mib_pkts;
+	/** AR window bits */
+	uint64_t ar_winbits[RTE_PMD_CNXK_AR_WINBITS_SZ];
+};
+
+/** Outbound IPsec IV data */
+union rte_pmd_cnxk_ipsec_outb_iv {
+	uint64_t u64[2];
+	/** IV debug - 16B */
+	uint8_t iv_dbg[16];
+	struct {
+		/** IV debug - 8B */
+		uint8_t iv_dbg1[4];
+		/** Salt */
+		uint8_t salt[4];
+
+		uint32_t rsvd;
+		/** IV debug - 8B */
+		uint8_t iv_dbg2[4];
+	} s;
+};
+
+/** Outbound IPsec context update region */
+struct rte_pmd_cnxk_ipsec_outb_ctx_update_reg {
+	union {
+		struct {
+			uint64_t reserved_0_2 : 3;
+			uint64_t address : 57;
+			uint64_t mode : 4;
+		} s;
+		uint64_t u64;
+	} err_ctl;
+
+	uint64_t esn_val;
+	uint64_t hard_life;
+	uint64_t soft_life;
+	uint64_t mib_octs;
+	uint64_t mib_pkts;
+};
+
+/**
+ * Inbound IPsec SA
+ */
+struct rte_pmd_cnxk_ipsec_inb_sa {
+	/** Word0 */
+	union {
+		struct {
+			/** AR window size */
+			uint64_t ar_win : 3;
+			/** Hard life enable */
+			uint64_t hard_life_dec : 1;
+			/** Soft life enable */
+			uint64_t soft_life_dec : 1;
+
+			/** Count global octets */
+			uint64_t count_glb_octets : 1;
+			/** Count global pkts */
+			uint64_t count_glb_pkts : 1;
+			/** Count bytes */
+			uint64_t count_mib_bytes : 1;
+
+			/** Count pkts */
+			uint64_t count_mib_pkts : 1;
+			/** HW context offset */
+			uint64_t hw_ctx_off : 7;
+
+			/** Context ID */
+			uint64_t ctx_id : 16;
+
+			/** Original packet free absolute */
+			uint64_t orig_pkt_fabs : 1;
+			/** Original packet free */
+			uint64_t orig_pkt_free : 1;
+			/** PKIND for second pass */
+			uint64_t pkind : 6;
+
+			uint64_t rsvd0 : 1;
+			/** Ether type overwrite */
+			uint64_t et_ovrwr : 1;
+			/** Packet output type */
+			uint64_t pkt_output : 2;
+			/** Packet format type */
+			uint64_t pkt_format : 1;
+			/** Defrag option */
+			uint64_t defrag_opt : 2;
+			/** Reserved for X2P dest */
+			uint64_t x2p_dst : 1;
+
+			/** Context push size */
+			uint64_t ctx_push_size : 7;
+			uint64_t rsvd1 : 1;
+
+			/** Context header size */
+			uint64_t ctx_hdr_size : 2;
+			/** AOP enable */
+			uint64_t aop_valid : 1;
+			uint64_t rsvd2 : 1;
+			/** Context size */
+			uint64_t ctx_size : 4;
+		} s;
+		uint64_t u64;
+	} w0;
+
+	/** Word1 */
+	union {
+		struct {
+			/** Original packet aura */
+			uint64_t orig_pkt_aura : 20;
+			uint64_t rsvd3 : 4;
+			/** Original packet free offset */
+			uint64_t orig_pkt_foff : 8;
+			/** SA cookie */
+			uint64_t cookie : 32;
+		} s;
+		uint64_t u64;
+	} w1;
+
+	/** Word 2 */
+	union {
+		struct {
+			/** SA valid */
+			uint64_t valid : 1;
+			/** SA direction */
+			uint64_t dir : 1;
+			uint64_t rsvd11 : 1;
+			uint64_t rsvd4 : 1;
+			/** IPsec mode */
+			uint64_t ipsec_mode : 1;
+			/** IPsec protocol */
+			uint64_t ipsec_protocol : 1;
+			/** AES key length */
+			uint64_t aes_key_len : 2;
+
+			/** Encryption algo */
+			uint64_t enc_type : 3;
+			/** Soft life and hard life unit */
+			uint64_t life_unit : 1;
+			/** Authentication algo */
+			uint64_t auth_type : 4;
+
+			/** Encapsulation type */
+			uint64_t encap_type : 2;
+			/** Ether type override enable */
+			uint64_t et_ovrwr_ddr_en : 1;
+			/** ESN enable */
+			uint64_t esn_en : 1;
+			/** Transport mode L4 checksum incrementally update */
+			uint64_t tport_l4_incr_csum : 1;
+			/** Outer IP header verification */
+			uint64_t ip_hdr_verify : 2;
+			/** UDP encapsulation ports verification */
+			uint64_t udp_ports_verify : 1;
+
+			/** Return 64B of L2/L3 header on error */
+			uint64_t l3hdr_on_err : 1;
+			uint64_t rsvd6 : 6;
+			uint64_t rsvd12 : 1;
+
+			/** SPI */
+			uint64_t spi : 32;
+		} s;
+		uint64_t u64;
+	} w2;
+
+	/** Word3 */
+	uint64_t rsvd7;
+
+	/** Word4 - Word7 */
+	uint8_t cipher_key[RTE_PMD_CNXK_CTX_MAX_CKEY_LEN];
+
+	/** Word8 - Word9 */
+	union {
+		struct {
+			uint32_t rsvd8;
+			/** IV salt */
+			uint8_t salt[4];
+		} s;
+		uint64_t u64;
+	} w8;
+	uint64_t rsvd9;
+
+	/** Word10 */
+	union {
+		struct {
+			uint64_t rsvd10 : 32;
+			/** UDP encapsulation source port */
+			uint64_t udp_src_port : 16;
+			/** UDP encapsulation destination port */
+			uint64_t udp_dst_port : 16;
+		} s;
+		uint64_t u64;
+	} w10;
+
+	/** Word11 - Word14 */
+	union rte_pmd_cnxk_ipsec_outer_ip_hdr outer_hdr;
+
+	/** Word15 - Word30 */
+	uint8_t hmac_opad_ipad[RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN];
+
+	/** Word31 - Word100 */
+	struct rte_pmd_cnxk_ipsec_inb_ctx_update_reg ctx;
+};
+
+/**
+ * Outbound IPsec SA
+ */
+struct rte_pmd_cnxk_ipsec_outb_sa {
+	/** Word0 */
+	union {
+		struct {
+			/** ESN enable */
+			uint64_t esn_en : 1;
+			/** IP ID generation type */
+			uint64_t ip_id : 1;
+			uint64_t rsvd0 : 1;
+			/** Hard life enable */
+			uint64_t hard_life_dec : 1;
+			/** Soft life enable */
+			uint64_t soft_life_dec : 1;
+
+			/** Count global octets */
+			uint64_t count_glb_octets : 1;
+			/** Count global pkts */
+			uint64_t count_glb_pkts : 1;
+			/** Count bytes */
+			uint64_t count_mib_bytes : 1;
+
+			/** Count pkts */
+			uint64_t count_mib_pkts : 1;
+			/** HW context offset */
+			uint64_t hw_ctx_off : 7;
+
+			/** Context ID */
+			uint64_t ctx_id : 16;
+			uint64_t rsvd1 : 16;
+
+			/** Context push size */
+			uint64_t ctx_push_size : 7;
+			uint64_t rsvd2 : 1;
+
+			/** Context header size */
+			uint64_t ctx_hdr_size : 2;
+			/** AOP enable */
+			uint64_t aop_valid : 1;
+			uint64_t rsvd3 : 1;
+			/** Context size */
+			uint64_t ctx_size : 4;
+		} s;
+		uint64_t u64;
+	} w0;
+
+	/** Word1 */
+	union {
+		struct {
+			uint64_t rsvd4 : 32;
+			/** SA cookie */
+			uint64_t cookie : 32;
+		} s;
+		uint64_t u64;
+	} w1;
+
+	/** Word 2 */
+	union {
+		struct {
+			/** SA valid */
+			uint64_t valid : 1;
+			/** SA direction */
+			uint64_t dir : 1;
+			uint64_t rsvd11 : 1;
+			uint64_t rsvd5 : 1;
+			/** IPsec mode */
+			uint64_t ipsec_mode : 1;
+			/** IPsec protocol */
+			uint64_t ipsec_protocol : 1;
+
+			/** AES key length */
+			uint64_t aes_key_len : 2;
+
+			/** Encryption algo */
+			uint64_t enc_type : 3;
+			/** Soft life and hard life unit */
+			uint64_t life_unit : 1;
+			/** Authentication algo */
+			uint64_t auth_type : 4;
+
+			/** Encapsulation type */
+			uint64_t encap_type : 2;
+			/** DF source */
+			uint64_t ipv4_df_src_or_ipv6_flw_lbl_src : 1;
+			/** DSCP source */
+			uint64_t dscp_src : 1;
+			/** IV source */
+			uint64_t iv_src : 2;
+			/** IPID value in outer header */
+			uint64_t ipid_gen : 1;
+			uint64_t rsvd6 : 1;
+
+			uint64_t rsvd7 : 7;
+			uint64_t rsvd12 : 1;
+
+			/** SPI */
+			uint64_t spi : 32;
+		} s;
+		uint64_t u64;
+	} w2;
+
+	/** Word3 */
+	uint64_t rsvd8;
+
+	/** Word4 - Word7 */
+	uint8_t cipher_key[RTE_PMD_CNXK_CTX_MAX_CKEY_LEN];
+
+	/** Word8 - Word9 */
+	union rte_pmd_cnxk_ipsec_outb_iv iv;
+
+	/** Word10 */
+	union {
+		struct {
+			uint64_t rsvd9 : 4;
+			/** Outer header IPv4 DF or IPv6 flow label */
+			uint64_t ipv4_df_or_ipv6_flw_lbl : 20;
+
+			/** DSCP for outer header */
+			uint64_t dscp : 6;
+			uint64_t rsvd10 : 2;
+
+			/** UDP encapsulation destination port */
+			uint64_t udp_dst_port : 16;
+
+			/** UDP encapsulation source port */
+			uint64_t udp_src_port : 16;
+		} s;
+		uint64_t u64;
+	} w10;
+
+	/** Word11 - Word14 */
+	union rte_pmd_cnxk_ipsec_outer_ip_hdr outer_hdr;
+
+	/** Word15 - Word30 */
+	uint8_t hmac_opad_ipad[RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN];
+
+	/** Word31 - Word36 */
+	struct rte_pmd_cnxk_ipsec_outb_ctx_update_reg ctx;
+};
+
+/** Inbound/Outbound IPsec SA */
+union rte_pmd_cnxk_ipsec_hw_sa {
+	/** Inbound SA */
+	struct rte_pmd_cnxk_ipsec_inb_sa inb;
+	/** Outbound SA */
+	struct rte_pmd_cnxk_ipsec_outb_sa outb;
+};
+
 /**
  * Read HW SA context from session.
  *
@@ -77,7 +470,7 @@ struct rte_pmd_cnxk_sec_action {
  */
 __rte_experimental
 int rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
-			    void *data, uint32_t len);
+			    union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len);
 /**
  * Write HW SA context to session.
  *
@@ -95,7 +488,7 @@ int rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
  */
 __rte_experimental
 int rte_pmd_cnxk_hw_sa_write(void *device, struct rte_security_session *sess,
-			     void *data, uint32_t len);
+			     union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len);
 
 /**
  * Get pointer to CPT result info for inline inbound processed pkt.
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH 03/13] net/cnxk: add transport mode to security capability on cn9k
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 02/13] net/cnxk: add IPsec SA defines for PMD API Nithin Dabilpuram
@ 2024-02-08  8:59 ` Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 04/13] common/cnxk: dump selected SQ entries Nithin Dabilpuram
                   ` (11 subsequent siblings)
  13 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Add transport mode to the cn9k security capabilities since it is supported
by the microcode (UCODE).
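
A minimal sketch of a session configuration that this capability now permits
(the crypto transform and option values are illustrative):

  struct rte_security_session_conf conf = {
      .action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
      .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
      .ipsec = {
          .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
          .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
          .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
          .options = { .esn = 1, .udp_encap = 1 },
      },
      .crypto_xform = &aead_xform, /* e.g. AES-GCM xform defined elsewhere */
  };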

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cn9k_ethdev_sec.c | 33 ++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 688b13ae1e..a0e0a73639 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -351,6 +351,39 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
 	},
+	{	/* IPsec Inline Protocol ESP Transport Ingress */
+		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+			.replay_win_sz_max = CNXK_ON_AR_WIN_SIZE_MAX,
+			.options = {
+				.udp_encap = 1,
+				.esn = 1,
+			},
+		},
+		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
+		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+	},
+	{	/* IPsec Inline Protocol ESP Transport Egress */
+		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+			.replay_win_sz_max = CNXK_ON_AR_WIN_SIZE_MAX,
+			.options = {
+				.iv_gen_disable = 1,
+				.udp_encap = 1,
+				.esn = 1,
+			},
+		},
+		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
+		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+	},
 	{
 		.action = RTE_SECURITY_ACTION_TYPE_NONE
 	}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH 04/13] common/cnxk: dump selected SQ entries
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 02/13] net/cnxk: add IPsec SA defines for PMD API Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 03/13] net/cnxk: add transport mode to security capability on cn9k Nithin Dabilpuram
@ 2024-02-08  8:59 ` Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 05/13] net/cnxk: added Tx descriptor dump API Nithin Dabilpuram
                   ` (10 subsequent siblings)
  13 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

New API to dump detailed SQ entries.
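
A minimal usage sketch from driver debug code (the queue id, offset and
descriptor count are illustrative):

  /* Dump up to 4 descriptors of SQ 0, starting at its current head */
  roc_nix_sq_desc_dump(roc_nix, 0, 0, 4, stderr);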

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix.h       |   2 +
 drivers/common/cnxk/roc_nix_debug.c | 172 ++++++++++++++++++++++++++++
 drivers/common/cnxk/version.map     |   1 +
 3 files changed, 175 insertions(+)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 84e6fc3df5..9d57ca0be7 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -553,6 +553,8 @@ void __roc_api roc_nix_cqe_dump(FILE *file, const struct nix_cqe_hdr_s *cq);
 void __roc_api roc_nix_rq_dump(struct roc_nix_rq *rq, FILE *file);
 void __roc_api roc_nix_cq_dump(struct roc_nix_cq *cq, FILE *file);
 void __roc_api roc_nix_sq_dump(struct roc_nix_sq *sq, FILE *file);
+int __roc_api roc_nix_sq_desc_dump(struct roc_nix *roc_nix, uint16_t q, uint16_t offset,
+				   uint16_t num, FILE *file);
 void __roc_api roc_nix_tm_dump(struct roc_nix *roc_nix, FILE *file);
 void __roc_api roc_nix_dump(struct roc_nix *roc_nix, FILE *file);
 
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index 8962a76097..26546f9297 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -1362,3 +1362,175 @@ roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix, FILE *file)
 		cpt_lf_print(&lf_base[i]);
 	}
 }
+
+static void
+nix_tm_sqe_dump(uint64_t *sqe, int head_off, int end_off, int instr_sz, FILE *file, int full,
+		uint16_t *num)
+{
+	int i, j, inc = (8 * (0x2 >> instr_sz)), segs;
+	uint64_t *ptr;
+
+	if (!sqe || !(*num))
+		return;
+
+	ptr = sqe + (head_off * inc);
+	for (i = head_off; i < end_off; i++) {
+		if (!(*num))
+			return;
+		ptr = sqe + (i * inc);
+		nix_dump(file, "Entry : %d >>>>>\n", i);
+		nix_dump(file, "\t\tSEND_HDR[0]: 0x%016lx SEND_HDR[1]: 0x%016lx\n", *ptr,
+			 *(ptr + 1));
+		*num = *num - 1;
+		if (!full)
+			continue;
+		ptr += 2;
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_EXT) {
+			nix_dump(file, "\t\tSUBDC_EXT[0]: 0x%016lx SUBDC_EXT[1]: 0x%016lx\n", *ptr,
+				 *(ptr + 1));
+			ptr += 2;
+		}
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_AGE_AND_STATS) {
+			nix_dump(file,
+				 "\t\tSUBDC_AGE_STATS[0]: 0x%016lx SUBDC_AGE_STATS[1]: 0x%016lx\n",
+				 *ptr, *(ptr + 1));
+			ptr += 2;
+		}
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_JUMP) {
+			nix_dump(file, "\t\tSUBDC_JUMP: 0x%016lx\n", *ptr);
+			ptr += 1;
+			ptr = (uint64_t *)*ptr;
+		}
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_CRC) {
+			nix_dump(file, "\t\tSUBDC_CRC[0]: 0x%016lx SUBDC_CRC[1]: 0x%016lx\n", *ptr,
+				 *(ptr + 1));
+			ptr += 2;
+		}
+		/* We are not parsing immediate send descriptor */
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_IMM) {
+			nix_dump(file, "\t\tSUBDC_IMM: 0x%016lx ", *ptr);
+			continue;
+		}
+		while (1) {
+			if (((*ptr >> 60) & 0xF) == NIX_SUBDC_SG) {
+				nix_dump(file, "\t\tSUBDC_SG: 0x%016lx   ", *ptr);
+				segs = (*ptr >> 48) & 0x3;
+				ptr += 1;
+				for (j = 0; j < segs; j++) {
+					nix_dump(file, "\t\t\t  0x%016lx   ", *ptr);
+					ptr += 1;
+				}
+				if (segs == 2)
+					ptr += 1;
+			} else if (((*ptr >> 60) & 0xF) == NIX_SUBDC_SG2) {
+				nix_dump(file, "\t\tSUBDC_SG2: 0x%016lx   ", *ptr);
+				ptr += 1;
+				nix_dump(file, "\t\t\t  0x%016lx   ", *ptr);
+				ptr += 1;
+			} else
+				break;
+		}
+	}
+}
+
+int
+roc_nix_sq_desc_dump(struct roc_nix *roc_nix, uint16_t q, uint16_t offset, uint16_t num, FILE *file)
+{
+	int head_off, count, rc = 0, tail_off, full = 0;
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct roc_nix_sq *sq = nix->sqs[q];
+	void *sqb_buf, *dat, *tail_sqb;
+	struct ndc_sync_op *ndc_req;
+	struct dev *dev = &nix->dev;
+	uint16_t sqes_per_sqb;
+	struct mbox *mbox;
+
+	mbox = dev->mbox;
+	/* Sync NDC-NIX-TX for LF */
+	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox_get(mbox));
+	if (ndc_req == NULL) {
+		mbox_put(mbox);
+		return -EFAULT;
+	}
+
+	ndc_req->nix_lf_tx_sync = 1;
+	if (mbox_process(mbox))
+		rc |= NIX_ERR_NDC_SYNC;
+	mbox_put(mbox);
+
+	if (rc)
+		plt_err("NDC_SYNC failed rc %d", rc);
+
+	rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_SQ, q, (void *)&dat);
+	if (rc)
+		return rc;
+	if (roc_model_is_cn9k()) {
+		volatile struct nix_sq_ctx_s *ctx = (struct nix_sq_ctx_s *)dat;
+
+		if (ctx->mnq_dis || ctx->lmt_dis)
+			full = 1;
+
+		count = ctx->sqb_count;
+		sqb_buf = (void *)ctx->head_sqb;
+		tail_sqb = (void *)ctx->tail_sqb;
+		head_off = ctx->head_offset;
+		tail_off = ctx->tail_offset;
+	} else {
+		volatile struct nix_cn10k_sq_ctx_s *ctx = (struct nix_cn10k_sq_ctx_s *)dat;
+
+		if (ctx->mnq_dis || ctx->lmt_dis)
+			full = 1;
+
+		count = ctx->sqb_count;
+		sqb_buf = (void *)ctx->head_sqb;
+		tail_sqb = (void *)ctx->tail_sqb;
+		head_off = ctx->head_offset;
+		tail_off = ctx->tail_offset;
+	}
+	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
+	while (count) {
+		void *next_sqb;
+
+		if (sqb_buf == tail_sqb) {
+			if ((head_off + offset) >= tail_off) /* Nothing to dump */
+				return 0;
+			head_off += tail_off;
+			break;
+		} else if ((head_off + offset) >= sqes_per_sqb) {
+			next_sqb = *(void **)((uint64_t *)sqb_buf +
+					      (uint32_t)((sqes_per_sqb - 1) *
+							 (0x2 >> sq->max_sqe_sz) * 8));
+			/* While traffic is running, HW may have freed/reused this SQE */
+			if (!next_sqb)
+				return 0;
+			sqb_buf = next_sqb;
+			head_off = 0;
+			count--;
+		} else {
+			head_off += offset;
+			break;
+		}
+	}
+	while (count) {
+		void *next_sqb;
+
+		if (sqb_buf == tail_sqb)
+			nix_tm_sqe_dump(sqb_buf, head_off, tail_off, sq->max_sqe_sz, file, full,
+					&num);
+		else
+			nix_tm_sqe_dump(sqb_buf, head_off, (sqes_per_sqb - 1), sq->max_sqe_sz, file,
+					full, &num);
+		if (!num)
+			break;
+		next_sqb = *(void **)((uint64_t *)sqb_buf +
+				      (uint32_t)((sqes_per_sqb - 1) * (0x2 >> sq->max_sqe_sz) * 8));
+		/* While traffic is running, HW may have freed/reused this SQE */
+		if (!next_sqb)
+			return 0;
+		sqb_buf = next_sqb;
+		head_off = 0;
+		count--;
+	}
+
+	return 0;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 2123ebf701..45b5148522 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -336,6 +336,7 @@ INTERNAL {
 	roc_nix_rx_drop_re_set;
 	roc_nix_rx_queue_intr_disable;
 	roc_nix_rx_queue_intr_enable;
+	roc_nix_sq_desc_dump;
 	roc_nix_sq_dump;
 	roc_nix_sq_ena_dis;
 	roc_nix_sq_fini;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH 05/13] net/cnxk: added Tx descriptor dump API
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                   ` (2 preceding siblings ...)
  2024-02-08  8:59 ` [PATCH 04/13] common/cnxk: dump selected SQ entries Nithin Dabilpuram
@ 2024-02-08  8:59 ` Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 06/13] net/cnxk: fix issue with buff size compute Nithin Dabilpuram
                   ` (9 subsequent siblings)
  13 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

New API to dump selected descriptor entries from SQE list.
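
Applications can reach this through the generic ethdev descriptor dump API;
a minimal sketch (port, queue, offset and descriptor count are illustrative):

  /* Dump up to 8 Tx descriptors of queue 0 on port 0 */
  rte_eth_tx_descriptor_dump(0, 0, 0, 8, stdout);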

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c     |  1 +
 drivers/net/cnxk/cnxk_ethdev.h     |  2 ++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 10 ++++++++++
 3 files changed, 13 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 2372a4e793..7640910782 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1821,6 +1821,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.cman_config_init = cnxk_nix_cman_config_init,
 	.cman_config_set = cnxk_nix_cman_config_set,
 	.cman_config_get = cnxk_nix_cman_config_get,
+	.eth_tx_descriptor_dump = cnxk_nix_tx_descriptor_dump,
 };
 
 void
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 4d3ebf123b..4232cae3d2 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -557,6 +557,8 @@ int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
 int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
 			     int mark_yellow, int mark_red,
 			     struct rte_tm_error *error);
+int cnxk_nix_tx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t offset,
+				uint16_t num, FILE *file);
 
 /* MTR */
 int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 5de2919047..e9ab8da781 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -1313,3 +1313,13 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
 exit:
 	return rc;
 }
+
+int
+cnxk_nix_tx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t offset,
+			    uint16_t num, FILE *file)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *nix = &dev->nix;
+
+	return roc_nix_sq_desc_dump(nix, qid, offset, num, file);
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH 06/13] net/cnxk: fix issue with buff size compute
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                   ` (3 preceding siblings ...)
  2024-02-08  8:59 ` [PATCH 05/13] net/cnxk: added Tx descriptor dump API Nithin Dabilpuram
@ 2024-02-08  8:59 ` Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 07/13] common/cnxk: skip setting Tx MTU separately Nithin Dabilpuram
                   ` (8 subsequent siblings)
  13 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

In the case where cnxk_nix_mtu_set() is called before
data->min_rx_buf_size is set, use the buffer size from the first
RQ's mempool.

Fixes: 34b46320f446 ("net/cnxk: perform early MTU setup for event mode")

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev_ops.c | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index e9ab8da781..e816884d47 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -544,8 +544,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq_sp;
+	uint32_t buffsz = 0;
 	int rc = -EINVAL;
-	uint32_t buffsz;
 
 	frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en;
 
@@ -561,8 +562,24 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 		goto exit;
 	}
 
-	buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
-	old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;
+	if (!eth_dev->data->nb_rx_queues)
+		goto skip_buffsz_check;
+
+	/* Perform buff size check */
+	if (data->min_rx_buf_size) {
+		buffsz = data->min_rx_buf_size;
+	} else if (eth_dev->data->rx_queues && eth_dev->data->rx_queues[0]) {
+		rxq_sp = cnxk_eth_rxq_to_sp(data->rx_queues[0]);
+
+		if (rxq_sp->qconf.mp)
+			buffsz = rte_pktmbuf_data_room_size(rxq_sp->qconf.mp);
+	}
+
+	/* Skip validation if RQs are not yet set up */
+	if (!buffsz)
+		goto skip_buffsz_check;
+
+	buffsz -= RTE_PKTMBUF_HEADROOM;
 
 	/* Refuse MTU that requires the support of scattered packets
 	 * when this feature has not been enabled before.
@@ -580,6 +597,8 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 		goto exit;
 	}
 
+skip_buffsz_check:
+	old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;
 	/* if new MTU was smaller than old one, then flush all SQs before MTU change */
 	if (old_frame_size > frame_size) {
 		if (data->dev_started) {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH 07/13] common/cnxk: skip setting Tx MTU separately
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                   ` (4 preceding siblings ...)
  2024-02-08  8:59 ` [PATCH 06/13] net/cnxk: fix issue with buff size compute Nithin Dabilpuram
@ 2024-02-08  8:59 ` Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 08/13] net/cnxk: fix max MTU limit Nithin Dabilpuram
                   ` (7 subsequent siblings)
  13 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Skip setting the Tx MTU separately, as the Tx credit configuration is now
based on the maximum MTU possible for that link.
Also initialize the MTU with the maximum value for that port.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix.c      |  2 +-
 drivers/common/cnxk/roc_nix.h      |  2 --
 drivers/net/cnxk/cnxk_ethdev_ops.c | 12 +-----------
 3 files changed, 2 insertions(+), 14 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index f64933a1d9..afbc3eb901 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -482,7 +482,7 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 	sdp_lbk_id_update(pci_dev, nix);
 	nix->pci_dev = pci_dev;
 	nix->reta_sz = reta_sz;
-	nix->mtu = ROC_NIX_DEFAULT_HW_FRS;
+	nix->mtu = roc_nix_max_pkt_len(roc_nix);
 	nix->dmac_flt_idx = -1;
 
 	/* Register error and ras interrupts */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 9d57ca0be7..3799b551f2 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -267,8 +267,6 @@ struct roc_nix_eeprom_info {
 #define ROC_NIX_RSS_KEY_LEN	     48 /* 352 Bits */
 #define ROC_NIX_RSS_MCAM_IDX_DEFAULT (-1)
 
-#define ROC_NIX_DEFAULT_HW_FRS 1514
-
 #define ROC_NIX_VWQE_MAX_SIZE_LOG2 11
 #define ROC_NIX_VWQE_MIN_SIZE_LOG2 2
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index e816884d47..4962f3bced 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -610,19 +610,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 
 	frame_size -= RTE_ETHER_CRC_LEN;
 
-	/* Update mtu on Tx */
-	rc = roc_nix_mac_mtu_set(nix, frame_size);
-	if (rc) {
-		plt_err("Failed to set MTU, rc=%d", rc);
-		goto exit;
-	}
-
-	/* Sync same frame size on Rx */
+	/* Set frame size on Rx */
 	rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
 	if (rc) {
-		/* Rollback to older mtu */
-		roc_nix_mac_mtu_set(nix,
-				    old_frame_size - RTE_ETHER_CRC_LEN);
 		plt_err("Failed to max Rx frame length, rc=%d", rc);
 		goto exit;
 	}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH 08/13] net/cnxk: fix max MTU limit
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                   ` (5 preceding siblings ...)
  2024-02-08  8:59 ` [PATCH 07/13] common/cnxk: skip setting Tx MTU separately Nithin Dabilpuram
@ 2024-02-08  8:59 ` Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 09/13] common/cnxk: update queue entries copy in RETA table Nithin Dabilpuram
                   ` (6 subsequent siblings)
  13 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Sunil Kumar Kori <skori@marvell.com>

The device supports a maximum frame size of up to 9212 bytes. While
configuring the MTU, the overhead is taken as the Ethernet header size,
CRC and 2 * (VLAN tags), which translates to 26 bytes.

The overhead exposed to the user via rte_eth_dev_info() was 18 bytes,
which led to a wrong Rx frame size being set.
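
For reference, assuming the values behind CNXK_NIX_L2_OVERHEAD match the
description above, the arithmetic is:

  14 (Ethernet header) + 4 (CRC) + 2 * 4 (VLAN tags) = 26 bytes

whereas the previously exposed overhead only covered header plus CRC:

  14 + 4 = 18 bytes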

Fixes: 8589ec212e80 ("net/cnxk: support MTU set")

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev_ops.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 4962f3bced..56049c5dd2 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -20,8 +20,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
 	devinfo->max_mac_addrs = dev->max_mac_entries;
 	devinfo->max_vfs = pci_dev->max_vfs;
-	devinfo->max_mtu = devinfo->max_rx_pktlen -
-				(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
+	devinfo->max_mtu = devinfo->max_rx_pktlen - CNXK_NIX_L2_OVERHEAD;
 	devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;
 
 	devinfo->rx_offload_capa = dev->rx_offload_capa;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH 09/13] common/cnxk: update queue entries copy in RETA table
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                   ` (6 preceding siblings ...)
  2024-02-08  8:59 ` [PATCH 08/13] net/cnxk: fix max MTU limit Nithin Dabilpuram
@ 2024-02-08  8:59 ` Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 10/13] net/cnxk: fix indirect mbuf handling in Tx path Nithin Dabilpuram
                   ` (5 subsequent siblings)
  13 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, Kommula Shiva Shankar

From: Kommula Shiva Shankar <kshankar@marvell.com>

This patch updates the copy of queue entries in the RETA table to account
for the entry data type: the entries are 16-bit queue indices, so the copy
size must be sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX bytes rather than
ROC_NIX_RSS_RETA_MAX bytes.

Signed-off-by: Kommula Shiva Shankar <kshankar@marvell.com>
---
 drivers/common/cnxk/roc_nix_rss.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_rss.c b/drivers/common/cnxk/roc_nix_rss.c
index 3599eb9bae..2b88e1360d 100644
--- a/drivers/common/cnxk/roc_nix_rss.c
+++ b/drivers/common/cnxk/roc_nix_rss.c
@@ -196,7 +196,7 @@ roc_nix_rss_reta_set(struct roc_nix *roc_nix, uint8_t group,
 	if (rc)
 		return rc;
 
-	memcpy(&nix->reta[group], reta, ROC_NIX_RSS_RETA_MAX);
+	memcpy(&nix->reta[group], reta, sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX);
 	return 0;
 }
 
@@ -209,7 +209,7 @@ roc_nix_rss_reta_get(struct roc_nix *roc_nix, uint8_t group,
 	if (group >= ROC_NIX_RSS_GRPS)
 		return NIX_ERR_PARAM;
 
-	memcpy(reta, &nix->reta[group], ROC_NIX_RSS_RETA_MAX);
+	memcpy(reta, &nix->reta[group], sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX);
 	return 0;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH 10/13] net/cnxk: fix indirect mbuf handling in Tx path
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                   ` (7 preceding siblings ...)
  2024-02-08  8:59 ` [PATCH 09/13] common/cnxk: update queue entries copy in RETA table Nithin Dabilpuram
@ 2024-02-08  8:59 ` Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 11/13] net/cnxk: fix check cookies for multi-seg offload Nithin Dabilpuram
                   ` (4 subsequent siblings)
  13 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

An indirect mbuf can point to data from a different mempool. Use the right
aura in the NIX send header in both the SG2 and SG cases.
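
For context, a minimal sketch of how such an mbuf arises (the pool names are
illustrative). The attached data buffer belongs to a different mempool than
the indirect mbuf itself, so the aura programmed in the send header must
match the pool that owns the buffer actually being freed:

  struct rte_mbuf *md = rte_pktmbuf_alloc(data_pool); /* direct mbuf, owns the data */
  struct rte_mbuf *mi = rte_pktmbuf_alloc(hdr_pool);  /* will become indirect */

  /* mi now references md's data buffer, which lives in data_pool */
  rte_pktmbuf_attach(mi, md);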

Fixes: 862e28128707 ("net/cnxk: add vector Tx for CN9K")
Fixes: f71b7dbbf04b ("net/cnxk: add vector Tx for CN10K")
Fixes: 7e95c11df4f1 ("net/cnxk: add multi-segment Tx for CN9K")
Fixes: 3626d5195d49 ("net/cnxk: add multi-segment Tx for CN10K")

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cn10k_ethdev.c   |   6 +
 drivers/net/cnxk/cn10k_rxtx.h     |   1 +
 drivers/net/cnxk/cn10k_tx.h       | 265 ++++++++++++++++++---------
 drivers/net/cnxk/cn9k_ethdev.c    |   6 +
 drivers/net/cnxk/cn9k_ethdev.h    |   1 +
 drivers/net/cnxk/cn9k_tx.h        | 288 +++++++++++++++++++++---------
 drivers/net/cnxk/cnxk_ethdev_dp.h |  10 +-
 7 files changed, 402 insertions(+), 175 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index a2e943a3d0..a5696c092a 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -389,7 +389,13 @@ cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
 		struct roc_nix_sq *sq = &dev->sqs[qidx];
 		do {
 			handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
+			/* Check if SQ is empty */
 			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
+			if (head != tail)
+				continue;
+
+			/* Check if completion CQ is empty */
+			roc_nix_cq_head_tail_get(nix, sq->cqid, &head, &tail);
 		} while (head != tail);
 	}
 
diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h
index aeffc4ac92..9f33d0192e 100644
--- a/drivers/net/cnxk/cn10k_rxtx.h
+++ b/drivers/net/cnxk/cn10k_rxtx.h
@@ -177,6 +177,7 @@ handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe)
 			m = m_next;
 		}
 		rte_pktmbuf_free_seg(m);
+		txq->tx_compl.ptr[tx_compl_s0->sqe_id] = NULL;
 
 		head++;
 		head &= qmask;
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 467f0ccc65..9721b7584a 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -786,8 +786,9 @@ cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr,
 
 static __rte_always_inline uint64_t
 cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq,
-		struct nix_send_hdr_s *send_hdr)
+		      struct nix_send_hdr_s *send_hdr, uint64_t *aura)
 {
+	struct rte_mbuf *prev = NULL;
 	uint32_t sqe_id;
 
 	if (RTE_MBUF_HAS_EXTBUF(m)) {
@@ -796,7 +797,10 @@ cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq,
 			return 1;
 		}
 		if (send_hdr->w0.pnc) {
-			txq->tx_compl.ptr[send_hdr->w1.sqe_id]->next = m;
+			sqe_id = send_hdr->w1.sqe_id;
+			prev = txq->tx_compl.ptr[sqe_id];
+			m->next = prev;
+			txq->tx_compl.ptr[sqe_id] = m;
 		} else {
 			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
 			send_hdr->w0.pnc = 1;
@@ -806,10 +810,151 @@ cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq,
 		}
 		return 1;
 	} else {
-		return cnxk_nix_prefree_seg(m);
+		return cnxk_nix_prefree_seg(m, aura);
 	}
 }
 
+#if defined(RTE_ARCH_ARM64)
+/* Only called for first segments of single segmented mbufs */
+static __rte_always_inline void
+cn10k_nix_prefree_seg_vec(struct rte_mbuf **mbufs, struct cn10k_eth_txq *txq,
+			  uint64x2_t *senddesc01_w0, uint64x2_t *senddesc23_w0,
+			  uint64x2_t *senddesc01_w1, uint64x2_t *senddesc23_w1)
+{
+	struct rte_mbuf **tx_compl_ptr = txq->tx_compl.ptr;
+	uint32_t nb_desc_mask = txq->tx_compl.nb_desc_mask;
+	bool tx_compl_ena = txq->tx_compl.ena;
+	struct rte_mbuf *m0, *m1, *m2, *m3;
+	struct rte_mbuf *cookie;
+	uint64_t w0, w1, aura;
+	uint64_t sqe_id;
+
+	m0 = mbufs[0];
+	m1 = mbufs[1];
+	m2 = mbufs[2];
+	m3 = mbufs[3];
+
+	/* mbuf 0 */
+	w0 = vgetq_lane_u64(*senddesc01_w0, 0);
+	if (RTE_MBUF_HAS_EXTBUF(m0)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc01_w1, 0);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m0);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m0;
+			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 0);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m0) ? m0 : rte_mbuf_from_indirect(m0);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m0, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, &cookie, 1, 0);
+	}
+	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 0);
+
+	/* mbuf1 */
+	w0 = vgetq_lane_u64(*senddesc01_w0, 1);
+	if (RTE_MBUF_HAS_EXTBUF(m1)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc01_w1, 1);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m1);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m1;
+			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 1);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m1) ? m1 : rte_mbuf_from_indirect(m1);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m1, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, &cookie, 1, 0);
+	}
+	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 1);
+
+	/* mbuf 2 */
+	w0 = vgetq_lane_u64(*senddesc23_w0, 0);
+	if (RTE_MBUF_HAS_EXTBUF(m2)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc23_w1, 0);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m2);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m2;
+			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 0);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m2) ? m2 : rte_mbuf_from_indirect(m2);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m2, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, &cookie, 1, 0);
+	}
+	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 0);
+
+	/* mbuf3 */
+	w0 = vgetq_lane_u64(*senddesc23_w0, 1);
+	if (RTE_MBUF_HAS_EXTBUF(m3)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc23_w1, 1);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m3);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m3;
+			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 1);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m3) ? m3 : rte_mbuf_from_indirect(m3);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m3, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, &cookie, 1, 0);
+	}
+	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 1);
+#ifndef RTE_LIBRTE_MEMPOOL_DEBUG
+	RTE_SET_USED(cookie);
+#endif
+}
+#endif
+
 static __rte_always_inline void
 cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
 {
@@ -889,6 +1034,9 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq,
 		sg = (union nix_send_sg_s *)(cmd + 2);
 	}
 
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+		send_hdr->w0.pnc = 0;
+
 	if (flags & (NIX_TX_NEED_SEND_HDR_W1 | NIX_TX_OFFLOAD_SECURITY_F)) {
 		ol_flags = m->ol_flags;
 		w1.u = 0;
@@ -1049,19 +1197,30 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq,
 		send_hdr->w1.u = w1.u;
 
 	if (!(flags & NIX_TX_MULTI_SEG_F)) {
+		struct rte_mbuf *cookie;
+
 		sg->seg1_size = send_hdr->w0.total;
 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+			uint64_t aura;
+
 			/* DF bit = 1 if refcount of current mbuf or parent mbuf
 			 *		is greater than 1
 			 * DF bit = 0 otherwise
 			 */
-			send_hdr->w0.df = cn10k_nix_prefree_seg(m, txq, send_hdr);
+			aura = send_hdr->w0.aura;
+			send_hdr->w0.df = cn10k_nix_prefree_seg(m, txq, send_hdr, &aura);
+			send_hdr->w0.aura = aura;
 		}
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		/* Mark mempool object as "put" since it is freed by NIX */
 		if (!send_hdr->w0.df)
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+#else
+		RTE_SET_USED(cookie);
+#endif
 	} else {
 		sg->seg1_size = m->data_len;
 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
@@ -1135,6 +1294,7 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 	struct nix_send_hdr_s *send_hdr;
 	union nix_send_sg_s *sg, l_sg;
 	union nix_send_sg2_s l_sg2;
+	struct rte_mbuf *cookie;
 	struct rte_mbuf *m_next;
 	uint8_t off, is_sg2;
 	uint64_t len, dlen;
@@ -1163,21 +1323,26 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 	len -= dlen;
 	nb_segs = m->nb_segs - 1;
 	m_next = m->next;
+	m->next = NULL;
 	slist = &cmd[3 + off + 1];
 
+	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 	/* Set invert df if buffer is not to be freed by H/W */
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-		prefree = cn10k_nix_prefree_seg(m, txq, send_hdr);
+		aura = send_hdr->w0.aura;
+		prefree = cn10k_nix_prefree_seg(m, txq, send_hdr, &aura);
+		send_hdr->w0.aura = aura;
 		l_sg.i1 = prefree;
 	}
 
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	/* Mark mempool object as "put" since it is freed by NIX */
 	if (!prefree)
-		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+		RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 	rte_io_wmb();
+#else
+	RTE_SET_USED(cookie);
 #endif
-	m->next = NULL;
 
 	/* Quickly handle single segmented packets. With this if-condition
 	 * compiler will completely optimize out the below do-while loop
@@ -1207,9 +1372,12 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 		aura = aura0;
 		prefree = 0;
 
+		m->next = NULL;
+
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
 			aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
-			prefree = cn10k_nix_prefree_seg(m, txq, send_hdr);
+			prefree = cn10k_nix_prefree_seg(m, txq, send_hdr, &aura);
 			is_sg2 = aura != aura0 && !prefree;
 		}
 
@@ -1259,13 +1427,14 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 			l_sg.subdc = NIX_SUBDC_SG;
 			slist++;
 		}
-		m->next = NULL;
 
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		/* Mark mempool object as "put" since it is freed by NIX
 		 */
 		if (!prefree)
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+#else
+		RTE_SET_USED(cookie);
 #endif
 		m = m_next;
 	} while (nb_segs);
@@ -1997,13 +2166,10 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 	uint64x2_t sgdesc01_w0, sgdesc23_w0;
 	uint64x2_t sgdesc01_w1, sgdesc23_w1;
 	struct cn10k_eth_txq *txq = tx_queue;
-	uint64x2_t xmask01_w0, xmask23_w0;
-	uint64x2_t xmask01_w1, xmask23_w1;
 	rte_iova_t io_addr = txq->io_addr;
 	uint8_t lnum, shift = 0, loff = 0;
 	uintptr_t laddr = txq->lmt_base;
 	uint8_t c_lnum, c_shft, c_loff;
-	struct nix_send_hdr_s send_hdr;
 	uint64x2_t ltypes01, ltypes23;
 	uint64x2_t xtmp128, ytmp128;
 	uint64x2_t xmask01, xmask23;
@@ -2153,7 +2319,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		}
 		/* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
 		senddesc01_w0 =
-			vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF));
+			vbicq_u64(senddesc01_w0, vdupq_n_u64(0x800FFFFFFFF));
 		sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF));
 
 		senddesc23_w0 = senddesc01_w0;
@@ -2859,73 +3025,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		    !(flags & NIX_TX_MULTI_SEG_F) &&
 		    !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
 			/* Set don't free bit if reference count > 1 */
-			xmask01_w0 = vdupq_n_u64(0);
-			xmask01_w1 = vdupq_n_u64(0);
-			xmask23_w0 = xmask01_w0;
-			xmask23_w1 = xmask01_w1;
-
-			/* Move mbufs to iova */
-			mbuf0 = (uint64_t *)tx_pkts[0];
-			mbuf1 = (uint64_t *)tx_pkts[1];
-			mbuf2 = (uint64_t *)tx_pkts[2];
-			mbuf3 = (uint64_t *)tx_pkts[3];
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf0, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 0);
-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 0);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf0)->pool,
-					(void **)&mbuf0, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf1, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 1);
-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 1);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf1)->pool,
-					(void **)&mbuf1, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf2, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 0);
-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 0);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf2)->pool,
-					(void **)&mbuf2, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf3, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 1);
-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 1);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf3)->pool,
-					(void **)&mbuf3, 1, 0);
-			}
-
-			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01_w0);
-			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23_w0);
-			senddesc01_w1 = vorrq_u64(senddesc01_w1, xmask01_w1);
-			senddesc23_w1 = vorrq_u64(senddesc23_w1, xmask23_w1);
+			cn10k_nix_prefree_seg_vec(tx_pkts, txq, &senddesc01_w0, &senddesc23_w0,
+						  &senddesc01_w1, &senddesc23_w1);
 		} else if (!(flags & NIX_TX_MULTI_SEG_F) &&
 			   !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
 			/* Move mbufs to iova */
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 67f21a9c7f..ea92b1dcb6 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -347,7 +347,13 @@ cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
 		struct roc_nix_sq *sq = &dev->sqs[qidx];
 		do {
 			handle_tx_completion_pkts(txq, 0);
+			/* Check if SQ is empty */
 			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
+			if (head != tail)
+				continue;
+
+			/* Check if completion CQ is empty */
+			roc_nix_cq_head_tail_get(nix, sq->cqid, &head, &tail);
 		} while (head != tail);
 	}
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index 9e0a3c5bb2..6ae0db62ca 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -169,6 +169,7 @@ handle_tx_completion_pkts(struct cn9k_eth_txq *txq, uint8_t mt_safe)
 			m = m_next;
 		}
 		rte_pktmbuf_free_seg(m);
+		txq->tx_compl.ptr[tx_compl_s0->sqe_id] = NULL;
 
 		head++;
 		head &= qmask;
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index fba4bb4215..f28cecebd0 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -83,9 +83,10 @@ cn9k_nix_tx_skeleton(struct cn9k_eth_txq *txq, uint64_t *cmd,
 }
 
 static __rte_always_inline uint64_t
-cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq,
-		struct nix_send_hdr_s *send_hdr)
+cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq, struct nix_send_hdr_s *send_hdr,
+		     uint64_t *aura)
 {
+	struct rte_mbuf *prev;
 	uint32_t sqe_id;
 
 	if (RTE_MBUF_HAS_EXTBUF(m)) {
@@ -94,7 +95,10 @@ cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq,
 			return 1;
 		}
 		if (send_hdr->w0.pnc) {
-			txq->tx_compl.ptr[send_hdr->w1.sqe_id]->next = m;
+			sqe_id = send_hdr->w1.sqe_id;
+			prev = txq->tx_compl.ptr[sqe_id];
+			m->next = prev;
+			txq->tx_compl.ptr[sqe_id] = m;
 		} else {
 			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
 			send_hdr->w0.pnc = 1;
@@ -104,10 +108,151 @@ cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq,
 		}
 		return 1;
 	} else {
-		return cnxk_nix_prefree_seg(m);
+		return cnxk_nix_prefree_seg(m, aura);
 	}
 }
 
+#if defined(RTE_ARCH_ARM64)
+/* Only called for first segments of single segmented mbufs */
+static __rte_always_inline void
+cn9k_nix_prefree_seg_vec(struct rte_mbuf **mbufs, struct cn9k_eth_txq *txq,
+			 uint64x2_t *senddesc01_w0, uint64x2_t *senddesc23_w0,
+			 uint64x2_t *senddesc01_w1, uint64x2_t *senddesc23_w1)
+{
+	struct rte_mbuf **tx_compl_ptr = txq->tx_compl.ptr;
+	uint32_t nb_desc_mask = txq->tx_compl.nb_desc_mask;
+	bool tx_compl_ena = txq->tx_compl.ena;
+	struct rte_mbuf *m0, *m1, *m2, *m3;
+	struct rte_mbuf *cookie;
+	uint64_t w0, w1, aura;
+	uint64_t sqe_id;
+
+	m0 = mbufs[0];
+	m1 = mbufs[1];
+	m2 = mbufs[2];
+	m3 = mbufs[3];
+
+	/* mbuf 0 */
+	w0 = vgetq_lane_u64(*senddesc01_w0, 0);
+	if (RTE_MBUF_HAS_EXTBUF(m0)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc01_w1, 0);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m0);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m0;
+			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 0);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m0) ? m0 : rte_mbuf_from_indirect(m0);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m0, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, &cookie, 1, 0);
+	}
+	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 0);
+
+	/* mbuf1 */
+	w0 = vgetq_lane_u64(*senddesc01_w0, 1);
+	if (RTE_MBUF_HAS_EXTBUF(m1)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc01_w1, 1);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m1);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m1;
+			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 1);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m1) ? m1 : rte_mbuf_from_indirect(m1);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m1, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, &cookie, 1, 0);
+	}
+	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 1);
+
+	/* mbuf 2 */
+	w0 = vgetq_lane_u64(*senddesc23_w0, 0);
+	if (RTE_MBUF_HAS_EXTBUF(m2)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc23_w1, 0);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m2);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m2;
+			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 0);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m2) ? m2 : rte_mbuf_from_indirect(m2);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m2, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, &cookie, 1, 0);
+	}
+	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 0);
+
+	/* mbuf3 */
+	w0 = vgetq_lane_u64(*senddesc23_w0, 1);
+	if (RTE_MBUF_HAS_EXTBUF(m3)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc23_w1, 1);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m3);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m3;
+			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 1);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m3) ? m3 : rte_mbuf_from_indirect(m3);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m3, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, &cookie, 1, 0);
+	}
+	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 1);
+#ifndef RTE_LIBRTE_MEMPOOL_DEBUG
+	RTE_SET_USED(cookie);
+#endif
+}
+#endif
+
 static __rte_always_inline void
 cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
 {
@@ -191,6 +336,8 @@ cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq,
 		ol_flags = m->ol_flags;
 		w1.u = 0;
 	}
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+		send_hdr->w0.pnc = 0;
 
 	if (!(flags & NIX_TX_MULTI_SEG_F))
 		send_hdr->w0.total = m->data_len;
@@ -345,23 +492,33 @@ cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq,
 		send_hdr->w1.u = w1.u;
 
 	if (!(flags & NIX_TX_MULTI_SEG_F)) {
+		struct rte_mbuf *cookie;
+
 		sg->seg1_size = m->data_len;
 		*(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m);
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+			uint64_t aura;
 			/* DF bit = 1 if refcount of current mbuf or parent mbuf
 			 *		is greater than 1
 			 * DF bit = 0 otherwise
 			 */
-			send_hdr->w0.df = cn9k_nix_prefree_seg(m, txq, send_hdr);
+			aura = send_hdr->w0.aura;
+			send_hdr->w0.df = cn9k_nix_prefree_seg(m, txq, send_hdr, &aura);
+			send_hdr->w0.aura = aura;
 			/* Ensuring mbuf fields which got updated in
 			 * cnxk_nix_prefree_seg are written before LMTST.
 			 */
 			rte_io_wmb();
 		}
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		/* Mark mempool object as "put" since it is freed by NIX */
 		if (!send_hdr->w0.df)
 			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+#else
+		RTE_SET_USED(cookie);
+#endif
 	} else {
 		sg->seg1_size = m->data_len;
 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
@@ -443,6 +600,8 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 		      struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 {
 	struct nix_send_hdr_s *send_hdr;
+	uint64_t prefree = 0, aura;
+	struct rte_mbuf *cookie;
 	union nix_send_sg_s *sg;
 	struct rte_mbuf *m_next;
 	uint64_t *slist, sg_u;
@@ -467,9 +626,13 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 	m_next = m->next;
 	slist = &cmd[3 + off + 1];
 
+	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 	/* Set invert df if buffer is not to be freed by H/W */
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-		sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << 55);
+		aura = send_hdr->w0.aura;
+		prefree = (cn9k_nix_prefree_seg(m, txq, send_hdr, &aura) << 55);
+		send_hdr->w0.aura = aura;
+		sg_u |= prefree;
 		rte_io_wmb();
 	}
 
@@ -478,6 +641,8 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 	if (!(sg_u & (1ULL << 55)))
 		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 	rte_io_wmb();
+#else
+	RTE_SET_USED(cookie);
 #endif
 	m = m_next;
 	if (!m)
@@ -490,7 +655,7 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 		*slist = rte_mbuf_data_iova(m);
 		/* Set invert df if buffer is not to be freed by H/W */
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << (i + 55));
+			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr, NULL) << (i + 55));
 			/* Commit changes to mbuf */
 			rte_io_wmb();
 		}
@@ -709,8 +874,8 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 			       struct nix_send_hdr_s *send_hdr,
 			       union nix_send_sg_s *sg, const uint32_t flags)
 {
-	struct rte_mbuf *m_next;
-	uint64_t *slist, sg_u;
+	struct rte_mbuf *m_next, *cookie;
+	uint64_t *slist, sg_u, aura;
 	uint16_t nb_segs;
 	uint64_t segdw;
 	int i = 1;
@@ -727,13 +892,19 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 	m_next = m->next;
 
 	/* Set invert df if buffer is not to be freed by H/W */
-	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
-		sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << 55);
-		/* Mark mempool object as "put" since it is freed by NIX */
+	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+		aura = send_hdr->w0.aura;
+		sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr, &aura) << 55);
+		send_hdr->w0.aura = aura;
+	}
+	/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	if (!(sg_u & (1ULL << 55)))
-		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+		RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 	rte_io_wmb();
+#else
+	RTE_SET_USED(cookie);
 #endif
 
 	m = m_next;
@@ -742,14 +913,15 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 		m_next = m->next;
 		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
 		*slist = rte_mbuf_data_iova(m);
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 		/* Set invert df if buffer is not to be freed by H/W */
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
-			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << (i + 55));
+			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr, &aura) << (i + 55));
 			/* Mark mempool object as "put" since it is freed by NIX
 			 */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		if (!(sg_u & (1ULL << (i + 55))))
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 		rte_io_wmb();
 #endif
 		slist++;
@@ -789,15 +961,20 @@ cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq,
 			  uint64x2_t *cmd1, const uint32_t flags)
 {
 	struct nix_send_hdr_s send_hdr;
+	struct rte_mbuf *cookie;
 	union nix_send_sg_s sg;
+	uint64_t aura;
 	uint8_t ret;
 
 	if (m->nb_segs == 1) {
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
 			send_hdr.w0.u = vgetq_lane_u64(cmd0[0], 0);
 			send_hdr.w1.u = vgetq_lane_u64(cmd0[0], 1);
 			sg.u = vgetq_lane_u64(cmd1[0], 0);
-			sg.u |= (cn9k_nix_prefree_seg(m, txq, &send_hdr) << 55);
+			aura = send_hdr.w0.aura;
+			sg.u |= (cn9k_nix_prefree_seg(m, txq, &send_hdr, &aura) << 55);
+			send_hdr.w0.aura = aura;
 			cmd1[0] = vsetq_lane_u64(sg.u, cmd1[0], 0);
 			cmd0[0] = vsetq_lane_u64(send_hdr.w0.u, cmd0[0], 0);
 			cmd0[0] = vsetq_lane_u64(send_hdr.w1.u, cmd0[0], 1);
@@ -806,8 +983,10 @@ cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq,
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		sg.u = vgetq_lane_u64(cmd1[0], 0);
 		if (!(sg.u & (1ULL << 55)))
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 		rte_io_wmb();
+#else
+		RTE_SET_USED(cookie);
 #endif
 		return 2 + !!(flags & NIX_TX_NEED_EXT_HDR) +
 		       !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
@@ -962,10 +1141,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64x2_t sgdesc01_w1, sgdesc23_w1;
 	struct cn9k_eth_txq *txq = tx_queue;
 	uint64_t *lmt_addr = txq->lmt_addr;
-	uint64x2_t xmask01_w0, xmask23_w0;
-	uint64x2_t xmask01_w1, xmask23_w1;
 	rte_iova_t io_addr = txq->io_addr;
-	struct nix_send_hdr_s send_hdr;
 	uint64x2_t ltypes01, ltypes23;
 	uint64x2_t xtmp128, ytmp128;
 	uint64x2_t xmask01, xmask23;
@@ -1028,7 +1204,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 	for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
 		/* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
 		senddesc01_w0 =
-			vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF));
+			vbicq_u64(senddesc01_w0, vdupq_n_u64(0x800FFFFFFFF));
 		sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF));
 
 		senddesc23_w0 = senddesc01_w0;
@@ -1732,74 +1908,8 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 		if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
 		    !(flags & NIX_TX_MULTI_SEG_F)) {
 			/* Set don't free bit if reference count > 1 */
-			xmask01_w0 = vdupq_n_u64(0);
-			xmask01_w1 = vdupq_n_u64(0);
-			xmask23_w0 = xmask01_w0;
-			xmask23_w1 = xmask01_w1;
-
-			/* Move mbufs to iova */
-			mbuf0 = (uint64_t *)tx_pkts[0];
-			mbuf1 = (uint64_t *)tx_pkts[1];
-			mbuf2 = (uint64_t *)tx_pkts[2];
-			mbuf3 = (uint64_t *)tx_pkts[3];
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf0, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 0);
-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 0);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf0)->pool,
-					(void **)&mbuf0, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf1, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 1);
-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 1);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf1)->pool,
-					(void **)&mbuf1, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf2, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 0);
-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 0);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf2)->pool,
-					(void **)&mbuf2, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf3, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 1);
-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 1);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf3)->pool,
-					(void **)&mbuf3, 1, 0);
-			}
-
-			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01_w0);
-			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23_w0);
-			senddesc01_w1 = vorrq_u64(senddesc01_w1, xmask01_w1);
-			senddesc23_w1 = vorrq_u64(senddesc23_w1, xmask23_w1);
-
+			cn9k_nix_prefree_seg_vec(tx_pkts, txq, &senddesc01_w0, &senddesc23_w0,
+						 &senddesc01_w1, &senddesc23_w1);
 			/* Ensuring mbuf fields which got updated in
 			 * cnxk_nix_prefree_seg are written before LMTST.
 			 */
diff --git a/drivers/net/cnxk/cnxk_ethdev_dp.h b/drivers/net/cnxk/cnxk_ethdev_dp.h
index c1f99a2616..67f40b8e25 100644
--- a/drivers/net/cnxk/cnxk_ethdev_dp.h
+++ b/drivers/net/cnxk/cnxk_ethdev_dp.h
@@ -84,7 +84,7 @@ struct cnxk_timesync_info {
 
 /* Inlines */
 static __rte_always_inline uint64_t
-cnxk_pktmbuf_detach(struct rte_mbuf *m)
+cnxk_pktmbuf_detach(struct rte_mbuf *m, uint64_t *aura)
 {
 	struct rte_mempool *mp = m->pool;
 	uint32_t mbuf_size, buf_len;
@@ -94,6 +94,8 @@ cnxk_pktmbuf_detach(struct rte_mbuf *m)
 
 	/* Update refcount of direct mbuf */
 	md = rte_mbuf_from_indirect(m);
+	if (aura)
+		*aura = roc_npa_aura_handle_to_aura(md->pool->pool_id);
 	refcount = rte_mbuf_refcnt_update(md, -1);
 
 	priv_size = rte_pktmbuf_priv_size(mp);
@@ -126,18 +128,18 @@ cnxk_pktmbuf_detach(struct rte_mbuf *m)
 }
 
 static __rte_always_inline uint64_t
-cnxk_nix_prefree_seg(struct rte_mbuf *m)
+cnxk_nix_prefree_seg(struct rte_mbuf *m, uint64_t *aura)
 {
 	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
 		if (!RTE_MBUF_DIRECT(m))
-			return cnxk_pktmbuf_detach(m);
+			return cnxk_pktmbuf_detach(m, aura);
 
 		m->next = NULL;
 		m->nb_segs = 1;
 		return 0;
 	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
 		if (!RTE_MBUF_DIRECT(m))
-			return cnxk_pktmbuf_detach(m);
+			return cnxk_pktmbuf_detach(m, aura);
 
 		rte_mbuf_refcnt_set(m, 1);
 		m->next = NULL;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH 11/13] net/cnxk: fix check cookies for multi-seg offload
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                   ` (8 preceding siblings ...)
  2024-02-08  8:59 ` [PATCH 10/13] net/cnxk: fix indirect mbuf handling in Tx path Nithin Dabilpuram
@ 2024-02-08  8:59 ` Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 12/13] common/cnxk: add IO attribute to mbox structs Nithin Dabilpuram
                   ` (3 subsequent siblings)
  13 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, Rahul Bhansali

From: Rahul Bhansali <rbhansali@marvell.com>

Fix missing check cookies marking in the multi-seg offload case
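
For reviewers unfamiliar with the macro: RTE_MEMPOOL_CHECK_COOKIES() only
expands to real work when RTE_LIBRTE_MEMPOOL_DEBUG is enabled; it marks the
object as returned to the pool so that the later free of the buffer by NIX
hardware is not reported as a double free. A minimal sketch of the idea
(the helper name below is illustrative only):

	#include <rte_mbuf.h>
	#include <rte_mempool.h>

	/* Mark a segment as handed over to NIX for freeing so the mempool
	 * debug cookie checks stay balanced. Compiles to nothing when
	 * RTE_LIBRTE_MEMPOOL_DEBUG is not defined.
	 */
	static inline void
	mark_seg_hw_owned(struct rte_mbuf *m)
	{
	#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
	#else
		RTE_SET_USED(m);
	#endif
	}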

Fixes: 3626d5195d49 ("net/cnxk: add multi-segment Tx for CN10K")

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 9721b7584a..a995696e66 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -1863,6 +1863,9 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 	len -= dlen;
 	sg_u = sg_u | ((uint64_t)dlen);
 
+	/* Mark mempool object as "put" since it is freed by NIX */
+	RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+
 	nb_segs = m->nb_segs - 1;
 	m_next = m->next;
 	m->next = NULL;
@@ -1888,6 +1891,9 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 			slist++;
 		}
 		m->next = NULL;
+		/* Mark mempool object as "put" since it is freed by NIX */
+		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+
 		m = m_next;
 	} while (nb_segs);
 
@@ -1911,8 +1917,11 @@ cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0,
 	union nix_send_hdr_w0_u sh;
 	union nix_send_sg_s sg;
 
-	if (m->nb_segs == 1)
+	if (m->nb_segs == 1) {
+		/* Mark mempool object as "put" since it is freed by NIX */
+		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 		return;
+	}
 
 	sh.u = vgetq_lane_u64(cmd0[0], 0);
 	sg.u = vgetq_lane_u64(cmd1[0], 0);
@@ -1972,6 +1981,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq,
 			*data128 |= ((__uint128_t)7) << *shift;
 			*shift += 3;
 
+			/* Mark mempool object as "put" since it is freed by NIX */
+			RTE_MEMPOOL_CHECK_COOKIES(mbufs[0]->pool, (void **)&mbufs[0], 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(mbufs[1]->pool, (void **)&mbufs[1], 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(mbufs[2]->pool, (void **)&mbufs[2], 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(mbufs[3]->pool, (void **)&mbufs[3], 1, 0);
 			return 1;
 		}
 	}
@@ -1990,6 +2004,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq,
 				vst1q_u64(lmt_addr + 10, cmd2[j + 1]);
 				vst1q_u64(lmt_addr + 12, cmd1[j + 1]);
 				vst1q_u64(lmt_addr + 14, cmd3[j + 1]);
+
+				/* Mark mempool object as "put" since it is freed by NIX */
+				RTE_MEMPOOL_CHECK_COOKIES(mbufs[j]->pool, (void **)&mbufs[j], 1, 0);
+				RTE_MEMPOOL_CHECK_COOKIES(mbufs[j + 1]->pool,
+							  (void **)&mbufs[j + 1], 1, 0);
 			} else if (flags & NIX_TX_NEED_EXT_HDR) {
 				/* EXT header take 3 each, space for 2 segs.*/
 				cn10k_nix_prepare_mseg_vec(mbufs[j],
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH 12/13] common/cnxk: add IO attribute to mbox structs
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                   ` (9 preceding siblings ...)
  2024-02-08  8:59 ` [PATCH 11/13] net/cnxk: fix check cookies for multi-seg offload Nithin Dabilpuram
@ 2024-02-08  8:59 ` Nithin Dabilpuram
  2024-02-08  8:59 ` [PATCH 13/13] common/cnxk: use SQ enable and disable API Nithin Dabilpuram
                   ` (2 subsequent siblings)
  13 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

The IO attribute is needed on mbox structs to avoid unaligned or pair
accesses caused by compiler optimization. Add it to the structs where
it is missing.
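
To illustrate the problem being guarded against, a small sketch (not driver
code; __io is assumed here to expand to a volatile qualifier, as in the rest
of the roc layer). Without the qualifier the compiler is free to fuse two
adjacent 64-bit reads of a mailbox response into a single pair access:

	#include <stdint.h>

	#define __io volatile	/* assumed definition for this sketch */

	struct rsp_plain {	/* plain fields: adjacent loads may be fused */
		uint64_t w0;
		uint64_t w1;
	};

	struct rsp_io {		/* __io fields: one access per read */
		uint64_t __io w0;
		uint64_t __io w1;
	};

	static inline uint64_t
	rsp_sum(const struct rsp_io *r)
	{
		/* Each read below stays a separate 64-bit load; the compiler
		 * cannot merge or reorder them into a 128-bit pair access.
		 */
		return r->w0 + r->w1;
	}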

Fixes: 503b82de2cbf ("common/cnxk: add mbox request and response definitions")
Fixes: ddf955d3917e ("common/cnxk: support CPT second pass")

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_mbox.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 3257a370bc..f1e9ad2893 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -1425,12 +1425,12 @@ struct nix_cn10k_aq_enq_req {
 struct nix_cn10k_aq_enq_rsp {
 	struct mbox_msghdr hdr;
 	union {
-		struct nix_cn10k_rq_ctx_s rq;
-		struct nix_cn10k_sq_ctx_s sq;
-		struct nix_cq_ctx_s cq;
-		struct nix_rsse_s rss;
-		struct nix_rx_mce_s mce;
-		struct nix_band_prof_s prof;
+		__io struct nix_cn10k_rq_ctx_s rq;
+		__io struct nix_cn10k_sq_ctx_s sq;
+		__io struct nix_cq_ctx_s cq;
+		__io struct nix_rsse_s rss;
+		__io struct nix_rx_mce_s mce;
+		__io struct nix_band_prof_s prof;
 	};
 };
 
@@ -1666,11 +1666,11 @@ struct nix_rq_cpt_field_mask_cfg_req {
 #define RQ_CTX_MASK_MAX 6
 	union {
 		uint64_t __io rq_ctx_word_set[RQ_CTX_MASK_MAX];
-		struct nix_cn10k_rq_ctx_s rq_set;
+		__io struct nix_cn10k_rq_ctx_s rq_set;
 	};
 	union {
 		uint64_t __io rq_ctx_word_mask[RQ_CTX_MASK_MAX];
-		struct nix_cn10k_rq_ctx_s rq_mask;
+		__io struct nix_cn10k_rq_ctx_s rq_mask;
 	};
 	struct nix_lf_rx_ipec_cfg1_req {
 		uint32_t __io spb_cpt_aura;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH 13/13] common/cnxk: use SQ enable and disable API
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                   ` (10 preceding siblings ...)
  2024-02-08  8:59 ` [PATCH 12/13] common/cnxk: add IO attribute to mbox structs Nithin Dabilpuram
@ 2024-02-08  8:59 ` Nithin Dabilpuram
  2024-02-09  9:28   ` Jerin Jacob
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
  13 siblings, 1 reply; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-08  8:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Use the SQ enable and disable API in TM setup to effect
the state change.
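
For context, the resulting call pattern is roughly the sketch below (error
handling trimmed; it reuses the identifiers visible in the hunks):

	/* Tear-down path: disable the SQ before the TM hierarchy is removed */
	rc = roc_nix_sq_ena_dis(sq, false);
	if (rc)
		plt_err("Failed to disable SQ, rc=%d", rc);

	/* Enable path: re-enable the SQ once the new hierarchy is committed */
	rc = roc_nix_sq_ena_dis(sq, true);
	if (rc)
		plt_err("Failed to enable SQ, rc=%d", rc);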

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix_tm_ops.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 900b182c76..9f3870a311 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -495,7 +495,7 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 		if (!sq)
 			continue;
 
-		rc = roc_nix_tm_sq_aura_fc(sq, false);
+		rc = roc_nix_sq_ena_dis(sq, false);
 		if (rc) {
 			plt_err("Failed to disable sqb aura fc, rc=%d", rc);
 			goto cleanup;
@@ -607,7 +607,7 @@ roc_nix_tm_hierarchy_xmit_enable(struct roc_nix *roc_nix, enum roc_nix_tm_tree t
 		sq_id = node->id;
 		sq = nix->sqs[sq_id];
 
-		rc = roc_nix_tm_sq_aura_fc(sq, true);
+		rc = roc_nix_sq_ena_dis(sq, true);
 		if (rc) {
 			plt_err("TM sw xon failed on SQ %u, rc=%d", node->id,
 				rc);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH 13/13] common/cnxk: use SQ enable and disable API
  2024-02-08  8:59 ` [PATCH 13/13] common/cnxk: use SQ enable and disable API Nithin Dabilpuram
@ 2024-02-09  9:28   ` Jerin Jacob
  0 siblings, 0 replies; 43+ messages in thread
From: Jerin Jacob @ 2024-02-09  9:28 UTC (permalink / raw)
  To: Nithin Dabilpuram; +Cc: Kiran Kumar K, Sunil Kumar Kori, Satha Rao, dev

On Thu, Feb 8, 2024 at 2:32 PM Nithin Dabilpuram
<ndabilpuram@marvell.com> wrote:
>
> Use SQ enable and disable API in TM setup to affect
> the state change.
>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>

Please fix

Wrong headline prefix:
        common/cnxk: remove cn9k Inline IPsec FP opcode defines
        common/cnxk: skip setting Tx MTU separately
Is it candidate for Cc: stable@dpdk.org backport?
        common/cnxk: remove cn9k Inline IPsec FP opcode defines
        net/cnxk: fix issue with buff size compute
        net/cnxk: fix max MTU limit
        net/cnxk: fix indirect mbuf handling in Tx path
        net/cnxk: fix check cookies for multi-seg offload
        common/cnxk: add IO attribute to mbox structs

Invalid patch(es) found - checked 14 patches
check-git-log failed

### [PATCH] net/cnxk: fix indirect mbuf handling in Tx path

Warning in drivers/net/cnxk/cn10k_tx.h:
Using __atomic_xxx/__ATOMIC_XXX built-ins, prefer
rte_atomic_xxx/rte_memory_order_xxx
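
To illustrate the conversion the checker expects (a sketch only, reusing the
field names from the patch; it assumes the field is declared with RTE_ATOMIC()
and that <rte_stdatomic.h> is included):

	/* Flagged form */
	sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);

	/* Preferred form */
	sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
					       rte_memory_order_relaxed);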

^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                   ` (11 preceding siblings ...)
  2024-02-08  8:59 ` [PATCH 13/13] common/cnxk: use SQ enable and disable API Nithin Dabilpuram
@ 2024-02-22 11:02 ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 02/14] net/cnxk: add IPsec SA defines for PMD API Nithin Dabilpuram
                     ` (12 more replies)
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
  13 siblings, 13 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Since now Inline IPsec in cn9k is using same opcode as LA,
remove the definitions of fast path opcode.

Also fix devarg handling for ipsec_out_max_sa to allow 32-bit.

Fixes: fe5846bcc076 ("net/cnxk: add devargs for min-max SPI")
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c    | 230 -------------------------
 drivers/common/cnxk/cnxk_security.h    |  12 --
 drivers/common/cnxk/roc_ie_on.h        |  60 -------
 drivers/common/cnxk/roc_nix_inl.h      |  50 +-----
 drivers/common/cnxk/version.map        |   4 -
 drivers/net/cnxk/cnxk_ethdev_devargs.c |   2 +-
 6 files changed, 3 insertions(+), 355 deletions(-)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index 64c901a57a..bab015e3b3 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -574,236 +574,6 @@ cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa)
 	return !!sa->w2.s.valid;
 }
 
-static inline int
-ipsec_xfrm_verify(struct rte_security_ipsec_xform *ipsec_xfrm,
-		  struct rte_crypto_sym_xform *crypto_xfrm)
-{
-	if (crypto_xfrm->next == NULL)
-		return -EINVAL;
-
-	if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
-		    crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
-			return -EINVAL;
-	} else {
-		if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
-		    crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_AUTH)
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int
-onf_ipsec_sa_common_param_fill(struct roc_ie_onf_sa_ctl *ctl, uint8_t *salt,
-			       uint8_t *cipher_key, uint8_t *hmac_opad_ipad,
-			       struct rte_security_ipsec_xform *ipsec_xfrm,
-			       struct rte_crypto_sym_xform *crypto_xfrm)
-{
-	struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
-	int rc, length, auth_key_len;
-	const uint8_t *key = NULL;
-	uint8_t ccm_flag = 0;
-
-	/* Set direction */
-	switch (ipsec_xfrm->direction) {
-	case RTE_SECURITY_IPSEC_SA_DIR_INGRESS:
-		ctl->direction = ROC_IE_SA_DIR_INBOUND;
-		auth_xfrm = crypto_xfrm;
-		cipher_xfrm = crypto_xfrm->next;
-		break;
-	case RTE_SECURITY_IPSEC_SA_DIR_EGRESS:
-		ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
-		cipher_xfrm = crypto_xfrm;
-		auth_xfrm = crypto_xfrm->next;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* Set protocol - ESP vs AH */
-	switch (ipsec_xfrm->proto) {
-	case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
-		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
-		break;
-	case RTE_SECURITY_IPSEC_SA_PROTO_AH:
-		return -ENOTSUP;
-	default:
-		return -EINVAL;
-	}
-
-	/* Set mode - transport vs tunnel */
-	switch (ipsec_xfrm->mode) {
-	case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
-		ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
-		break;
-	case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
-		ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* Set encryption algorithm */
-	if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		length = crypto_xfrm->aead.key.length;
-
-		switch (crypto_xfrm->aead.algo) {
-		case RTE_CRYPTO_AEAD_AES_GCM:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM;
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
-			memcpy(salt, &ipsec_xfrm->salt, 4);
-			key = crypto_xfrm->aead.key.data;
-			break;
-		case RTE_CRYPTO_AEAD_AES_CCM:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CCM;
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
-			ccm_flag = 0x07 & ~ROC_CPT_AES_CCM_CTR_LEN;
-			*salt = ccm_flag;
-			memcpy(PLT_PTR_ADD(salt, 1), &ipsec_xfrm->salt, 3);
-			key = crypto_xfrm->aead.key.data;
-			break;
-		default:
-			return -ENOTSUP;
-		}
-
-	} else {
-		rc = ipsec_xfrm_verify(ipsec_xfrm, crypto_xfrm);
-		if (rc)
-			return rc;
-
-		switch (cipher_xfrm->cipher.algo) {
-		case RTE_CRYPTO_CIPHER_AES_CBC:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
-			break;
-		case RTE_CRYPTO_CIPHER_AES_CTR:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR;
-			break;
-		default:
-			return -ENOTSUP;
-		}
-
-		switch (auth_xfrm->auth.algo) {
-		case RTE_CRYPTO_AUTH_SHA1_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1;
-			break;
-		default:
-			return -ENOTSUP;
-		}
-		auth_key_len = auth_xfrm->auth.key.length;
-		if (auth_key_len < 20 || auth_key_len > 64)
-			return -ENOTSUP;
-
-		key = cipher_xfrm->cipher.key.data;
-		length = cipher_xfrm->cipher.key.length;
-
-		roc_se_hmac_opad_ipad_gen(ctl->auth_type, auth_xfrm->auth.key.data,
-					  auth_xfrm->auth.key.length, hmac_opad_ipad, ROC_SE_IPSEC);
-	}
-
-	switch (length) {
-	case ROC_CPT_AES128_KEY_LEN:
-		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
-		break;
-	case ROC_CPT_AES192_KEY_LEN:
-		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
-		break;
-	case ROC_CPT_AES256_KEY_LEN:
-		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	memcpy(cipher_key, key, length);
-
-	if (ipsec_xfrm->options.esn)
-		ctl->esn_en = 1;
-
-	ctl->spi = rte_cpu_to_be_32(ipsec_xfrm->spi);
-	return 0;
-}
-
-int
-cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa,
-			   struct rte_security_ipsec_xform *ipsec_xfrm,
-			   struct rte_crypto_sym_xform *crypto_xfrm)
-{
-	struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;
-	int rc;
-
-	rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
-					    sa->hmac_key, ipsec_xfrm,
-					    crypto_xfrm);
-	if (rc)
-		return rc;
-
-	rte_wmb();
-
-	/* Enable SA */
-	ctl->valid = 1;
-	return 0;
-}
-
-int
-cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
-			    struct rte_security_ipsec_xform *ipsec_xfrm,
-			    struct rte_crypto_sym_xform *crypto_xfrm)
-{
-	struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
-	struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;
-	int rc;
-
-	/* Fill common params */
-	rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
-					    sa->hmac_key, ipsec_xfrm,
-					    crypto_xfrm);
-	if (rc)
-		return rc;
-
-	if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
-		goto skip_tunnel_info;
-
-	/* Tunnel header info */
-	switch (tunnel->type) {
-	case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
-		memcpy(&sa->ip_src, &tunnel->ipv4.src_ip,
-		       sizeof(struct in_addr));
-		memcpy(&sa->ip_dst, &tunnel->ipv4.dst_ip,
-		       sizeof(struct in_addr));
-		break;
-	case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
-		return -ENOTSUP;
-	default:
-		return -EINVAL;
-	}
-
-	/* Update udp encap ports */
-	if (ipsec_xfrm->options.udp_encap == 1) {
-		sa->udp_src = 4500;
-		sa->udp_dst = 4500;
-	}
-
-skip_tunnel_info:
-	rte_wmb();
-
-	/* Enable SA */
-	ctl->valid = 1;
-	return 0;
-}
-
-bool
-cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa)
-{
-	return !!sa->ctl.valid;
-}
-
-bool
-cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa)
-{
-	return !!sa->ctl.valid;
-}
-
 uint8_t
 cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
 		     enum rte_crypto_auth_algorithm a_algo,
diff --git a/drivers/common/cnxk/cnxk_security.h b/drivers/common/cnxk/cnxk_security.h
index b323b8b757..19eb9bb03d 100644
--- a/drivers/common/cnxk/cnxk_security.h
+++ b/drivers/common/cnxk/cnxk_security.h
@@ -48,18 +48,6 @@ cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
 bool __roc_api cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa);
 bool __roc_api cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa);
 
-/* [CN9K, CN10K) */
-int __roc_api
-cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa,
-			   struct rte_security_ipsec_xform *ipsec_xfrm,
-			   struct rte_crypto_sym_xform *crypto_xfrm);
-int __roc_api
-cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
-			    struct rte_security_ipsec_xform *ipsec_xfrm,
-			    struct rte_crypto_sym_xform *crypto_xfrm);
-bool __roc_api cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa);
-bool __roc_api cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa);
-
 /* [CN9K] */
 int __roc_api cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
 					  struct rte_crypto_sym_xform *crypto_xform,
diff --git a/drivers/common/cnxk/roc_ie_on.h b/drivers/common/cnxk/roc_ie_on.h
index 9933ffa148..11c995e9d1 100644
--- a/drivers/common/cnxk/roc_ie_on.h
+++ b/drivers/common/cnxk/roc_ie_on.h
@@ -269,66 +269,6 @@ struct roc_ie_on_inb_sa {
 #define ROC_IE_ON_UCC_L2_HDR_INFO_ERR	  0xCF
 #define ROC_IE_ON_UCC_L2_HDR_LEN_ERR	  0xE0
 
-struct roc_ie_onf_sa_ctl {
-	uint32_t spi;
-	uint64_t exp_proto_inter_frag : 8;
-	uint64_t rsvd_41_40 : 2;
-	/* Disable SPI, SEQ data in RPTR for Inbound inline */
-	uint64_t spi_seq_dis : 1;
-	uint64_t esn_en : 1;
-	uint64_t rsvd_44_45 : 2;
-	uint64_t encap_type : 2;
-	uint64_t enc_type : 3;
-	uint64_t rsvd_48 : 1;
-	uint64_t auth_type : 4;
-	uint64_t valid : 1;
-	uint64_t direction : 1;
-	uint64_t outer_ip_ver : 1;
-	uint64_t inner_ip_ver : 1;
-	uint64_t ipsec_mode : 1;
-	uint64_t ipsec_proto : 1;
-	uint64_t aes_key_len : 2;
-};
-
-struct roc_onf_ipsec_outb_sa {
-	/* w0 */
-	struct roc_ie_onf_sa_ctl ctl;
-
-	/* w1 */
-	uint8_t nonce[4];
-	uint16_t udp_src;
-	uint16_t udp_dst;
-
-	/* w2 */
-	uint32_t ip_src;
-	uint32_t ip_dst;
-
-	/* w3-w6 */
-	uint8_t cipher_key[32];
-
-	/* w7-w12 */
-	uint8_t hmac_key[48];
-};
-
-struct roc_onf_ipsec_inb_sa {
-	/* w0 */
-	struct roc_ie_onf_sa_ctl ctl;
-
-	/* w1 */
-	uint8_t nonce[4]; /* Only for AES-GCM */
-	uint32_t unused;
-
-	/* w2 */
-	uint32_t esn_hi;
-	uint32_t esn_low;
-
-	/* w3-w6 */
-	uint8_t cipher_key[32];
-
-	/* w7-w12 */
-	uint8_t hmac_key[48];
-};
-
 #define ROC_ONF_IPSEC_INB_MAX_L2_SZ	  32UL
 #define ROC_ONF_IPSEC_OUTB_MAX_L2_SZ	  30UL
 #define ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ (ROC_ONF_IPSEC_OUTB_MAX_L2_SZ + 2)
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index ab1e9c0f98..f5ce26f03f 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -4,24 +4,6 @@
 #ifndef _ROC_NIX_INL_H_
 #define _ROC_NIX_INL_H_
 
-/* ONF INB HW area */
-#define ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ                                        \
-	PLT_ALIGN(sizeof(struct roc_onf_ipsec_inb_sa), ROC_ALIGN)
-/* ONF INB SW reserved area */
-#define ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD 384
-#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ                                        \
-	(ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD)
-#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2 9
-
-/* ONF OUTB HW area */
-#define ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ                                       \
-	PLT_ALIGN(sizeof(struct roc_onf_ipsec_outb_sa), ROC_ALIGN)
-/* ONF OUTB SW reserved area */
-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD 128
-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ                                       \
-	(ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD)
-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2 8
-
 /* ON INB HW area */
 #define ROC_NIX_INL_ON_IPSEC_INB_HW_SZ                                         \
 	PLT_ALIGN(sizeof(struct roc_ie_on_inb_sa), ROC_ALIGN)
@@ -31,10 +13,10 @@
 	(ROC_NIX_INL_ON_IPSEC_INB_HW_SZ + ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD)
 #define ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2 10
 
-/* ONF OUTB HW area */
+/* ON OUTB HW area */
 #define ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ                                        \
 	PLT_ALIGN(sizeof(struct roc_ie_on_outb_sa), ROC_ALIGN)
-/* ONF OUTB SW reserved area */
+/* ON OUTB SW reserved area */
 #define ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD 256
 #define ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ                                        \
 	(ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD)
@@ -86,34 +68,6 @@ roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(void *sa)
 	return PLT_PTR_ADD(sa, ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ);
 }
 
-static inline struct roc_onf_ipsec_inb_sa *
-roc_nix_inl_onf_ipsec_inb_sa(uintptr_t base, uint64_t idx)
-{
-	uint64_t off = idx << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2;
-
-	return PLT_PTR_ADD(base, off);
-}
-
-static inline struct roc_onf_ipsec_outb_sa *
-roc_nix_inl_onf_ipsec_outb_sa(uintptr_t base, uint64_t idx)
-{
-	uint64_t off = idx << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2;
-
-	return PLT_PTR_ADD(base, off);
-}
-
-static inline void *
-roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(void *sa)
-{
-	return PLT_PTR_ADD(sa, ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ);
-}
-
-static inline void *
-roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(void *sa)
-{
-	return PLT_PTR_ADD(sa, ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ);
-}
-
 /* Inline device SSO Work callback */
 typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
 					  uint32_t soft_exp_event);
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 4981d42ab7..a7402d9941 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -17,10 +17,6 @@ INTERNAL {
 	cnxk_logtype_sso;
 	cnxk_logtype_tim;
 	cnxk_logtype_tm;
-	cnxk_onf_ipsec_inb_sa_fill;
-	cnxk_onf_ipsec_outb_sa_fill;
-	cnxk_onf_ipsec_inb_sa_valid;
-	cnxk_onf_ipsec_outb_sa_valid;
 	cnxk_ot_ipsec_inb_sa_fill;
 	cnxk_ot_ipsec_outb_sa_fill;
 	cnxk_ot_ipsec_inb_sa_valid;
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 8e862be933..a0e9300cff 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -75,7 +75,7 @@ parse_ipsec_out_max_sa(const char *key, const char *value, void *extra_args)
 	if (errno)
 		val = 0;
 
-	*(uint16_t *)extra_args = val;
+	*(uint32_t *)extra_args = val;
 
 	return 0;
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 02/14] net/cnxk: add IPsec SA defines for PMD API
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 03/14] net/cnxk: add transport mode to security capability on cn9k Nithin Dabilpuram
                     ` (11 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Define inbound and outbound IPsec data types for the PMD APIs
rte_pmd_cnxk_hw_sa_read() and rte_pmd_cnxk_hw_sa_write().
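
A minimal usage sketch of the typed API (illustrative only; "device" and
"sess" are the same handles applications already pass to this API, and a
zero return is assumed to mean success):

	#include <stdio.h>
	#include <string.h>
	#include <rte_pmd_cnxk.h>

	static void
	dump_inb_sa(void *device, struct rte_security_session *sess)
	{
		union rte_pmd_cnxk_ipsec_hw_sa sa;

		memset(&sa, 0, sizeof(sa));
		if (rte_pmd_cnxk_hw_sa_read(device, sess, &sa, sizeof(sa)) != 0)
			return;

		if (sa.inb.w2.s.valid)
			printf("inbound SA: spi=0x%x ar_win=%u\n",
			       (unsigned int)sa.inb.w2.s.spi,
			       (unsigned int)sa.inb.w0.s.ar_win);
	}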

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cn10k_ethdev_sec.c |  18 +-
 drivers/net/cnxk/rte_pmd_cnxk.h     | 397 +++++++++++++++++++++++++++-
 2 files changed, 411 insertions(+), 4 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 575d0fabd5..05ec49d981 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -14,6 +14,20 @@
 #include <cnxk_security.h>
 #include <roc_priv.h>
 
+PLT_STATIC_ASSERT(offsetof(struct rte_pmd_cnxk_ipsec_inb_sa, ctx.ar_winbits) ==
+		  offsetof(struct roc_ot_ipsec_inb_sa, ctx.ar_winbits));
+
+PLT_STATIC_ASSERT(offsetof(struct rte_pmd_cnxk_ipsec_outb_sa, ctx.mib_pkts) ==
+		  offsetof(struct roc_ot_ipsec_outb_sa, ctx.mib_pkts));
+
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_CTX_MAX_CKEY_LEN == ROC_CTX_MAX_CKEY_LEN);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN == ROC_CTX_MAX_OPAD_IPAD_LEN);
+
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MIN == ROC_AR_WIN_SIZE_MIN);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MAX == ROC_AR_WIN_SIZE_MAX);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_LOG_MIN_AR_WIN_SIZE_M1 == ROC_LOG_MIN_AR_WIN_SIZE_M1);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WINBITS_SZ == ROC_AR_WINBITS_SZ);
+
 static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
 	{	/* AES GCM */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -1143,7 +1157,7 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
 
 int
 rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
-			void *data, uint32_t len)
+			union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
@@ -1166,7 +1180,7 @@ rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
 
 int
 rte_pmd_cnxk_hw_sa_write(void *device, struct rte_security_session *sess,
-			 void *data, uint32_t len)
+			 union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
diff --git a/drivers/net/cnxk/rte_pmd_cnxk.h b/drivers/net/cnxk/rte_pmd_cnxk.h
index 7827c33ac9..43f2a7ed9b 100644
--- a/drivers/net/cnxk/rte_pmd_cnxk.h
+++ b/drivers/net/cnxk/rte_pmd_cnxk.h
@@ -60,6 +60,399 @@ struct rte_pmd_cnxk_sec_action {
 	enum rte_pmd_cnxk_sec_action_alg alg;
 };
 
+#define RTE_PMD_CNXK_CTX_MAX_CKEY_LEN	   32
+#define RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN 128
+
+/** Anti replay window size supported */
+#define RTE_PMD_CNXK_AR_WIN_SIZE_MIN	    64
+#define RTE_PMD_CNXK_AR_WIN_SIZE_MAX	    4096
+#define RTE_PMD_CNXK_LOG_MIN_AR_WIN_SIZE_M1 5
+
+/** u64 array size to fit anti replay window bits */
+#define RTE_PMD_CNXK_AR_WINBITS_SZ (RTE_ALIGN_CEIL(RTE_PMD_CNXK_AR_WIN_SIZE_MAX, 64) / 64)
+
+/** Outer header info for Inbound or Outbound */
+union rte_pmd_cnxk_ipsec_outer_ip_hdr {
+	struct {
+		/** IPv4 destination */
+		uint32_t dst_addr;
+		/** IPv4 source */
+		uint32_t src_addr;
+	} ipv4;
+	struct {
+		/** IPv6 source */
+		uint8_t src_addr[16];
+		/** IPv6 destination */
+		uint8_t dst_addr[16];
+	} ipv6;
+};
+
+/** Inbound IPsec context update region */
+struct rte_pmd_cnxk_ipsec_inb_ctx_update_reg {
+	/** Highest sequence number received */
+	uint64_t ar_base;
+	/** Valid bit for 64-bit words of replay window */
+	uint64_t ar_valid_mask;
+	/** Hard life for SA */
+	uint64_t hard_life;
+	/** Soft life for SA */
+	uint64_t soft_life;
+	/** MIB octets */
+	uint64_t mib_octs;
+	/** MIB packets */
+	uint64_t mib_pkts;
+	/** AR window bits */
+	uint64_t ar_winbits[RTE_PMD_CNXK_AR_WINBITS_SZ];
+};
+
+/** Outbound IPsec IV data */
+union rte_pmd_cnxk_ipsec_outb_iv {
+	uint64_t u64[2];
+	/** IV debug - 16B*/
+	uint8_t iv_dbg[16];
+	struct {
+		/** IV debug - 8B */
+		uint8_t iv_dbg1[4];
+		/** Salt */
+		uint8_t salt[4];
+
+		uint32_t rsvd;
+		/** IV debug - 8B */
+		uint8_t iv_dbg2[4];
+	} s;
+};
+
+/** Outbound IPsec context update region */
+struct rte_pmd_cnxk_ipsec_outb_ctx_update_reg {
+	union {
+		struct {
+			uint64_t reserved_0_2 : 3;
+			uint64_t address : 57;
+			uint64_t mode : 4;
+		} s;
+		uint64_t u64;
+	} err_ctl;
+
+	uint64_t esn_val;
+	uint64_t hard_life;
+	uint64_t soft_life;
+	uint64_t mib_octs;
+	uint64_t mib_pkts;
+};
+
+/**
+ * Inbound IPsec SA
+ */
+struct rte_pmd_cnxk_ipsec_inb_sa {
+	/** Word0 */
+	union {
+		struct {
+			/** AR window size */
+			uint64_t ar_win : 3;
+			/** Hard life enable */
+			uint64_t hard_life_dec : 1;
+			/** Soft life enable */
+			uint64_t soft_life_dec : 1;
+
+			/** Count global octets */
+			uint64_t count_glb_octets : 1;
+			/** Count global pkts */
+			uint64_t count_glb_pkts : 1;
+			/** Count bytes */
+			uint64_t count_mib_bytes : 1;
+
+			/** Count pkts */
+			uint64_t count_mib_pkts : 1;
+			/** HW context offset */
+			uint64_t hw_ctx_off : 7;
+
+			/** Context ID */
+			uint64_t ctx_id : 16;
+
+			/** Original packet free absolute */
+			uint64_t orig_pkt_fabs : 1;
+			/** Original packet free */
+			uint64_t orig_pkt_free : 1;
+			/** PKIND for second pass */
+			uint64_t pkind : 6;
+
+			uint64_t rsvd0 : 1;
+			/** Ether type overwrite */
+			uint64_t et_ovrwr : 1;
+			/** Packet output type */
+			uint64_t pkt_output : 2;
+			/** Packet format type */
+			uint64_t pkt_format : 1;
+			/** Defrag option */
+			uint64_t defrag_opt : 2;
+			/** Reserved for X2P dest */
+			uint64_t x2p_dst : 1;
+
+			/** Context push size */
+			uint64_t ctx_push_size : 7;
+			uint64_t rsvd1 : 1;
+
+			/** Context header size */
+			uint64_t ctx_hdr_size : 2;
+			/** AOP enable */
+			uint64_t aop_valid : 1;
+			uint64_t rsvd2 : 1;
+			/** Context size */
+			uint64_t ctx_size : 4;
+		} s;
+		uint64_t u64;
+	} w0;
+
+	/** Word1 */
+	union {
+		struct {
+			/** Original packet aura */
+			uint64_t orig_pkt_aura : 20;
+			uint64_t rsvd3 : 4;
+			/** Original packet free offset */
+			uint64_t orig_pkt_foff : 8;
+			/** SA cookie */
+			uint64_t cookie : 32;
+		} s;
+		uint64_t u64;
+	} w1;
+
+	/** Word 2 */
+	union {
+		struct {
+			/** SA valid */
+			uint64_t valid : 1;
+			/** SA direction */
+			uint64_t dir : 1;
+			uint64_t rsvd11 : 1;
+			uint64_t rsvd4 : 1;
+			/** IPsec mode */
+			uint64_t ipsec_mode : 1;
+			/** IPsec protocol */
+			uint64_t ipsec_protocol : 1;
+			/** AES key length */
+			uint64_t aes_key_len : 2;
+
+			/** Encryption algo */
+			uint64_t enc_type : 3;
+			/** Soft life and hard life unit */
+			uint64_t life_unit : 1;
+			/** Authentication algo */
+			uint64_t auth_type : 4;
+
+			/** Encapsulation type */
+			uint64_t encap_type : 2;
+			/** Ether type override enable */
+			uint64_t et_ovrwr_ddr_en : 1;
+			/** ESN enable */
+			uint64_t esn_en : 1;
+			/** Transport mode L4 checksum incrementally update */
+			uint64_t tport_l4_incr_csum : 1;
+			/** Outer IP header verification */
+			uint64_t ip_hdr_verify : 2;
+			/** UDP encapsulation ports verification */
+			uint64_t udp_ports_verify : 1;
+
+			/** Return 64B of L2/L3 header on error */
+			uint64_t l3hdr_on_err : 1;
+			uint64_t rsvd6 : 6;
+			uint64_t rsvd12 : 1;
+
+			/** SPI */
+			uint64_t spi : 32;
+		} s;
+		uint64_t u64;
+	} w2;
+
+	/** Word3 */
+	uint64_t rsvd7;
+
+	/** Word4 - Word7 */
+	uint8_t cipher_key[RTE_PMD_CNXK_CTX_MAX_CKEY_LEN];
+
+	/** Word8 - Word9 */
+	union {
+		struct {
+			uint32_t rsvd8;
+			/** IV salt */
+			uint8_t salt[4];
+		} s;
+		uint64_t u64;
+	} w8;
+	uint64_t rsvd9;
+
+	/** Word10 */
+	union {
+		struct {
+			uint64_t rsvd10 : 32;
+			/** UDP encapsulation source port */
+			uint64_t udp_src_port : 16;
+			/** UDP encapsulation destination port */
+			uint64_t udp_dst_port : 16;
+		} s;
+		uint64_t u64;
+	} w10;
+
+	/** Word11 - Word14 */
+	union rte_pmd_cnxk_ipsec_outer_ip_hdr outer_hdr;
+
+	/** Word15 - Word30 */
+	uint8_t hmac_opad_ipad[RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN];
+
+	/** Word31 - Word100 */
+	struct rte_pmd_cnxk_ipsec_inb_ctx_update_reg ctx;
+};
+
+/**
+ * Outbound IPsec SA
+ */
+struct rte_pmd_cnxk_ipsec_outb_sa {
+	/** Word0 */
+	union {
+		struct {
+			/** ESN enable */
+			uint64_t esn_en : 1;
+			/** IP ID generation type */
+			uint64_t ip_id : 1;
+			uint64_t rsvd0 : 1;
+			/** Hard life enable */
+			uint64_t hard_life_dec : 1;
+			/** Soft life enable */
+			uint64_t soft_life_dec : 1;
+
+			/** Count global octets */
+			uint64_t count_glb_octets : 1;
+			/** Count global pkts */
+			uint64_t count_glb_pkts : 1;
+			/** Count bytes */
+			uint64_t count_mib_bytes : 1;
+
+			/** Count pkts */
+			uint64_t count_mib_pkts : 1;
+			/** HW context offset */
+			uint64_t hw_ctx_off : 7;
+
+			/** Context ID */
+			uint64_t ctx_id : 16;
+			uint64_t rsvd1 : 16;
+
+			/** Context push size */
+			uint64_t ctx_push_size : 7;
+			uint64_t rsvd2 : 1;
+
+			/** Context header size */
+			uint64_t ctx_hdr_size : 2;
+			/** AOP enable */
+			uint64_t aop_valid : 1;
+			uint64_t rsvd3 : 1;
+			/** Context size */
+			uint64_t ctx_size : 4;
+		} s;
+		uint64_t u64;
+	} w0;
+
+	/** Word1 */
+	union {
+		struct {
+			uint64_t rsvd4 : 32;
+			/** SA cookie */
+			uint64_t cookie : 32;
+		} s;
+		uint64_t u64;
+	} w1;
+
+	/** Word 2 */
+	union {
+		struct {
+			/** SA valid */
+			uint64_t valid : 1;
+			/** SA direction */
+			uint64_t dir : 1;
+			uint64_t rsvd11 : 1;
+			uint64_t rsvd5 : 1;
+			/** IPsec mode */
+			uint64_t ipsec_mode : 1;
+			/** IPsec protocol */
+			uint64_t ipsec_protocol : 1;
+
+			/** AES key length */
+			uint64_t aes_key_len : 2;
+
+			/** Encryption algo */
+			uint64_t enc_type : 3;
+			/** Soft life and hard life unit */
+			uint64_t life_unit : 1;
+			/** Authentication algo */
+			uint64_t auth_type : 4;
+
+			/** Encapsulation type */
+			uint64_t encap_type : 2;
+			/** DF source */
+			uint64_t ipv4_df_src_or_ipv6_flw_lbl_src : 1;
+			/** DSCP source */
+			uint64_t dscp_src : 1;
+			/** IV source */
+			uint64_t iv_src : 2;
+			/** IPID value in outer header */
+			uint64_t ipid_gen : 1;
+			uint64_t rsvd6 : 1;
+
+			uint64_t rsvd7 : 7;
+			uint64_t rsvd12 : 1;
+
+			/** SPI */
+			uint64_t spi : 32;
+		} s;
+		uint64_t u64;
+	} w2;
+
+	/** Word3 */
+	uint64_t rsvd8;
+
+	/** Word4 - Word7 */
+	uint8_t cipher_key[RTE_PMD_CNXK_CTX_MAX_CKEY_LEN];
+
+	/** Word8 - Word9 */
+	union rte_pmd_cnxk_ipsec_outb_iv iv;
+
+	/** Word10 */
+	union {
+		struct {
+			uint64_t rsvd9 : 4;
+			/** Outer header IPv4 DF or IPv6 flow label */
+			uint64_t ipv4_df_or_ipv6_flw_lbl : 20;
+
+			/** DSCP for outer header */
+			uint64_t dscp : 6;
+			uint64_t rsvd10 : 2;
+
+			/** UDP encapsulation destination port */
+			uint64_t udp_dst_port : 16;
+
+			/** UDP encapsulation source port */
+			uint64_t udp_src_port : 16;
+		} s;
+		uint64_t u64;
+	} w10;
+
+	/** Word11 - Word14 */
+	union rte_pmd_cnxk_ipsec_outer_ip_hdr outer_hdr;
+
+	/** Word15 - Word30 */
+	uint8_t hmac_opad_ipad[RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN];
+
+	/** Word31 - Word36 */
+	struct rte_pmd_cnxk_ipsec_outb_ctx_update_reg ctx;
+};
+
+/** Inbound/Outbound IPsec SA */
+union rte_pmd_cnxk_ipsec_hw_sa {
+	/** Inbound SA */
+	struct rte_pmd_cnxk_ipsec_inb_sa inb;
+	/** Outbound SA */
+	struct rte_pmd_cnxk_ipsec_outb_sa outb;
+};
+
 /**
  * Read HW SA context from session.
  *
@@ -77,7 +470,7 @@ struct rte_pmd_cnxk_sec_action {
  */
 __rte_experimental
 int rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
-			    void *data, uint32_t len);
+			    union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len);
 /**
  * Write HW SA context to session.
  *
@@ -95,7 +488,7 @@ int rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
  */
 __rte_experimental
 int rte_pmd_cnxk_hw_sa_write(void *device, struct rte_security_session *sess,
-			     void *data, uint32_t len);
+			     union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len);
 
 /**
  * Get pointer to CPT result info for inline inbound processed pkt.
-- 
2.25.1
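
A rough usage sketch (not from the patch itself): with the typed signatures above, an
application holding an inline IPsec session could read back the hardware SA and inspect
the fields defined earlier. The header name and the exact object passed as the device
handle are assumptions for illustration; only rte_pmd_cnxk_hw_sa_read() and the SA
layout come from this series.

#include <stdio.h>
#include <string.h>
#include <rte_security.h>
#include <rte_pmd_cnxk.h>	/* assumed public header for the cnxk PMD-specific API */

/* Read the HW SA of an inline IPsec session and print a few inbound fields. */
static int
dump_inb_sa(void *device, struct rte_security_session *sess)
{
	union rte_pmd_cnxk_ipsec_hw_sa sa;
	int rc;

	memset(&sa, 0, sizeof(sa));
	rc = rte_pmd_cnxk_hw_sa_read(device, sess, &sa, sizeof(sa));
	if (rc)
		return rc;

	if (!sa.inb.w2.s.valid)
		return -1;

	printf("inbound SA: spi=0x%x esn_en=%u udp_src=%u udp_dst=%u\n",
	       (unsigned int)sa.inb.w2.s.spi,
	       (unsigned int)sa.inb.w2.s.esn_en,
	       (unsigned int)sa.inb.w10.s.udp_src_port,
	       (unsigned int)sa.inb.w10.s.udp_dst_port);
	return 0;
}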


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 03/14] net/cnxk: add transport mode to security capability on cn9k
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 02/14] net/cnxk: add IPsec SA defines for PMD API Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 04/14] common/cnxk: dump selected SQ entries Nithin Dabilpuram
                     ` (10 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Add transport mode to security capabilities since it
is supported by UCODE.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cn9k_ethdev_sec.c | 33 ++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 688b13ae1e..a0e0a73639 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -351,6 +351,39 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
 	},
+	{	/* IPsec Inline Protocol ESP Transport Ingress */
+		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+			.replay_win_sz_max = CNXK_ON_AR_WIN_SIZE_MAX,
+			.options = {
+				.udp_encap = 1,
+				.esn = 1,
+			},
+		},
+		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
+		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+	},
+	{	/* IPsec Inline Protocol ESP Transport Egress */
+		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+			.replay_win_sz_max = CNXK_ON_AR_WIN_SIZE_MAX,
+			.options = {
+				.iv_gen_disable = 1,
+				.udp_encap = 1,
+				.esn = 1,
+			},
+		},
+		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
+		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+	},
 	{
 		.action = RTE_SECURITY_ACTION_TYPE_NONE
 	}
-- 
2.25.1
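
With the capability table extended, an application can request an ESP transport mode
inline-protocol session through the normal rte_security path. A minimal sketch of the
session config matching the new ingress entry follows; the crypto transform, security
context and session mempool (crypto_xform, sec_ctx, sess_mp) are assumptions prepared
elsewhere, and the call assumes the current rte_security session API.

	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x100,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.replay_win_sz = 64,
			.options = {
				.udp_encap = 1,
				.esn = 1,
			},
		},
		.crypto_xform = crypto_xform,	/* AEAD or auth+cipher chain set up elsewhere */
	};
	void *sess;

	sess = rte_security_session_create(sec_ctx, &conf, sess_mp);
	if (sess == NULL)
		printf("inline IPsec transport mode session creation failed\n");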


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 04/14] common/cnxk: dump selected SQ entries
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 02/14] net/cnxk: add IPsec SA defines for PMD API Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 03/14] net/cnxk: add transport mode to security capability on cn9k Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 05/14] net/cnxk: added Tx descriptor dump API Nithin Dabilpuram
                     ` (9 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

New API to dump detailed SQ entries.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix.h       |   2 +
 drivers/common/cnxk/roc_nix_debug.c | 172 ++++++++++++++++++++++++++++
 drivers/common/cnxk/version.map     |   1 +
 3 files changed, 175 insertions(+)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 84e6fc3df5..9d57ca0be7 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -553,6 +553,8 @@ void __roc_api roc_nix_cqe_dump(FILE *file, const struct nix_cqe_hdr_s *cq);
 void __roc_api roc_nix_rq_dump(struct roc_nix_rq *rq, FILE *file);
 void __roc_api roc_nix_cq_dump(struct roc_nix_cq *cq, FILE *file);
 void __roc_api roc_nix_sq_dump(struct roc_nix_sq *sq, FILE *file);
+int __roc_api roc_nix_sq_desc_dump(struct roc_nix *roc_nix, uint16_t q, uint16_t offset,
+				   uint16_t num, FILE *file);
 void __roc_api roc_nix_tm_dump(struct roc_nix *roc_nix, FILE *file);
 void __roc_api roc_nix_dump(struct roc_nix *roc_nix, FILE *file);
 
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index 8962a76097..26546f9297 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -1362,3 +1362,175 @@ roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix, FILE *file)
 		cpt_lf_print(&lf_base[i]);
 	}
 }
+
+static void
+nix_tm_sqe_dump(uint64_t *sqe, int head_off, int end_off, int instr_sz, FILE *file, int full,
+		uint16_t *num)
+{
+	int i, j, inc = (8 * (0x2 >> instr_sz)), segs;
+	uint64_t *ptr;
+
+	if (!sqe || !(*num))
+		return;
+
+	ptr = sqe + (head_off * inc);
+	for (i = head_off; i < end_off; i++) {
+		if (!(*num))
+			return;
+		ptr = sqe + (i * inc);
+		nix_dump(file, "Entry : %d >>>>>\n", i);
+		nix_dump(file, "\t\tSEND_HDR[0]: 0x%016lx SEND_HDR[1]: 0x%016lx\n", *ptr,
+			 *(ptr + 1));
+		*num = *num - 1;
+		if (!full)
+			continue;
+		ptr += 2;
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_EXT) {
+			nix_dump(file, "\t\tSUBDC_EXT[0]: 0x%016lx DUBDC_EXT[1]: 0x%016lx\n", *ptr,
+				 *(ptr + 1));
+			ptr += 2;
+		}
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_AGE_AND_STATS) {
+			nix_dump(file,
+				 "\t\tSUBDC_AGE_STATS[0]: 0x%016lx SUBDC_AGE_STATS[1]: 0x%016lx\n",
+				 *ptr, *(ptr + 1));
+			ptr += 2;
+		}
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_JUMP) {
+			nix_dump(file, "\t\tSUBDC_JUMP: 0x%016lx\n", *ptr);
+			ptr += 1;
+			ptr = (uint64_t *)*ptr;
+		}
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_CRC) {
+			nix_dump(file, "\t\tSUBDC_CRC[0]: 0x%016lx SUBDC_CRC[1]: 0x%016lx\n", *ptr,
+				 *(ptr + 1));
+			ptr += 2;
+		}
+		/* We do not parse the immediate send descriptor */
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_IMM) {
+			nix_dump(file, "\t\tSUBDC_IMM: 0x%016lx ", *ptr);
+			continue;
+		}
+		while (1) {
+			if (((*ptr >> 60) & 0xF) == NIX_SUBDC_SG) {
+				nix_dump(file, "\t\tSUBDC_SG: 0x%016lx   ", *ptr);
+				segs = (*ptr >> 48) & 0x3;
+				ptr += 1;
+				for (j = 0; j < segs; j++) {
+					nix_dump(file, "\t\t\t  0x%016lx   ", *ptr);
+					ptr += 1;
+				}
+				if (segs == 2)
+					ptr += 1;
+			} else if (((*ptr >> 60) & 0xF) == NIX_SUBDC_SG2) {
+				nix_dump(file, "\t\tSUBDC_SG2: 0x%016lx   ", *ptr);
+				ptr += 1;
+				nix_dump(file, "\t\t\t  0x%016lx   ", *ptr);
+				ptr += 1;
+			} else
+				break;
+		}
+	}
+}
+
+int
+roc_nix_sq_desc_dump(struct roc_nix *roc_nix, uint16_t q, uint16_t offset, uint16_t num, FILE *file)
+{
+	int head_off, count, rc = 0, tail_off, full = 0;
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct roc_nix_sq *sq = nix->sqs[q];
+	void *sqb_buf, *dat, *tail_sqb;
+	struct ndc_sync_op *ndc_req;
+	struct dev *dev = &nix->dev;
+	uint16_t sqes_per_sqb;
+	struct mbox *mbox;
+
+	mbox = dev->mbox;
+	/* Sync NDC-NIX-TX for LF */
+	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox_get(mbox));
+	if (ndc_req == NULL) {
+		mbox_put(mbox);
+		return -EFAULT;
+	}
+
+	ndc_req->nix_lf_tx_sync = 1;
+	if (mbox_process(mbox))
+		rc |= NIX_ERR_NDC_SYNC;
+	mbox_put(mbox);
+
+	if (rc)
+		plt_err("NDC_SYNC failed rc %d", rc);
+
+	rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_SQ, q, (void *)&dat);
+	if (rc)
+		return rc;
+	if (roc_model_is_cn9k()) {
+		volatile struct nix_sq_ctx_s *ctx = (struct nix_sq_ctx_s *)dat;
+
+		if (ctx->mnq_dis || ctx->lmt_dis)
+			full = 1;
+
+		count = ctx->sqb_count;
+		sqb_buf = (void *)ctx->head_sqb;
+		tail_sqb = (void *)ctx->tail_sqb;
+		head_off = ctx->head_offset;
+		tail_off = ctx->tail_offset;
+	} else {
+		volatile struct nix_cn10k_sq_ctx_s *ctx = (struct nix_cn10k_sq_ctx_s *)dat;
+
+		if (ctx->mnq_dis || ctx->lmt_dis)
+			full = 1;
+
+		count = ctx->sqb_count;
+		sqb_buf = (void *)ctx->head_sqb;
+		tail_sqb = (void *)ctx->tail_sqb;
+		head_off = ctx->head_offset;
+		tail_off = ctx->tail_offset;
+	}
+	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
+	while (count) {
+		void *next_sqb;
+
+		if (sqb_buf == tail_sqb) {
+			if ((head_off + offset) >= tail_off) /* Nothing to dump */
+				return 0;
+			head_off += tail_off;
+			break;
+		} else if ((head_off + offset) >= sqes_per_sqb) {
+			next_sqb = *(void **)((uint64_t *)sqb_buf +
+					      (uint32_t)((sqes_per_sqb - 1) *
+							 (0x2 >> sq->max_sqe_sz) * 8));
+			/* While traffic is running, HW may have freed/reused this SQE */
+			if (!next_sqb)
+				return 0;
+			sqb_buf = next_sqb;
+			head_off = 0;
+			count--;
+		} else {
+			head_off += offset;
+			break;
+		}
+	}
+	while (count) {
+		void *next_sqb;
+
+		if (sqb_buf == tail_sqb)
+			nix_tm_sqe_dump(sqb_buf, head_off, tail_off, sq->max_sqe_sz, file, full,
+					&num);
+		else
+			nix_tm_sqe_dump(sqb_buf, head_off, (sqes_per_sqb - 1), sq->max_sqe_sz, file,
+					full, &num);
+		if (!num)
+			break;
+		next_sqb = *(void **)((uint64_t *)sqb_buf +
+				      (uint32_t)((sqes_per_sqb - 1) * (0x2 >> sq->max_sqe_sz) * 8));
+		/* While traffic is running, HW may have freed/reused this SQE */
+		if (!next_sqb)
+			return 0;
+		sqb_buf = next_sqb;
+		head_off = 0;
+		count--;
+	}
+
+	return 0;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index a7402d9941..05432ad245 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -340,6 +340,7 @@ INTERNAL {
 	roc_nix_rx_drop_re_set;
 	roc_nix_rx_queue_intr_disable;
 	roc_nix_rx_queue_intr_enable;
+	roc_nix_sq_desc_dump;
 	roc_nix_sq_dump;
 	roc_nix_sq_ena_dis;
 	roc_nix_sq_fini;
-- 
2.25.1
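
A possible driver-internal call site, just to illustrate the parameters (the roc_nix
handle and queue id are placeholders; the symbol is exported as INTERNAL, so it is only
reachable from other DPDK drivers):

	/* Dump up to 32 SQEs of SQ 0, starting from its current head. */
	rc = roc_nix_sq_desc_dump(roc_nix, 0 /* q */, 0 /* offset */, 32 /* num */, stdout);
	if (rc)
		plt_err("SQ descriptor dump failed, rc=%d", rc);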


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 05/14] net/cnxk: added Tx descriptor dump API
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (2 preceding siblings ...)
  2024-02-22 11:02   ` [PATCH v4 04/14] common/cnxk: dump selected SQ entries Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 06/14] net/cnxk: fix issue with buff size compute Nithin Dabilpuram
                     ` (8 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

New API to dump selected descriptor entries from SQE list.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev.c     |  1 +
 drivers/net/cnxk/cnxk_ethdev.h     |  2 ++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 10 ++++++++++
 3 files changed, 13 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 2372a4e793..7640910782 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1821,6 +1821,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.cman_config_init = cnxk_nix_cman_config_init,
 	.cman_config_set = cnxk_nix_cman_config_set,
 	.cman_config_get = cnxk_nix_cman_config_get,
+	.eth_tx_descriptor_dump = cnxk_nix_tx_descriptor_dump,
 };
 
 void
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 37b6395b93..45b9055234 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -557,6 +557,8 @@ int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
 int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
 			     int mark_yellow, int mark_red,
 			     struct rte_tm_error *error);
+int cnxk_nix_tx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t offset,
+				uint16_t num, FILE *file);
 
 /* MTR */
 int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 5de2919047..e9ab8da781 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -1313,3 +1313,13 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
 exit:
 	return rc;
 }
+
+int
+cnxk_nix_tx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t offset,
+			    uint16_t num, FILE *file)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *nix = &dev->nix;
+
+	return roc_nix_sq_desc_dump(nix, qid, offset, num, file);
+}
-- 
2.25.1
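
Since the callback is wired into eth_dev_ops, applications reach it through the generic
ethdev debug API rather than a PMD-specific call. A minimal sketch, assuming port 0 is a
cnxk device with a configured Tx queue 0:

	#include <stdio.h>
	#include <rte_ethdev.h>

	/* Dump 16 Tx descriptors of queue 0 on port 0, starting at offset 0. */
	int rc = rte_eth_tx_descriptor_dump(0, 0, 0, 16, stdout);
	if (rc != 0)
		fprintf(stderr, "Tx descriptor dump failed: %d\n", rc);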


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 06/14] net/cnxk: fix issue with buff size compute
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (3 preceding siblings ...)
  2024-02-22 11:02   ` [PATCH v4 05/14] net/cnxk: added Tx descriptor dump API Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 07/14] common/cnxk: skip setting Tx MTU separately Nithin Dabilpuram
                     ` (7 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev, stable

In the case where cnxk_nix_mtu_set() is called before
data->min_rx_buf_size is set, use the buffer size from the
first RQ's mempool.

Fixes: 34b46320f446 ("net/cnxk: perform early MTU setup for event mode")
Cc: stable@dpdk.org

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev_ops.c | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index e9ab8da781..e816884d47 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -544,8 +544,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq_sp;
+	uint32_t buffsz = 0;
 	int rc = -EINVAL;
-	uint32_t buffsz;
 
 	frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en;
 
@@ -561,8 +562,24 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 		goto exit;
 	}
 
-	buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
-	old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;
+	if (!eth_dev->data->nb_rx_queues)
+		goto skip_buffsz_check;
+
+	/* Perform buff size check */
+	if (data->min_rx_buf_size) {
+		buffsz = data->min_rx_buf_size;
+	} else if (eth_dev->data->rx_queues && eth_dev->data->rx_queues[0]) {
+		rxq_sp = cnxk_eth_rxq_to_sp(data->rx_queues[0]);
+
+		if (rxq_sp->qconf.mp)
+			buffsz = rte_pktmbuf_data_room_size(rxq_sp->qconf.mp);
+	}
+
+	/* Skip validation if RQs are not yet set up */
+	if (!buffsz)
+		goto skip_buffsz_check;
+
+	buffsz -= RTE_PKTMBUF_HEADROOM;
 
 	/* Refuse MTU that requires the support of scattered packets
 	 * when this feature has not been enabled before.
@@ -580,6 +597,8 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 		goto exit;
 	}
 
+skip_buffsz_check:
+	old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;
 	/* if new MTU was smaller than old one, then flush all SQs before MTU change */
 	if (old_frame_size > frame_size) {
 		if (data->dev_started) {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 07/14] common/cnxk: skip setting Tx MTU separately
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (4 preceding siblings ...)
  2024-02-22 11:02   ` [PATCH v4 06/14] net/cnxk: fix issue with buff size compute Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 08/14] net/cnxk: fix max MTU limit Nithin Dabilpuram
                     ` (6 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Skip setting the Tx MTU separately as the Tx credit configuration
is now based on the maximum MTU possible for that link.
Also initialize the MTU with the maximum value for that port.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix.c      |  2 +-
 drivers/common/cnxk/roc_nix.h      |  2 --
 drivers/net/cnxk/cnxk_ethdev_ops.c | 12 +-----------
 3 files changed, 2 insertions(+), 14 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 97c0ae3e25..90ccb260fb 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -484,7 +484,7 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 	sdp_lbk_id_update(pci_dev, nix);
 	nix->pci_dev = pci_dev;
 	nix->reta_sz = reta_sz;
-	nix->mtu = ROC_NIX_DEFAULT_HW_FRS;
+	nix->mtu = roc_nix_max_pkt_len(roc_nix);
 	nix->dmac_flt_idx = -1;
 
 	/* Register error and ras interrupts */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 9d57ca0be7..3799b551f2 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -267,8 +267,6 @@ struct roc_nix_eeprom_info {
 #define ROC_NIX_RSS_KEY_LEN	     48 /* 352 Bits */
 #define ROC_NIX_RSS_MCAM_IDX_DEFAULT (-1)
 
-#define ROC_NIX_DEFAULT_HW_FRS 1514
-
 #define ROC_NIX_VWQE_MAX_SIZE_LOG2 11
 #define ROC_NIX_VWQE_MIN_SIZE_LOG2 2
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index e816884d47..4962f3bced 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -610,19 +610,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 
 	frame_size -= RTE_ETHER_CRC_LEN;
 
-	/* Update mtu on Tx */
-	rc = roc_nix_mac_mtu_set(nix, frame_size);
-	if (rc) {
-		plt_err("Failed to set MTU, rc=%d", rc);
-		goto exit;
-	}
-
-	/* Sync same frame size on Rx */
+	/* Set frame size on Rx */
 	rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
 	if (rc) {
-		/* Rollback to older mtu */
-		roc_nix_mac_mtu_set(nix,
-				    old_frame_size - RTE_ETHER_CRC_LEN);
 		plt_err("Failed to max Rx frame length, rc=%d", rc);
 		goto exit;
 	}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 08/14] net/cnxk: fix max MTU limit
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (5 preceding siblings ...)
  2024-02-22 11:02   ` [PATCH v4 07/14] common/cnxk: skip setting Tx MTU separately Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 09/14] common/cnxk: update queue entries copy in RETA table Nithin Dabilpuram
                     ` (5 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev, stable

From: Sunil Kumar Kori <skori@marvell.com>

The device supports a maximum frame size of up to 9212 bytes. While
configuring the MTU, the overhead is accounted as Ethernet header size,
CRC and 2 * (VLAN tags), which translates to 26 bytes.

The overhead exposed to the user via rte_eth_dev_info() was 18 bytes,
which led to a wrong Rx frame size being set.

Fixes: 8589ec212e80 ("net/cnxk: support MTU set")
Cc: stable@dpdk.org

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev_ops.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 4962f3bced..56049c5dd2 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -20,8 +20,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
 	devinfo->max_mac_addrs = dev->max_mac_entries;
 	devinfo->max_vfs = pci_dev->max_vfs;
-	devinfo->max_mtu = devinfo->max_rx_pktlen -
-				(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
+	devinfo->max_mtu = devinfo->max_rx_pktlen - CNXK_NIX_L2_OVERHEAD;
 	devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;
 
 	devinfo->rx_offload_capa = dev->rx_offload_capa;
-- 
2.25.1
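
For clarity, the 26-byte figure above breaks down as follows. The actual
CNXK_NIX_L2_OVERHEAD definition is not shown in this hunk, so the macro below is only an
assumed equivalent in terms of the standard ethdev macros:

	#define L2_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 2 * RTE_VLAN_HLEN)
	/* = 14 + 4 + 2 * 4 = 26 bytes, so max_mtu becomes 9212 - 26 = 9186 instead of
	 * the previously advertised 9212 - 18 = 9194, which could push the Rx frame
	 * size past the 9212-byte limit.
	 */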


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 09/14] common/cnxk: update queue entries copy in RETA table
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (6 preceding siblings ...)
  2024-02-22 11:02   ` [PATCH v4 08/14] net/cnxk: fix max MTU limit Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 10/14] net/cnxk: fix indirect mbuf handling in Tx path Nithin Dabilpuram
                     ` (4 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, Kommula Shiva Shankar

From: Kommula Shiva Shankar <kshankar@marvell.com>

Update the queue entries copy in the RETA table to account for the
entry data type (16-bit), so the whole table is copied rather than
only ROC_NIX_RSS_RETA_MAX bytes.

Signed-off-by: Kommula Shiva Shankar <kshankar@marvell.com>
---
 drivers/common/cnxk/roc_nix_rss.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_rss.c b/drivers/common/cnxk/roc_nix_rss.c
index 3599eb9bae..2b88e1360d 100644
--- a/drivers/common/cnxk/roc_nix_rss.c
+++ b/drivers/common/cnxk/roc_nix_rss.c
@@ -196,7 +196,7 @@ roc_nix_rss_reta_set(struct roc_nix *roc_nix, uint8_t group,
 	if (rc)
 		return rc;
 
-	memcpy(&nix->reta[group], reta, ROC_NIX_RSS_RETA_MAX);
+	memcpy(&nix->reta[group], reta, sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX);
 	return 0;
 }
 
@@ -209,7 +209,7 @@ roc_nix_rss_reta_get(struct roc_nix *roc_nix, uint8_t group,
 	if (group >= ROC_NIX_RSS_GRPS)
 		return NIX_ERR_PARAM;
 
-	memcpy(reta, &nix->reta[group], ROC_NIX_RSS_RETA_MAX);
+	memcpy(reta, &nix->reta[group], sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX);
 	return 0;
 }
 
-- 
2.25.1
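
The point of the change is the unit of the third memcpy() argument: the RETA entries are
16-bit queue indices, so passing the entry count alone copies only half of the table. A
small sketch of the difference, with dst standing in for nix->reta[group]:

	uint16_t reta[ROC_NIX_RSS_RETA_MAX];
	uint16_t dst[ROC_NIX_RSS_RETA_MAX];	/* stand-in for nix->reta[group] */

	/* Before the fix: copies ROC_NIX_RSS_RETA_MAX bytes, i.e. only half the entries. */
	memcpy(dst, reta, ROC_NIX_RSS_RETA_MAX);

	/* After the fix: copies the full table of 16-bit entries. */
	memcpy(dst, reta, sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX);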


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 10/14] net/cnxk: fix indirect mbuf handling in Tx path
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (7 preceding siblings ...)
  2024-02-22 11:02   ` [PATCH v4 09/14] common/cnxk: update queue entries copy in RETA table Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 11/14] net/cnxk: fix check cookies for multi-seg offload Nithin Dabilpuram
                     ` (3 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, stable, Rahul Bhansali

An indirect mbuf can point to data from a different pool. Use the right
aura in the NIX send header in both the SG2 and SG cases.

Fixes: 862e28128707 ("net/cnxk: add vector Tx for CN9K")
Fixes: f71b7dbbf04b ("net/cnxk: add vector Tx for CN10K")
Fixes: 7e95c11df4f1 ("net/cnxk: add multi-segment Tx for CN9K")
Fixes: 3626d5195d49 ("net/cnxk: add multi-segment Tx for CN10K")
Cc: stable@dpdk.org

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/net/cnxk/cn10k_ethdev.c   |   6 +
 drivers/net/cnxk/cn10k_rxtx.h     |   1 +
 drivers/net/cnxk/cn10k_tx.h       | 265 ++++++++++++++++++---------
 drivers/net/cnxk/cn9k_ethdev.c    |   6 +
 drivers/net/cnxk/cn9k_ethdev.h    |   1 +
 drivers/net/cnxk/cn9k_tx.h        | 295 ++++++++++++++++++++----------
 drivers/net/cnxk/cnxk_ethdev_dp.h |  10 +-
 7 files changed, 406 insertions(+), 178 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index a2e943a3d0..a5696c092a 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -389,7 +389,13 @@ cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
 		struct roc_nix_sq *sq = &dev->sqs[qidx];
 		do {
 			handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
+			/* Check if SQ is empty */
 			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
+			if (head != tail)
+				continue;
+
+			/* Check if completion CQ is empty */
+			roc_nix_cq_head_tail_get(nix, sq->cqid, &head, &tail);
 		} while (head != tail);
 	}
 
diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h
index aeffc4ac92..9f33d0192e 100644
--- a/drivers/net/cnxk/cn10k_rxtx.h
+++ b/drivers/net/cnxk/cn10k_rxtx.h
@@ -177,6 +177,7 @@ handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe)
 			m = m_next;
 		}
 		rte_pktmbuf_free_seg(m);
+		txq->tx_compl.ptr[tx_compl_s0->sqe_id] = NULL;
 
 		head++;
 		head &= qmask;
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 467f0ccc65..a1e923cd9e 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -786,8 +786,9 @@ cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr,
 
 static __rte_always_inline uint64_t
 cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq,
-		struct nix_send_hdr_s *send_hdr)
+		      struct nix_send_hdr_s *send_hdr, uint64_t *aura)
 {
+	struct rte_mbuf *prev = NULL;
 	uint32_t sqe_id;
 
 	if (RTE_MBUF_HAS_EXTBUF(m)) {
@@ -796,7 +797,10 @@ cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq,
 			return 1;
 		}
 		if (send_hdr->w0.pnc) {
-			txq->tx_compl.ptr[send_hdr->w1.sqe_id]->next = m;
+			sqe_id = send_hdr->w1.sqe_id;
+			prev = txq->tx_compl.ptr[sqe_id];
+			m->next = prev;
+			txq->tx_compl.ptr[sqe_id] = m;
 		} else {
 			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
 			send_hdr->w0.pnc = 1;
@@ -806,10 +810,151 @@ cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq,
 		}
 		return 1;
 	} else {
-		return cnxk_nix_prefree_seg(m);
+		return cnxk_nix_prefree_seg(m, aura);
 	}
 }
 
+#if defined(RTE_ARCH_ARM64)
+/* Only called for the first segment of single-segment mbufs */
+static __rte_always_inline void
+cn10k_nix_prefree_seg_vec(struct rte_mbuf **mbufs, struct cn10k_eth_txq *txq,
+			  uint64x2_t *senddesc01_w0, uint64x2_t *senddesc23_w0,
+			  uint64x2_t *senddesc01_w1, uint64x2_t *senddesc23_w1)
+{
+	struct rte_mbuf **tx_compl_ptr = txq->tx_compl.ptr;
+	uint32_t nb_desc_mask = txq->tx_compl.nb_desc_mask;
+	bool tx_compl_ena = txq->tx_compl.ena;
+	struct rte_mbuf *m0, *m1, *m2, *m3;
+	struct rte_mbuf *cookie;
+	uint64_t w0, w1, aura;
+	uint64_t sqe_id;
+
+	m0 = mbufs[0];
+	m1 = mbufs[1];
+	m2 = mbufs[2];
+	m3 = mbufs[3];
+
+	/* mbuf 0 */
+	w0 = vgetq_lane_u64(*senddesc01_w0, 0);
+	if (RTE_MBUF_HAS_EXTBUF(m0)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc01_w1, 0);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m0);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m0;
+			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 0);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m0) ? m0 : rte_mbuf_from_indirect(m0);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m0, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 0);
+
+	/* mbuf1 */
+	w0 = vgetq_lane_u64(*senddesc01_w0, 1);
+	if (RTE_MBUF_HAS_EXTBUF(m1)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc01_w1, 1);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m1);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m1;
+			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 1);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m1) ? m1 : rte_mbuf_from_indirect(m1);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m1, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 1);
+
+	/* mbuf 2 */
+	w0 = vgetq_lane_u64(*senddesc23_w0, 0);
+	if (RTE_MBUF_HAS_EXTBUF(m2)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc23_w1, 0);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m2);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m2;
+			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 0);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m2) ? m2 : rte_mbuf_from_indirect(m2);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m2, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 0);
+
+	/* mbuf3 */
+	w0 = vgetq_lane_u64(*senddesc23_w0, 1);
+	if (RTE_MBUF_HAS_EXTBUF(m3)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc23_w1, 1);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m3);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m3;
+			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 1);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m3) ? m3 : rte_mbuf_from_indirect(m3);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m3, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 1);
+#ifndef RTE_LIBRTE_MEMPOOL_DEBUG
+	RTE_SET_USED(cookie);
+#endif
+}
+#endif
+
 static __rte_always_inline void
 cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
 {
@@ -889,6 +1034,9 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq,
 		sg = (union nix_send_sg_s *)(cmd + 2);
 	}
 
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+		send_hdr->w0.pnc = 0;
+
 	if (flags & (NIX_TX_NEED_SEND_HDR_W1 | NIX_TX_OFFLOAD_SECURITY_F)) {
 		ol_flags = m->ol_flags;
 		w1.u = 0;
@@ -1049,19 +1197,30 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq,
 		send_hdr->w1.u = w1.u;
 
 	if (!(flags & NIX_TX_MULTI_SEG_F)) {
+		struct rte_mbuf *cookie;
+
 		sg->seg1_size = send_hdr->w0.total;
 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+			uint64_t aura;
+
 			/* DF bit = 1 if refcount of current mbuf or parent mbuf
 			 *		is greater than 1
 			 * DF bit = 0 otherwise
 			 */
-			send_hdr->w0.df = cn10k_nix_prefree_seg(m, txq, send_hdr);
+			aura = send_hdr->w0.aura;
+			send_hdr->w0.df = cn10k_nix_prefree_seg(m, txq, send_hdr, &aura);
+			send_hdr->w0.aura = aura;
 		}
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		/* Mark mempool object as "put" since it is freed by NIX */
 		if (!send_hdr->w0.df)
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+#else
+		RTE_SET_USED(cookie);
+#endif
 	} else {
 		sg->seg1_size = m->data_len;
 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
@@ -1135,6 +1294,7 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 	struct nix_send_hdr_s *send_hdr;
 	union nix_send_sg_s *sg, l_sg;
 	union nix_send_sg2_s l_sg2;
+	struct rte_mbuf *cookie;
 	struct rte_mbuf *m_next;
 	uint8_t off, is_sg2;
 	uint64_t len, dlen;
@@ -1163,21 +1323,26 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 	len -= dlen;
 	nb_segs = m->nb_segs - 1;
 	m_next = m->next;
+	m->next = NULL;
 	slist = &cmd[3 + off + 1];
 
+	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 	/* Set invert df if buffer is not to be freed by H/W */
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-		prefree = cn10k_nix_prefree_seg(m, txq, send_hdr);
+		aura = send_hdr->w0.aura;
+		prefree = cn10k_nix_prefree_seg(m, txq, send_hdr, &aura);
+		send_hdr->w0.aura = aura;
 		l_sg.i1 = prefree;
 	}
 
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	/* Mark mempool object as "put" since it is freed by NIX */
 	if (!prefree)
-		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+		RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 	rte_io_wmb();
+#else
+	RTE_SET_USED(cookie);
 #endif
-	m->next = NULL;
 
 	/* Quickly handle single segmented packets. With this if-condition
 	 * compiler will completely optimize out the below do-while loop
@@ -1207,9 +1372,12 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 		aura = aura0;
 		prefree = 0;
 
+		m->next = NULL;
+
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
 			aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
-			prefree = cn10k_nix_prefree_seg(m, txq, send_hdr);
+			prefree = cn10k_nix_prefree_seg(m, txq, send_hdr, &aura);
 			is_sg2 = aura != aura0 && !prefree;
 		}
 
@@ -1259,13 +1427,14 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 			l_sg.subdc = NIX_SUBDC_SG;
 			slist++;
 		}
-		m->next = NULL;
 
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		/* Mark mempool object as "put" since it is freed by NIX
 		 */
 		if (!prefree)
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+#else
+		RTE_SET_USED(cookie);
 #endif
 		m = m_next;
 	} while (nb_segs);
@@ -1997,13 +2166,10 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 	uint64x2_t sgdesc01_w0, sgdesc23_w0;
 	uint64x2_t sgdesc01_w1, sgdesc23_w1;
 	struct cn10k_eth_txq *txq = tx_queue;
-	uint64x2_t xmask01_w0, xmask23_w0;
-	uint64x2_t xmask01_w1, xmask23_w1;
 	rte_iova_t io_addr = txq->io_addr;
 	uint8_t lnum, shift = 0, loff = 0;
 	uintptr_t laddr = txq->lmt_base;
 	uint8_t c_lnum, c_shft, c_loff;
-	struct nix_send_hdr_s send_hdr;
 	uint64x2_t ltypes01, ltypes23;
 	uint64x2_t xtmp128, ytmp128;
 	uint64x2_t xmask01, xmask23;
@@ -2153,7 +2319,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		}
 		/* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
 		senddesc01_w0 =
-			vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF));
+			vbicq_u64(senddesc01_w0, vdupq_n_u64(0x800FFFFFFFF));
 		sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF));
 
 		senddesc23_w0 = senddesc01_w0;
@@ -2859,73 +3025,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		    !(flags & NIX_TX_MULTI_SEG_F) &&
 		    !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
 			/* Set don't free bit if reference count > 1 */
-			xmask01_w0 = vdupq_n_u64(0);
-			xmask01_w1 = vdupq_n_u64(0);
-			xmask23_w0 = xmask01_w0;
-			xmask23_w1 = xmask01_w1;
-
-			/* Move mbufs to iova */
-			mbuf0 = (uint64_t *)tx_pkts[0];
-			mbuf1 = (uint64_t *)tx_pkts[1];
-			mbuf2 = (uint64_t *)tx_pkts[2];
-			mbuf3 = (uint64_t *)tx_pkts[3];
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf0, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 0);
-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 0);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf0)->pool,
-					(void **)&mbuf0, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf1, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 1);
-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 1);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf1)->pool,
-					(void **)&mbuf1, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf2, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 0);
-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 0);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf2)->pool,
-					(void **)&mbuf2, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf3, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 1);
-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 1);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf3)->pool,
-					(void **)&mbuf3, 1, 0);
-			}
-
-			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01_w0);
-			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23_w0);
-			senddesc01_w1 = vorrq_u64(senddesc01_w1, xmask01_w1);
-			senddesc23_w1 = vorrq_u64(senddesc23_w1, xmask23_w1);
+			cn10k_nix_prefree_seg_vec(tx_pkts, txq, &senddesc01_w0, &senddesc23_w0,
+						  &senddesc01_w1, &senddesc23_w1);
 		} else if (!(flags & NIX_TX_MULTI_SEG_F) &&
 			   !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
 			/* Move mbufs to iova */
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 67f21a9c7f..ea92b1dcb6 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -347,7 +347,13 @@ cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
 		struct roc_nix_sq *sq = &dev->sqs[qidx];
 		do {
 			handle_tx_completion_pkts(txq, 0);
+			/* Check if SQ is empty */
 			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
+			if (head != tail)
+				continue;
+
+			/* Check if completion CQ is empty */
+			roc_nix_cq_head_tail_get(nix, sq->cqid, &head, &tail);
 		} while (head != tail);
 	}
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index 9e0a3c5bb2..6ae0db62ca 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -169,6 +169,7 @@ handle_tx_completion_pkts(struct cn9k_eth_txq *txq, uint8_t mt_safe)
 			m = m_next;
 		}
 		rte_pktmbuf_free_seg(m);
+		txq->tx_compl.ptr[tx_compl_s0->sqe_id] = NULL;
 
 		head++;
 		head &= qmask;
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index fba4bb4215..e6512471b9 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -83,9 +83,10 @@ cn9k_nix_tx_skeleton(struct cn9k_eth_txq *txq, uint64_t *cmd,
 }
 
 static __rte_always_inline uint64_t
-cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq,
-		struct nix_send_hdr_s *send_hdr)
+cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq, struct nix_send_hdr_s *send_hdr,
+		     uint64_t *aura)
 {
+	struct rte_mbuf *prev;
 	uint32_t sqe_id;
 
 	if (RTE_MBUF_HAS_EXTBUF(m)) {
@@ -94,7 +95,10 @@ cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq,
 			return 1;
 		}
 		if (send_hdr->w0.pnc) {
-			txq->tx_compl.ptr[send_hdr->w1.sqe_id]->next = m;
+			sqe_id = send_hdr->w1.sqe_id;
+			prev = txq->tx_compl.ptr[sqe_id];
+			m->next = prev;
+			txq->tx_compl.ptr[sqe_id] = m;
 		} else {
 			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
 			send_hdr->w0.pnc = 1;
@@ -104,10 +108,151 @@ cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq,
 		}
 		return 1;
 	} else {
-		return cnxk_nix_prefree_seg(m);
+		return cnxk_nix_prefree_seg(m, aura);
 	}
 }
 
+#if defined(RTE_ARCH_ARM64)
+/* Only called for the first segment of single-segment mbufs */
+static __rte_always_inline void
+cn9k_nix_prefree_seg_vec(struct rte_mbuf **mbufs, struct cn9k_eth_txq *txq,
+			 uint64x2_t *senddesc01_w0, uint64x2_t *senddesc23_w0,
+			 uint64x2_t *senddesc01_w1, uint64x2_t *senddesc23_w1)
+{
+	struct rte_mbuf **tx_compl_ptr = txq->tx_compl.ptr;
+	uint32_t nb_desc_mask = txq->tx_compl.nb_desc_mask;
+	bool tx_compl_ena = txq->tx_compl.ena;
+	struct rte_mbuf *m0, *m1, *m2, *m3;
+	struct rte_mbuf *cookie;
+	uint64_t w0, w1, aura;
+	uint64_t sqe_id;
+
+	m0 = mbufs[0];
+	m1 = mbufs[1];
+	m2 = mbufs[2];
+	m3 = mbufs[3];
+
+	/* mbuf 0 */
+	w0 = vgetq_lane_u64(*senddesc01_w0, 0);
+	if (RTE_MBUF_HAS_EXTBUF(m0)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc01_w1, 0);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m0);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m0;
+			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 0);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m0) ? m0 : rte_mbuf_from_indirect(m0);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m0, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 0);
+
+	/* mbuf1 */
+	w0 = vgetq_lane_u64(*senddesc01_w0, 1);
+	if (RTE_MBUF_HAS_EXTBUF(m1)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc01_w1, 1);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m1);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m1;
+			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 1);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m1) ? m1 : rte_mbuf_from_indirect(m1);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m1, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 1);
+
+	/* mbuf 2 */
+	w0 = vgetq_lane_u64(*senddesc23_w0, 0);
+	if (RTE_MBUF_HAS_EXTBUF(m2)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc23_w1, 0);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m2);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m2;
+			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 0);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m2) ? m2 : rte_mbuf_from_indirect(m2);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m2, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 0);
+
+	/* mbuf3 */
+	w0 = vgetq_lane_u64(*senddesc23_w0, 1);
+	if (RTE_MBUF_HAS_EXTBUF(m3)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc23_w1, 1);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m3);
+		} else {
+			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m3;
+			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 1);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m3) ? m3 : rte_mbuf_from_indirect(m3);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m3, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 1);
+#ifndef RTE_LIBRTE_MEMPOOL_DEBUG
+	RTE_SET_USED(cookie);
+#endif
+}
+#endif
+
 static __rte_always_inline void
 cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
 {
@@ -191,6 +336,8 @@ cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq,
 		ol_flags = m->ol_flags;
 		w1.u = 0;
 	}
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+		send_hdr->w0.pnc = 0;
 
 	if (!(flags & NIX_TX_MULTI_SEG_F))
 		send_hdr->w0.total = m->data_len;
@@ -345,23 +492,33 @@ cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq,
 		send_hdr->w1.u = w1.u;
 
 	if (!(flags & NIX_TX_MULTI_SEG_F)) {
+		struct rte_mbuf *cookie;
+
 		sg->seg1_size = m->data_len;
 		*(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m);
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+			uint64_t aura;
 			/* DF bit = 1 if refcount of current mbuf or parent mbuf
 			 *		is greater than 1
 			 * DF bit = 0 otherwise
 			 */
-			send_hdr->w0.df = cn9k_nix_prefree_seg(m, txq, send_hdr);
+			aura = send_hdr->w0.aura;
+			send_hdr->w0.df = cn9k_nix_prefree_seg(m, txq, send_hdr, &aura);
+			send_hdr->w0.aura = aura;
 			/* Ensuring mbuf fields which got updated in
 			 * cnxk_nix_prefree_seg are written before LMTST.
 			 */
 			rte_io_wmb();
 		}
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		/* Mark mempool object as "put" since it is freed by NIX */
 		if (!send_hdr->w0.df)
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+#else
+		RTE_SET_USED(cookie);
+#endif
 	} else {
 		sg->seg1_size = m->data_len;
 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
@@ -443,6 +600,8 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 		      struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 {
 	struct nix_send_hdr_s *send_hdr;
+	uint64_t prefree = 0, aura;
+	struct rte_mbuf *cookie;
 	union nix_send_sg_s *sg;
 	struct rte_mbuf *m_next;
 	uint64_t *slist, sg_u;
@@ -467,17 +626,23 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 	m_next = m->next;
 	slist = &cmd[3 + off + 1];
 
+	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 	/* Set invert df if buffer is not to be freed by H/W */
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-		sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << 55);
+		aura = send_hdr->w0.aura;
+		prefree = (cn9k_nix_prefree_seg(m, txq, send_hdr, &aura) << 55);
+		send_hdr->w0.aura = aura;
+		sg_u |= prefree;
 		rte_io_wmb();
 	}
 
 	/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	if (!(sg_u & (1ULL << 55)))
-		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+		RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 	rte_io_wmb();
+#else
+	RTE_SET_USED(cookie);
 #endif
 	m = m_next;
 	if (!m)
@@ -488,16 +653,17 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 		m_next = m->next;
 		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
 		*slist = rte_mbuf_data_iova(m);
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 		/* Set invert df if buffer is not to be freed by H/W */
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << (i + 55));
+			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr, NULL) << (i + 55));
 			/* Commit changes to mbuf */
 			rte_io_wmb();
 		}
 		/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		if (!(sg_u & (1ULL << (i + 55))))
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 		rte_io_wmb();
 #endif
 		slist++;
@@ -709,8 +875,8 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 			       struct nix_send_hdr_s *send_hdr,
 			       union nix_send_sg_s *sg, const uint32_t flags)
 {
-	struct rte_mbuf *m_next;
-	uint64_t *slist, sg_u;
+	struct rte_mbuf *m_next, *cookie;
+	uint64_t *slist, sg_u, aura;
 	uint16_t nb_segs;
 	uint64_t segdw;
 	int i = 1;
@@ -727,13 +893,19 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 	m_next = m->next;
 
 	/* Set invert df if buffer is not to be freed by H/W */
-	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
-		sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << 55);
-		/* Mark mempool object as "put" since it is freed by NIX */
+	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+		aura = send_hdr->w0.aura;
+		sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr, &aura) << 55);
+		send_hdr->w0.aura = aura;
+	}
+	/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	if (!(sg_u & (1ULL << 55)))
-		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+		RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 	rte_io_wmb();
+#else
+	RTE_SET_USED(cookie);
 #endif
 
 	m = m_next;
@@ -742,14 +914,15 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 		m_next = m->next;
 		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
 		*slist = rte_mbuf_data_iova(m);
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 		/* Set invert df if buffer is not to be freed by H/W */
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
-			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << (i + 55));
+			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr, &aura) << (i + 55));
 			/* Mark mempool object as "put" since it is freed by NIX
 			 */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		if (!(sg_u & (1ULL << (i + 55))))
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 		rte_io_wmb();
 #endif
 		slist++;
@@ -789,15 +962,20 @@ cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq,
 			  uint64x2_t *cmd1, const uint32_t flags)
 {
 	struct nix_send_hdr_s send_hdr;
+	struct rte_mbuf *cookie;
 	union nix_send_sg_s sg;
+	uint64_t aura;
 	uint8_t ret;
 
 	if (m->nb_segs == 1) {
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
 			send_hdr.w0.u = vgetq_lane_u64(cmd0[0], 0);
 			send_hdr.w1.u = vgetq_lane_u64(cmd0[0], 1);
 			sg.u = vgetq_lane_u64(cmd1[0], 0);
-			sg.u |= (cn9k_nix_prefree_seg(m, txq, &send_hdr) << 55);
+			aura = send_hdr.w0.aura;
+			sg.u |= (cn9k_nix_prefree_seg(m, txq, &send_hdr, &aura) << 55);
+			send_hdr.w0.aura = aura;
 			cmd1[0] = vsetq_lane_u64(sg.u, cmd1[0], 0);
 			cmd0[0] = vsetq_lane_u64(send_hdr.w0.u, cmd0[0], 0);
 			cmd0[0] = vsetq_lane_u64(send_hdr.w1.u, cmd0[0], 1);
@@ -806,8 +984,10 @@ cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq,
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		sg.u = vgetq_lane_u64(cmd1[0], 0);
 		if (!(sg.u & (1ULL << 55)))
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 		rte_io_wmb();
+#else
+		RTE_SET_USED(cookie);
 #endif
 		return 2 + !!(flags & NIX_TX_NEED_EXT_HDR) +
 		       !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
@@ -962,10 +1142,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64x2_t sgdesc01_w1, sgdesc23_w1;
 	struct cn9k_eth_txq *txq = tx_queue;
 	uint64_t *lmt_addr = txq->lmt_addr;
-	uint64x2_t xmask01_w0, xmask23_w0;
-	uint64x2_t xmask01_w1, xmask23_w1;
 	rte_iova_t io_addr = txq->io_addr;
-	struct nix_send_hdr_s send_hdr;
 	uint64x2_t ltypes01, ltypes23;
 	uint64x2_t xtmp128, ytmp128;
 	uint64x2_t xmask01, xmask23;
@@ -1028,7 +1205,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 	for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
 		/* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
 		senddesc01_w0 =
-			vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF));
+			vbicq_u64(senddesc01_w0, vdupq_n_u64(0x800FFFFFFFF));
 		sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF));
 
 		senddesc23_w0 = senddesc01_w0;
@@ -1732,74 +1909,8 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 		if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
 		    !(flags & NIX_TX_MULTI_SEG_F)) {
 			/* Set don't free bit if reference count > 1 */
-			xmask01_w0 = vdupq_n_u64(0);
-			xmask01_w1 = vdupq_n_u64(0);
-			xmask23_w0 = xmask01_w0;
-			xmask23_w1 = xmask01_w1;
-
-			/* Move mbufs to iova */
-			mbuf0 = (uint64_t *)tx_pkts[0];
-			mbuf1 = (uint64_t *)tx_pkts[1];
-			mbuf2 = (uint64_t *)tx_pkts[2];
-			mbuf3 = (uint64_t *)tx_pkts[3];
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf0, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 0);
-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 0);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf0)->pool,
-					(void **)&mbuf0, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf1, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 1);
-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 1);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf1)->pool,
-					(void **)&mbuf1, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf2, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 0);
-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 0);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf2)->pool,
-					(void **)&mbuf2, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf3, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 1);
-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 1);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf3)->pool,
-					(void **)&mbuf3, 1, 0);
-			}
-
-			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01_w0);
-			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23_w0);
-			senddesc01_w1 = vorrq_u64(senddesc01_w1, xmask01_w1);
-			senddesc23_w1 = vorrq_u64(senddesc23_w1, xmask23_w1);
-
+			cn9k_nix_prefree_seg_vec(tx_pkts, txq, &senddesc01_w0, &senddesc23_w0,
+						 &senddesc01_w1, &senddesc23_w1);
 			/* Ensuring mbuf fields which got updated in
 			 * cnxk_nix_prefree_seg are written before LMTST.
 			 */
diff --git a/drivers/net/cnxk/cnxk_ethdev_dp.h b/drivers/net/cnxk/cnxk_ethdev_dp.h
index c1f99a2616..67f40b8e25 100644
--- a/drivers/net/cnxk/cnxk_ethdev_dp.h
+++ b/drivers/net/cnxk/cnxk_ethdev_dp.h
@@ -84,7 +84,7 @@ struct cnxk_timesync_info {
 
 /* Inlines */
 static __rte_always_inline uint64_t
-cnxk_pktmbuf_detach(struct rte_mbuf *m)
+cnxk_pktmbuf_detach(struct rte_mbuf *m, uint64_t *aura)
 {
 	struct rte_mempool *mp = m->pool;
 	uint32_t mbuf_size, buf_len;
@@ -94,6 +94,8 @@ cnxk_pktmbuf_detach(struct rte_mbuf *m)
 
 	/* Update refcount of direct mbuf */
 	md = rte_mbuf_from_indirect(m);
+	if (aura)
+		*aura = roc_npa_aura_handle_to_aura(md->pool->pool_id);
 	refcount = rte_mbuf_refcnt_update(md, -1);
 
 	priv_size = rte_pktmbuf_priv_size(mp);
@@ -126,18 +128,18 @@ cnxk_pktmbuf_detach(struct rte_mbuf *m)
 }
 
 static __rte_always_inline uint64_t
-cnxk_nix_prefree_seg(struct rte_mbuf *m)
+cnxk_nix_prefree_seg(struct rte_mbuf *m, uint64_t *aura)
 {
 	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
 		if (!RTE_MBUF_DIRECT(m))
-			return cnxk_pktmbuf_detach(m);
+			return cnxk_pktmbuf_detach(m, aura);
 
 		m->next = NULL;
 		m->nb_segs = 1;
 		return 0;
 	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
 		if (!RTE_MBUF_DIRECT(m))
-			return cnxk_pktmbuf_detach(m);
+			return cnxk_pktmbuf_detach(m, aura);
 
 		rte_mbuf_refcnt_set(m, 1);
 		m->next = NULL;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 11/14] net/cnxk: fix check cookies for multi-seg offload
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (8 preceding siblings ...)
  2024-02-22 11:02   ` [PATCH v4 10/14] net/cnxk: fix indirect mbuf handling in Tx path Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 12/14] common/cnxk: add IO attribute to mbox structs Nithin Dabilpuram
                     ` (2 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, Rahul Bhansali, stable

From: Rahul Bhansali <rbhansali@marvell.com>

Fix missing mempool cookie checks in the multi-seg offload case.

Fixes: 3626d5195d49 ("net/cnxk: add multi-segment Tx for CN10K")
Cc: stable@dpdk.org

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
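
A minimal sketch of what the added checks amount to, assuming a valid Tx-path
mbuf; the helper name below is hypothetical and only for illustration:

#include <rte_mbuf.h>
#include <rte_mempool.h>

static inline void
mark_seg_put_for_hw_free(struct rte_mbuf *m)
{
	/* With RTE_LIBRTE_MEMPOOL_DEBUG enabled, record that this object will
	 * be returned to its pool by the NIX hardware, so the cookie audit
	 * does not later report it as leaked. Compiles to a no-op otherwise.
	 */
	RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
}
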
 drivers/net/cnxk/cn10k_tx.h | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index a1e923cd9e..ad4cbf7ffa 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -1863,6 +1863,9 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 	len -= dlen;
 	sg_u = sg_u | ((uint64_t)dlen);
 
+	/* Mark mempool object as "put" since it is freed by NIX */
+	RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+
 	nb_segs = m->nb_segs - 1;
 	m_next = m->next;
 	m->next = NULL;
@@ -1888,6 +1891,9 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 			slist++;
 		}
 		m->next = NULL;
+		/* Mark mempool object as "put" since it is freed by NIX */
+		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+
 		m = m_next;
 	} while (nb_segs);
 
@@ -1911,8 +1917,11 @@ cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0,
 	union nix_send_hdr_w0_u sh;
 	union nix_send_sg_s sg;
 
-	if (m->nb_segs == 1)
+	if (m->nb_segs == 1) {
+		/* Mark mempool object as "put" since it is freed by NIX */
+		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 		return;
+	}
 
 	sh.u = vgetq_lane_u64(cmd0[0], 0);
 	sg.u = vgetq_lane_u64(cmd1[0], 0);
@@ -1972,6 +1981,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq,
 			*data128 |= ((__uint128_t)7) << *shift;
 			*shift += 3;
 
+			/* Mark mempool object as "put" since it is freed by NIX */
+			RTE_MEMPOOL_CHECK_COOKIES(mbufs[0]->pool, (void **)&mbufs[0], 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(mbufs[1]->pool, (void **)&mbufs[1], 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(mbufs[2]->pool, (void **)&mbufs[2], 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(mbufs[3]->pool, (void **)&mbufs[3], 1, 0);
 			return 1;
 		}
 	}
@@ -1990,6 +2004,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq,
 				vst1q_u64(lmt_addr + 10, cmd2[j + 1]);
 				vst1q_u64(lmt_addr + 12, cmd1[j + 1]);
 				vst1q_u64(lmt_addr + 14, cmd3[j + 1]);
+
+				/* Mark mempool object as "put" since it is freed by NIX */
+				RTE_MEMPOOL_CHECK_COOKIES(mbufs[j]->pool, (void **)&mbufs[j], 1, 0);
+				RTE_MEMPOOL_CHECK_COOKIES(mbufs[j + 1]->pool,
+							  (void **)&mbufs[j + 1], 1, 0);
 			} else if (flags & NIX_TX_NEED_EXT_HDR) {
 				/* EXT header take 3 each, space for 2 segs.*/
 				cn10k_nix_prepare_mseg_vec(mbufs[j],
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 12/14] common/cnxk: add IO attribute to mbox structs
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (9 preceding siblings ...)
  2024-02-22 11:02   ` [PATCH v4 11/14] net/cnxk: fix check cookies for multi-seg offload Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 13/14] common/cnxk: use SQ enable and disable API Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 14/14] net/cnxk: reset mbuf fields in multi-seg Tx path Nithin Dabilpuram
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev, stable

The IO attribute is needed on mbox structs to avoid unaligned or paired
accesses caused by compiler optimization. Add it to the structs where it
is missing.

Fixes: 503b82de2cbf ("common/cnxk: add mbox request and response definitions")
Fixes: ddf955d3917e ("common/cnxk: support CPT second pass")
Cc: stable@dpdk.org

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
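
The effect of the attribute can be illustrated with a standalone sketch; here
"IO" stands in for whatever __io expands to (assumed to be a volatile-style
qualifier), and the struct/field names are hypothetical:

#include <stdint.h>

#define IO volatile

struct mbox_rsp_sketch {
	IO uint64_t rq_word0;	/* written by the AF over shared mailbox memory */
	IO uint64_t rq_word1;
};

static inline uint64_t
read_rq_word0(const struct mbox_rsp_sketch *rsp)
{
	/* Each field is loaded individually and with natural alignment;
	 * without the qualifier the compiler may merge adjacent reads into
	 * a single load-pair, which the commit message warns about.
	 */
	return rsp->rq_word0;
}
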
 drivers/common/cnxk/roc_mbox.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 4b4f48e372..d8a8494ac4 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -1427,12 +1427,12 @@ struct nix_cn10k_aq_enq_req {
 struct nix_cn10k_aq_enq_rsp {
 	struct mbox_msghdr hdr;
 	union {
-		struct nix_cn10k_rq_ctx_s rq;
-		struct nix_cn10k_sq_ctx_s sq;
-		struct nix_cq_ctx_s cq;
-		struct nix_rsse_s rss;
-		struct nix_rx_mce_s mce;
-		struct nix_band_prof_s prof;
+		__io struct nix_cn10k_rq_ctx_s rq;
+		__io struct nix_cn10k_sq_ctx_s sq;
+		__io struct nix_cq_ctx_s cq;
+		__io struct nix_rsse_s rss;
+		__io struct nix_rx_mce_s mce;
+		__io struct nix_band_prof_s prof;
 	};
 };
 
@@ -1668,11 +1668,11 @@ struct nix_rq_cpt_field_mask_cfg_req {
 #define RQ_CTX_MASK_MAX 6
 	union {
 		uint64_t __io rq_ctx_word_set[RQ_CTX_MASK_MAX];
-		struct nix_cn10k_rq_ctx_s rq_set;
+		__io struct nix_cn10k_rq_ctx_s rq_set;
 	};
 	union {
 		uint64_t __io rq_ctx_word_mask[RQ_CTX_MASK_MAX];
-		struct nix_cn10k_rq_ctx_s rq_mask;
+		__io struct nix_cn10k_rq_ctx_s rq_mask;
 	};
 	struct nix_lf_rx_ipec_cfg1_req {
 		uint32_t __io spb_cpt_aura;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 13/14] common/cnxk: use SQ enable and disable API
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (10 preceding siblings ...)
  2024-02-22 11:02   ` [PATCH v4 12/14] common/cnxk: add IO attribute to mbox structs Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  2024-02-22 11:02   ` [PATCH v4 14/14] net/cnxk: reset mbuf fields in multi-seg Tx path Nithin Dabilpuram
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Use the SQ enable and disable API in TM setup to effect
the state change.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix_tm_ops.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 900b182c76..9f3870a311 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -495,7 +495,7 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 		if (!sq)
 			continue;
 
-		rc = roc_nix_tm_sq_aura_fc(sq, false);
+		rc = roc_nix_sq_ena_dis(sq, false);
 		if (rc) {
 			plt_err("Failed to disable sqb aura fc, rc=%d", rc);
 			goto cleanup;
@@ -607,7 +607,7 @@ roc_nix_tm_hierarchy_xmit_enable(struct roc_nix *roc_nix, enum roc_nix_tm_tree t
 		sq_id = node->id;
 		sq = nix->sqs[sq_id];
 
-		rc = roc_nix_tm_sq_aura_fc(sq, true);
+		rc = roc_nix_sq_ena_dis(sq, true);
 		if (rc) {
 			plt_err("TM sw xon failed on SQ %u, rc=%d", node->id,
 				rc);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v4 14/14] net/cnxk: reset mbuf fields in multi-seg Tx path
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (11 preceding siblings ...)
  2024-02-22 11:02   ` [PATCH v4 13/14] common/cnxk: use SQ enable and disable API Nithin Dabilpuram
@ 2024-02-22 11:02   ` Nithin Dabilpuram
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-22 11:02 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, Rahul Bhansali

From: Rahul Bhansali <rbhansali@marvell.com>

Currently, in debug mode, when a buffer is allocated in SW,
nb_segs can hold an invalid value since the mbuf did not come from the
driver Rx path. Hence reset the mbuf next and nb_segs fields in the multi-seg Tx path.

Fixes: 3626d5195d49 ("net/cnxk: add multi-segment Tx for CN10K")

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
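
A minimal sketch of the per-segment reset this patch adds, assuming it runs
after the segment's descriptor words have been filled (helper name is
hypothetical):

#include <rte_mbuf.h>

static inline void
reset_seg_fields(struct rte_mbuf *m)
{
	/* A SW-allocated mbuf may carry stale chain information; clear it so
	 * debug/assert checks see a consistent single-segment buffer once the
	 * segment is handed over to the NIX for freeing.
	 */
	m->next = NULL;
	m->nb_segs = 1;
}
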
 drivers/net/cnxk/cn10k_tx.h |  2 ++
 drivers/net/cnxk/cn9k_tx.h  | 20 ++++++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index ad4cbf7ffa..94bfebf246 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -1324,6 +1324,7 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 	nb_segs = m->nb_segs - 1;
 	m_next = m->next;
 	m->next = NULL;
+	m->nb_segs = 1;
 	slist = &cmd[3 + off + 1];
 
 	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
@@ -1869,6 +1870,7 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 	nb_segs = m->nb_segs - 1;
 	m_next = m->next;
 	m->next = NULL;
+	m->nb_segs = 1;
 	m = m_next;
 	/* Fill mbuf segments */
 	do {
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index e6512471b9..fb5e8c5f56 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -643,6 +643,10 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 	rte_io_wmb();
 #else
 	RTE_SET_USED(cookie);
+#endif
+#ifdef RTE_ENABLE_ASSERT
+	m->next = NULL;
+	m->nb_segs = 1;
 #endif
 	m = m_next;
 	if (!m)
@@ -679,6 +683,9 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 			sg_u = sg->u;
 			slist++;
 		}
+#ifdef RTE_ENABLE_ASSERT
+		m->next = NULL;
+#endif
 		m = m_next;
 	} while (nb_segs);
 
@@ -692,6 +699,9 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 	segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
 	send_hdr->w0.sizem1 = segdw - 1;
 
+#ifdef RTE_ENABLE_ASSERT
+	rte_io_wmb();
+#endif
 	return segdw;
 }
 
@@ -908,6 +918,10 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 	RTE_SET_USED(cookie);
 #endif
 
+#ifdef RTE_ENABLE_ASSERT
+	m->next = NULL;
+	m->nb_segs = 1;
+#endif
 	m = m_next;
 	/* Fill mbuf segments */
 	do {
@@ -938,6 +952,9 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 			sg_u = sg->u;
 			slist++;
 		}
+#ifdef RTE_ENABLE_ASSERT
+		m->next = NULL;
+#endif
 		m = m_next;
 	} while (nb_segs);
 
@@ -953,6 +970,9 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 		 !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
 	send_hdr->w0.sizem1 = segdw - 1;
 
+#ifdef RTE_ENABLE_ASSERT
+	rte_io_wmb();
+#endif
 	return segdw;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines
  2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                   ` (12 preceding siblings ...)
  2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
@ 2024-02-26 13:35 ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 02/14] net/cnxk: add IPsec SA defines for PMD API Nithin Dabilpuram
                     ` (12 more replies)
  13 siblings, 13 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Since Inline IPsec on cn9k now uses the same opcode as LA,
remove the fast path opcode definitions.

Also fix devarg handling for ipsec_out_max_sa to allow 32-bit values.

Fixes: fe5846bcc076 ("net/cnxk: add devargs for min-max SPI")
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---

v5:
- Fixed commit messages and added fixes

v4:
- Fixed compilation warnings

v3:
- Added mempool debug fixes patch 14/14

v2:
- Fixed commit messages
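
A simplified sketch of the devarg fix (based on parse_ipsec_out_max_sa in
cnxk_ethdev_devargs.c): the caller passes a pointer to a 32-bit variable, so
the parser must also store 32 bits, otherwise SA counts above 64K get
truncated. Error handling is reduced for brevity:

#include <stdint.h>
#include <stdlib.h>
#include <errno.h>

static int
parse_ipsec_out_max_sa_sketch(const char *key, const char *value, void *extra_args)
{
	uint32_t val;

	(void)key;
	errno = 0;
	val = strtoul(value, NULL, 0);
	if (errno)
		val = 0;

	*(uint32_t *)extra_args = val;	/* was *(uint16_t *), which truncated */
	return 0;
}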

drivers/common/cnxk/cnxk_security.c    | 230 -------------------------
 drivers/common/cnxk/cnxk_security.h    |  12 --
 drivers/common/cnxk/roc_ie_on.h        |  60 -------
 drivers/common/cnxk/roc_nix_inl.h      |  50 +-----
 drivers/common/cnxk/version.map        |   4 -
 drivers/net/cnxk/cnxk_ethdev_devargs.c |   2 +-
 6 files changed, 3 insertions(+), 355 deletions(-)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index 64c901a57a..bab015e3b3 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -574,236 +574,6 @@ cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa)
 	return !!sa->w2.s.valid;
 }
 
-static inline int
-ipsec_xfrm_verify(struct rte_security_ipsec_xform *ipsec_xfrm,
-		  struct rte_crypto_sym_xform *crypto_xfrm)
-{
-	if (crypto_xfrm->next == NULL)
-		return -EINVAL;
-
-	if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
-		if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
-		    crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
-			return -EINVAL;
-	} else {
-		if (crypto_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
-		    crypto_xfrm->next->type != RTE_CRYPTO_SYM_XFORM_AUTH)
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int
-onf_ipsec_sa_common_param_fill(struct roc_ie_onf_sa_ctl *ctl, uint8_t *salt,
-			       uint8_t *cipher_key, uint8_t *hmac_opad_ipad,
-			       struct rte_security_ipsec_xform *ipsec_xfrm,
-			       struct rte_crypto_sym_xform *crypto_xfrm)
-{
-	struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
-	int rc, length, auth_key_len;
-	const uint8_t *key = NULL;
-	uint8_t ccm_flag = 0;
-
-	/* Set direction */
-	switch (ipsec_xfrm->direction) {
-	case RTE_SECURITY_IPSEC_SA_DIR_INGRESS:
-		ctl->direction = ROC_IE_SA_DIR_INBOUND;
-		auth_xfrm = crypto_xfrm;
-		cipher_xfrm = crypto_xfrm->next;
-		break;
-	case RTE_SECURITY_IPSEC_SA_DIR_EGRESS:
-		ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
-		cipher_xfrm = crypto_xfrm;
-		auth_xfrm = crypto_xfrm->next;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* Set protocol - ESP vs AH */
-	switch (ipsec_xfrm->proto) {
-	case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
-		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
-		break;
-	case RTE_SECURITY_IPSEC_SA_PROTO_AH:
-		return -ENOTSUP;
-	default:
-		return -EINVAL;
-	}
-
-	/* Set mode - transport vs tunnel */
-	switch (ipsec_xfrm->mode) {
-	case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
-		ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
-		break;
-	case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
-		ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* Set encryption algorithm */
-	if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		length = crypto_xfrm->aead.key.length;
-
-		switch (crypto_xfrm->aead.algo) {
-		case RTE_CRYPTO_AEAD_AES_GCM:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM;
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
-			memcpy(salt, &ipsec_xfrm->salt, 4);
-			key = crypto_xfrm->aead.key.data;
-			break;
-		case RTE_CRYPTO_AEAD_AES_CCM:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CCM;
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
-			ccm_flag = 0x07 & ~ROC_CPT_AES_CCM_CTR_LEN;
-			*salt = ccm_flag;
-			memcpy(PLT_PTR_ADD(salt, 1), &ipsec_xfrm->salt, 3);
-			key = crypto_xfrm->aead.key.data;
-			break;
-		default:
-			return -ENOTSUP;
-		}
-
-	} else {
-		rc = ipsec_xfrm_verify(ipsec_xfrm, crypto_xfrm);
-		if (rc)
-			return rc;
-
-		switch (cipher_xfrm->cipher.algo) {
-		case RTE_CRYPTO_CIPHER_AES_CBC:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
-			break;
-		case RTE_CRYPTO_CIPHER_AES_CTR:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR;
-			break;
-		default:
-			return -ENOTSUP;
-		}
-
-		switch (auth_xfrm->auth.algo) {
-		case RTE_CRYPTO_AUTH_SHA1_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1;
-			break;
-		default:
-			return -ENOTSUP;
-		}
-		auth_key_len = auth_xfrm->auth.key.length;
-		if (auth_key_len < 20 || auth_key_len > 64)
-			return -ENOTSUP;
-
-		key = cipher_xfrm->cipher.key.data;
-		length = cipher_xfrm->cipher.key.length;
-
-		roc_se_hmac_opad_ipad_gen(ctl->auth_type, auth_xfrm->auth.key.data,
-					  auth_xfrm->auth.key.length, hmac_opad_ipad, ROC_SE_IPSEC);
-	}
-
-	switch (length) {
-	case ROC_CPT_AES128_KEY_LEN:
-		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
-		break;
-	case ROC_CPT_AES192_KEY_LEN:
-		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
-		break;
-	case ROC_CPT_AES256_KEY_LEN:
-		ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	memcpy(cipher_key, key, length);
-
-	if (ipsec_xfrm->options.esn)
-		ctl->esn_en = 1;
-
-	ctl->spi = rte_cpu_to_be_32(ipsec_xfrm->spi);
-	return 0;
-}
-
-int
-cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa,
-			   struct rte_security_ipsec_xform *ipsec_xfrm,
-			   struct rte_crypto_sym_xform *crypto_xfrm)
-{
-	struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;
-	int rc;
-
-	rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
-					    sa->hmac_key, ipsec_xfrm,
-					    crypto_xfrm);
-	if (rc)
-		return rc;
-
-	rte_wmb();
-
-	/* Enable SA */
-	ctl->valid = 1;
-	return 0;
-}
-
-int
-cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
-			    struct rte_security_ipsec_xform *ipsec_xfrm,
-			    struct rte_crypto_sym_xform *crypto_xfrm)
-{
-	struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
-	struct roc_ie_onf_sa_ctl *ctl = &sa->ctl;
-	int rc;
-
-	/* Fill common params */
-	rc = onf_ipsec_sa_common_param_fill(ctl, sa->nonce, sa->cipher_key,
-					    sa->hmac_key, ipsec_xfrm,
-					    crypto_xfrm);
-	if (rc)
-		return rc;
-
-	if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
-		goto skip_tunnel_info;
-
-	/* Tunnel header info */
-	switch (tunnel->type) {
-	case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
-		memcpy(&sa->ip_src, &tunnel->ipv4.src_ip,
-		       sizeof(struct in_addr));
-		memcpy(&sa->ip_dst, &tunnel->ipv4.dst_ip,
-		       sizeof(struct in_addr));
-		break;
-	case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
-		return -ENOTSUP;
-	default:
-		return -EINVAL;
-	}
-
-	/* Update udp encap ports */
-	if (ipsec_xfrm->options.udp_encap == 1) {
-		sa->udp_src = 4500;
-		sa->udp_dst = 4500;
-	}
-
-skip_tunnel_info:
-	rte_wmb();
-
-	/* Enable SA */
-	ctl->valid = 1;
-	return 0;
-}
-
-bool
-cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa)
-{
-	return !!sa->ctl.valid;
-}
-
-bool
-cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa)
-{
-	return !!sa->ctl.valid;
-}
-
 uint8_t
 cnxk_ipsec_ivlen_get(enum rte_crypto_cipher_algorithm c_algo,
 		     enum rte_crypto_auth_algorithm a_algo,
diff --git a/drivers/common/cnxk/cnxk_security.h b/drivers/common/cnxk/cnxk_security.h
index b323b8b757..19eb9bb03d 100644
--- a/drivers/common/cnxk/cnxk_security.h
+++ b/drivers/common/cnxk/cnxk_security.h
@@ -48,18 +48,6 @@ cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
 bool __roc_api cnxk_ot_ipsec_inb_sa_valid(struct roc_ot_ipsec_inb_sa *sa);
 bool __roc_api cnxk_ot_ipsec_outb_sa_valid(struct roc_ot_ipsec_outb_sa *sa);
 
-/* [CN9K, CN10K) */
-int __roc_api
-cnxk_onf_ipsec_inb_sa_fill(struct roc_onf_ipsec_inb_sa *sa,
-			   struct rte_security_ipsec_xform *ipsec_xfrm,
-			   struct rte_crypto_sym_xform *crypto_xfrm);
-int __roc_api
-cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
-			    struct rte_security_ipsec_xform *ipsec_xfrm,
-			    struct rte_crypto_sym_xform *crypto_xfrm);
-bool __roc_api cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa);
-bool __roc_api cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa);
-
 /* [CN9K] */
 int __roc_api cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
 					  struct rte_crypto_sym_xform *crypto_xform,
diff --git a/drivers/common/cnxk/roc_ie_on.h b/drivers/common/cnxk/roc_ie_on.h
index 9933ffa148..11c995e9d1 100644
--- a/drivers/common/cnxk/roc_ie_on.h
+++ b/drivers/common/cnxk/roc_ie_on.h
@@ -269,66 +269,6 @@ struct roc_ie_on_inb_sa {
 #define ROC_IE_ON_UCC_L2_HDR_INFO_ERR	  0xCF
 #define ROC_IE_ON_UCC_L2_HDR_LEN_ERR	  0xE0
 
-struct roc_ie_onf_sa_ctl {
-	uint32_t spi;
-	uint64_t exp_proto_inter_frag : 8;
-	uint64_t rsvd_41_40 : 2;
-	/* Disable SPI, SEQ data in RPTR for Inbound inline */
-	uint64_t spi_seq_dis : 1;
-	uint64_t esn_en : 1;
-	uint64_t rsvd_44_45 : 2;
-	uint64_t encap_type : 2;
-	uint64_t enc_type : 3;
-	uint64_t rsvd_48 : 1;
-	uint64_t auth_type : 4;
-	uint64_t valid : 1;
-	uint64_t direction : 1;
-	uint64_t outer_ip_ver : 1;
-	uint64_t inner_ip_ver : 1;
-	uint64_t ipsec_mode : 1;
-	uint64_t ipsec_proto : 1;
-	uint64_t aes_key_len : 2;
-};
-
-struct roc_onf_ipsec_outb_sa {
-	/* w0 */
-	struct roc_ie_onf_sa_ctl ctl;
-
-	/* w1 */
-	uint8_t nonce[4];
-	uint16_t udp_src;
-	uint16_t udp_dst;
-
-	/* w2 */
-	uint32_t ip_src;
-	uint32_t ip_dst;
-
-	/* w3-w6 */
-	uint8_t cipher_key[32];
-
-	/* w7-w12 */
-	uint8_t hmac_key[48];
-};
-
-struct roc_onf_ipsec_inb_sa {
-	/* w0 */
-	struct roc_ie_onf_sa_ctl ctl;
-
-	/* w1 */
-	uint8_t nonce[4]; /* Only for AES-GCM */
-	uint32_t unused;
-
-	/* w2 */
-	uint32_t esn_hi;
-	uint32_t esn_low;
-
-	/* w3-w6 */
-	uint8_t cipher_key[32];
-
-	/* w7-w12 */
-	uint8_t hmac_key[48];
-};
-
 #define ROC_ONF_IPSEC_INB_MAX_L2_SZ	  32UL
 #define ROC_ONF_IPSEC_OUTB_MAX_L2_SZ	  30UL
 #define ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ (ROC_ONF_IPSEC_OUTB_MAX_L2_SZ + 2)
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index ab1e9c0f98..f5ce26f03f 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -4,24 +4,6 @@
 #ifndef _ROC_NIX_INL_H_
 #define _ROC_NIX_INL_H_
 
-/* ONF INB HW area */
-#define ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ                                        \
-	PLT_ALIGN(sizeof(struct roc_onf_ipsec_inb_sa), ROC_ALIGN)
-/* ONF INB SW reserved area */
-#define ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD 384
-#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ                                        \
-	(ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD)
-#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2 9
-
-/* ONF OUTB HW area */
-#define ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ                                       \
-	PLT_ALIGN(sizeof(struct roc_onf_ipsec_outb_sa), ROC_ALIGN)
-/* ONF OUTB SW reserved area */
-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD 128
-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ                                       \
-	(ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD)
-#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2 8
-
 /* ON INB HW area */
 #define ROC_NIX_INL_ON_IPSEC_INB_HW_SZ                                         \
 	PLT_ALIGN(sizeof(struct roc_ie_on_inb_sa), ROC_ALIGN)
@@ -31,10 +13,10 @@
 	(ROC_NIX_INL_ON_IPSEC_INB_HW_SZ + ROC_NIX_INL_ON_IPSEC_INB_SW_RSVD)
 #define ROC_NIX_INL_ON_IPSEC_INB_SA_SZ_LOG2 10
 
-/* ONF OUTB HW area */
+/* ON OUTB HW area */
 #define ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ                                        \
 	PLT_ALIGN(sizeof(struct roc_ie_on_outb_sa), ROC_ALIGN)
-/* ONF OUTB SW reserved area */
+/* ON OUTB SW reserved area */
 #define ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD 256
 #define ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ                                        \
 	(ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ON_IPSEC_OUTB_SW_RSVD)
@@ -86,34 +68,6 @@ roc_nix_inl_on_ipsec_outb_sa_sw_rsvd(void *sa)
 	return PLT_PTR_ADD(sa, ROC_NIX_INL_ON_IPSEC_OUTB_HW_SZ);
 }
 
-static inline struct roc_onf_ipsec_inb_sa *
-roc_nix_inl_onf_ipsec_inb_sa(uintptr_t base, uint64_t idx)
-{
-	uint64_t off = idx << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2;
-
-	return PLT_PTR_ADD(base, off);
-}
-
-static inline struct roc_onf_ipsec_outb_sa *
-roc_nix_inl_onf_ipsec_outb_sa(uintptr_t base, uint64_t idx)
-{
-	uint64_t off = idx << ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2;
-
-	return PLT_PTR_ADD(base, off);
-}
-
-static inline void *
-roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(void *sa)
-{
-	return PLT_PTR_ADD(sa, ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ);
-}
-
-static inline void *
-roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(void *sa)
-{
-	return PLT_PTR_ADD(sa, ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ);
-}
-
 /* Inline device SSO Work callback */
 typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
 					  uint32_t soft_exp_event);
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 4981d42ab7..a7402d9941 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -17,10 +17,6 @@ INTERNAL {
 	cnxk_logtype_sso;
 	cnxk_logtype_tim;
 	cnxk_logtype_tm;
-	cnxk_onf_ipsec_inb_sa_fill;
-	cnxk_onf_ipsec_outb_sa_fill;
-	cnxk_onf_ipsec_inb_sa_valid;
-	cnxk_onf_ipsec_outb_sa_valid;
 	cnxk_ot_ipsec_inb_sa_fill;
 	cnxk_ot_ipsec_outb_sa_fill;
 	cnxk_ot_ipsec_inb_sa_valid;
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 8e862be933..a0e9300cff 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -75,7 +75,7 @@ parse_ipsec_out_max_sa(const char *key, const char *value, void *extra_args)
 	if (errno)
 		val = 0;
 
-	*(uint16_t *)extra_args = val;
+	*(uint32_t *)extra_args = val;
 
 	return 0;
 }
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 02/14] net/cnxk: add IPsec SA defines for PMD API
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 03/14] net/cnxk: add transport mode to security capability on cn9k Nithin Dabilpuram
                     ` (11 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Define inbound and outbound IPsec data types for the PMD APIs
rte_pmd_cnxk_hw_sa_read() and rte_pmd_cnxk_hw_sa_write().

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
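
A hedged usage sketch (not part of the patch) of the typed API: read back the
inbound SA context of an inline-protocol session and print its SPI. "dev" and
"sess" are assumed to be a cnxk ethdev pointer and a valid security session
created on it:

#include <stdio.h>
#include <rte_security.h>
#include <rte_pmd_cnxk.h>

static int
dump_inb_spi(void *dev, struct rte_security_session *sess)
{
	union rte_pmd_cnxk_ipsec_hw_sa sa;
	int rc;

	rc = rte_pmd_cnxk_hw_sa_read(dev, sess, &sa, sizeof(sa));
	if (rc)
		return rc;

	printf("inbound SA SPI: 0x%x\n", (unsigned int)sa.inb.w2.s.spi);
	return 0;
}
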
 drivers/net/cnxk/cn10k_ethdev_sec.c |  18 +-
 drivers/net/cnxk/rte_pmd_cnxk.h     | 397 +++++++++++++++++++++++++++-
 2 files changed, 411 insertions(+), 4 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 575d0fabd5..05ec49d981 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -14,6 +14,20 @@
 #include <cnxk_security.h>
 #include <roc_priv.h>
 
+PLT_STATIC_ASSERT(offsetof(struct rte_pmd_cnxk_ipsec_inb_sa, ctx.ar_winbits) ==
+		  offsetof(struct roc_ot_ipsec_inb_sa, ctx.ar_winbits));
+
+PLT_STATIC_ASSERT(offsetof(struct rte_pmd_cnxk_ipsec_outb_sa, ctx.mib_pkts) ==
+		  offsetof(struct roc_ot_ipsec_outb_sa, ctx.mib_pkts));
+
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_CTX_MAX_CKEY_LEN == ROC_CTX_MAX_CKEY_LEN);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN == ROC_CTX_MAX_OPAD_IPAD_LEN);
+
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MIN == ROC_AR_WIN_SIZE_MIN);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MAX == ROC_AR_WIN_SIZE_MAX);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_LOG_MIN_AR_WIN_SIZE_M1 == ROC_LOG_MIN_AR_WIN_SIZE_M1);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WINBITS_SZ == ROC_AR_WINBITS_SZ);
+
 static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
 	{	/* AES GCM */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -1143,7 +1157,7 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
 
 int
 rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
-			void *data, uint32_t len)
+			union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
@@ -1166,7 +1180,7 @@ rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
 
 int
 rte_pmd_cnxk_hw_sa_write(void *device, struct rte_security_session *sess,
-			 void *data, uint32_t len)
+			 union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
diff --git a/drivers/net/cnxk/rte_pmd_cnxk.h b/drivers/net/cnxk/rte_pmd_cnxk.h
index 7827c33ac9..43f2a7ed9b 100644
--- a/drivers/net/cnxk/rte_pmd_cnxk.h
+++ b/drivers/net/cnxk/rte_pmd_cnxk.h
@@ -60,6 +60,399 @@ struct rte_pmd_cnxk_sec_action {
 	enum rte_pmd_cnxk_sec_action_alg alg;
 };
 
+#define RTE_PMD_CNXK_CTX_MAX_CKEY_LEN	   32
+#define RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN 128
+
+/** Anti reply window size supported */
+#define RTE_PMD_CNXK_AR_WIN_SIZE_MIN	    64
+#define RTE_PMD_CNXK_AR_WIN_SIZE_MAX	    4096
+#define RTE_PMD_CNXK_LOG_MIN_AR_WIN_SIZE_M1 5
+
+/** u64 array size to fit anti replay window bits */
+#define RTE_PMD_CNXK_AR_WINBITS_SZ (RTE_ALIGN_CEIL(RTE_PMD_CNXK_AR_WIN_SIZE_MAX, 64) / 64)
+
+/** Outer header info for Inbound or Outbound */
+union rte_pmd_cnxk_ipsec_outer_ip_hdr {
+	struct {
+		/** IPv4 destination */
+		uint32_t dst_addr;
+		/** IPv4 source */
+		uint32_t src_addr;
+	} ipv4;
+	struct {
+		/** IPv6 source */
+		uint8_t src_addr[16];
+		/** IPv6 destination */
+		uint8_t dst_addr[16];
+	} ipv6;
+};
+
+/** Inbound IPsec context update region */
+struct rte_pmd_cnxk_ipsec_inb_ctx_update_reg {
+	/** Highest sequence number received */
+	uint64_t ar_base;
+	/** Valid bit for 64-bit words of replay window */
+	uint64_t ar_valid_mask;
+	/** Hard life for SA */
+	uint64_t hard_life;
+	/** Soft life for SA */
+	uint64_t soft_life;
+	/** MIB octets */
+	uint64_t mib_octs;
+	/** MIB packets */
+	uint64_t mib_pkts;
+	/** AR window bits */
+	uint64_t ar_winbits[RTE_PMD_CNXK_AR_WINBITS_SZ];
+};
+
+/** Outbound IPsec IV data */
+union rte_pmd_cnxk_ipsec_outb_iv {
+	uint64_t u64[2];
+	/** IV debug - 16B*/
+	uint8_t iv_dbg[16];
+	struct {
+		/** IV debug - 8B */
+		uint8_t iv_dbg1[4];
+		/** Salt */
+		uint8_t salt[4];
+
+		uint32_t rsvd;
+		/** IV debug - 8B */
+		uint8_t iv_dbg2[4];
+	} s;
+};
+
+/** Outbound IPsec context update region */
+struct rte_pmd_cnxk_ipsec_outb_ctx_update_reg {
+	union {
+		struct {
+			uint64_t reserved_0_2 : 3;
+			uint64_t address : 57;
+			uint64_t mode : 4;
+		} s;
+		uint64_t u64;
+	} err_ctl;
+
+	uint64_t esn_val;
+	uint64_t hard_life;
+	uint64_t soft_life;
+	uint64_t mib_octs;
+	uint64_t mib_pkts;
+};
+
+/**
+ * Inbound IPsec SA
+ */
+struct rte_pmd_cnxk_ipsec_inb_sa {
+	/** Word0 */
+	union {
+		struct {
+			/** AR window size */
+			uint64_t ar_win : 3;
+			/** Hard life enable */
+			uint64_t hard_life_dec : 1;
+			/** Soft life enable */
+			uint64_t soft_life_dec : 1;
+
+			/** Count global octets */
+			uint64_t count_glb_octets : 1;
+			/** Count global pkts */
+			uint64_t count_glb_pkts : 1;
+			/** Count bytes */
+			uint64_t count_mib_bytes : 1;
+
+			/** Count pkts */
+			uint64_t count_mib_pkts : 1;
+			/** HW context offset */
+			uint64_t hw_ctx_off : 7;
+
+			/** Context ID */
+			uint64_t ctx_id : 16;
+
+			/** Original packet free absolute */
+			uint64_t orig_pkt_fabs : 1;
+			/** Original packet free */
+			uint64_t orig_pkt_free : 1;
+			/** PKIND for second pass */
+			uint64_t pkind : 6;
+
+			uint64_t rsvd0 : 1;
+			/** Ether type overwrite */
+			uint64_t et_ovrwr : 1;
+			/** Packet output type */
+			uint64_t pkt_output : 2;
+			/** Packet format type */
+			uint64_t pkt_format : 1;
+			/** Defrag option */
+			uint64_t defrag_opt : 2;
+			/** Reserved for X2P dest */
+			uint64_t x2p_dst : 1;
+
+			/** Context push size */
+			uint64_t ctx_push_size : 7;
+			uint64_t rsvd1 : 1;
+
+			/** Context header size */
+			uint64_t ctx_hdr_size : 2;
+			/** AOP enable */
+			uint64_t aop_valid : 1;
+			uint64_t rsvd2 : 1;
+			/** Context size */
+			uint64_t ctx_size : 4;
+		} s;
+		uint64_t u64;
+	} w0;
+
+	/** Word1 */
+	union {
+		struct {
+			/** Original packet aura */
+			uint64_t orig_pkt_aura : 20;
+			uint64_t rsvd3 : 4;
+			/** Original packet free offset */
+			uint64_t orig_pkt_foff : 8;
+			/** SA cookie */
+			uint64_t cookie : 32;
+		} s;
+		uint64_t u64;
+	} w1;
+
+	/** Word 2 */
+	union {
+		struct {
+			/** SA valid */
+			uint64_t valid : 1;
+			/** SA direction */
+			uint64_t dir : 1;
+			uint64_t rsvd11 : 1;
+			uint64_t rsvd4 : 1;
+			/** IPsec mode */
+			uint64_t ipsec_mode : 1;
+			/** IPsec protocol */
+			uint64_t ipsec_protocol : 1;
+			/** AES key length */
+			uint64_t aes_key_len : 2;
+
+			/** Encryption algo */
+			uint64_t enc_type : 3;
+			/** Soft life and hard life unit */
+			uint64_t life_unit : 1;
+			/** Authentication algo */
+			uint64_t auth_type : 4;
+
+			/** Encapsulation type */
+			uint64_t encap_type : 2;
+			/** Ether type override enable */
+			uint64_t et_ovrwr_ddr_en : 1;
+			/** ESN enable */
+			uint64_t esn_en : 1;
+			/** Transport mode L4 checksum incrementally update */
+			uint64_t tport_l4_incr_csum : 1;
+			/** Outer IP header verification */
+			uint64_t ip_hdr_verify : 2;
+			/** UDP encapsulation ports verification */
+			uint64_t udp_ports_verify : 1;
+
+			/** Return 64B of L2/L3 header on error */
+			uint64_t l3hdr_on_err : 1;
+			uint64_t rsvd6 : 6;
+			uint64_t rsvd12 : 1;
+
+			/** SPI */
+			uint64_t spi : 32;
+		} s;
+		uint64_t u64;
+	} w2;
+
+	/** Word3 */
+	uint64_t rsvd7;
+
+	/** Word4 - Word7 */
+	uint8_t cipher_key[RTE_PMD_CNXK_CTX_MAX_CKEY_LEN];
+
+	/** Word8 - Word9 */
+	union {
+		struct {
+			uint32_t rsvd8;
+			/** IV salt */
+			uint8_t salt[4];
+		} s;
+		uint64_t u64;
+	} w8;
+	uint64_t rsvd9;
+
+	/** Word10 */
+	union {
+		struct {
+			uint64_t rsvd10 : 32;
+			/** UDP encapsulation source port */
+			uint64_t udp_src_port : 16;
+			/** UDP encapsulation destination port */
+			uint64_t udp_dst_port : 16;
+		} s;
+		uint64_t u64;
+	} w10;
+
+	/** Word11 - Word14 */
+	union rte_pmd_cnxk_ipsec_outer_ip_hdr outer_hdr;
+
+	/** Word15 - Word30 */
+	uint8_t hmac_opad_ipad[RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN];
+
+	/** Word31 - Word100 */
+	struct rte_pmd_cnxk_ipsec_inb_ctx_update_reg ctx;
+};
+
+/**
+ * Outbound IPsec SA
+ */
+struct rte_pmd_cnxk_ipsec_outb_sa {
+	/** Word0 */
+	union {
+		struct {
+			/** ESN enable */
+			uint64_t esn_en : 1;
+			/** IP ID generation type */
+			uint64_t ip_id : 1;
+			uint64_t rsvd0 : 1;
+			/** Hard life enable */
+			uint64_t hard_life_dec : 1;
+			/** Soft life enable */
+			uint64_t soft_life_dec : 1;
+
+			/** Count global octets */
+			uint64_t count_glb_octets : 1;
+			/** Count global pkts */
+			uint64_t count_glb_pkts : 1;
+			/** Count bytes */
+			uint64_t count_mib_bytes : 1;
+
+			/** Count pkts */
+			uint64_t count_mib_pkts : 1;
+			/** HW context offset */
+			uint64_t hw_ctx_off : 7;
+
+			/** Context ID */
+			uint64_t ctx_id : 16;
+			uint64_t rsvd1 : 16;
+
+			/** Context push size */
+			uint64_t ctx_push_size : 7;
+			uint64_t rsvd2 : 1;
+
+			/** Context header size */
+			uint64_t ctx_hdr_size : 2;
+			/** AOP enable */
+			uint64_t aop_valid : 1;
+			uint64_t rsvd3 : 1;
+			/** Context size */
+			uint64_t ctx_size : 4;
+		} s;
+		uint64_t u64;
+	} w0;
+
+	/** Word1 */
+	union {
+		struct {
+			uint64_t rsvd4 : 32;
+			/** SA cookie */
+			uint64_t cookie : 32;
+		} s;
+		uint64_t u64;
+	} w1;
+
+	/** Word 2 */
+	union {
+		struct {
+			/** SA valid */
+			uint64_t valid : 1;
+			/** SA direction */
+			uint64_t dir : 1;
+			uint64_t rsvd11 : 1;
+			uint64_t rsvd5 : 1;
+			/** IPsec mode */
+			uint64_t ipsec_mode : 1;
+			/** IPsec protocol */
+			uint64_t ipsec_protocol : 1;
+
+			/** AES key length */
+			uint64_t aes_key_len : 2;
+
+			/** Encryption algo */
+			uint64_t enc_type : 3;
+			/** Soft life and hard life unit */
+			uint64_t life_unit : 1;
+			/** Authentication algo */
+			uint64_t auth_type : 4;
+
+			/** Encapsulation type */
+			uint64_t encap_type : 2;
+			/** DF source */
+			uint64_t ipv4_df_src_or_ipv6_flw_lbl_src : 1;
+			/** DSCP source */
+			uint64_t dscp_src : 1;
+			/** IV source */
+			uint64_t iv_src : 2;
+			/** IPID value in outer header */
+			uint64_t ipid_gen : 1;
+			uint64_t rsvd6 : 1;
+
+			uint64_t rsvd7 : 7;
+			uint64_t rsvd12 : 1;
+
+			/** SPI */
+			uint64_t spi : 32;
+		} s;
+		uint64_t u64;
+	} w2;
+
+	/** Word3 */
+	uint64_t rsvd8;
+
+	/** Word4 - Word7 */
+	uint8_t cipher_key[RTE_PMD_CNXK_CTX_MAX_CKEY_LEN];
+
+	/** Word8 - Word9 */
+	union rte_pmd_cnxk_ipsec_outb_iv iv;
+
+	/** Word10 */
+	union {
+		struct {
+			uint64_t rsvd9 : 4;
+			/** Outer header IPv4 DF or IPv6 flow label */
+			uint64_t ipv4_df_or_ipv6_flw_lbl : 20;
+
+			/** DSCP for outer header */
+			uint64_t dscp : 6;
+			uint64_t rsvd10 : 2;
+
+			/** UDP encapsulation destination port */
+			uint64_t udp_dst_port : 16;
+
+			/** UDP encapsulation source port */
+			uint64_t udp_src_port : 16;
+		} s;
+		uint64_t u64;
+	} w10;
+
+	/** Word11 - Word14 */
+	union rte_pmd_cnxk_ipsec_outer_ip_hdr outer_hdr;
+
+	/** Word15 - Word30 */
+	uint8_t hmac_opad_ipad[RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN];
+
+	/** Word31 - Word36 */
+	struct rte_pmd_cnxk_ipsec_outb_ctx_update_reg ctx;
+};
+
+/** Inbound/Outbound IPsec SA */
+union rte_pmd_cnxk_ipsec_hw_sa {
+	/** Inbound SA */
+	struct rte_pmd_cnxk_ipsec_inb_sa inb;
+	/** Outbound SA */
+	struct rte_pmd_cnxk_ipsec_outb_sa outb;
+};
+
 /**
  * Read HW SA context from session.
  *
@@ -77,7 +470,7 @@ struct rte_pmd_cnxk_sec_action {
  */
 __rte_experimental
 int rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
-			    void *data, uint32_t len);
+			    union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len);
 /**
  * Write HW SA context to session.
  *
@@ -95,7 +488,7 @@ int rte_pmd_cnxk_hw_sa_read(void *device, struct rte_security_session *sess,
  */
 __rte_experimental
 int rte_pmd_cnxk_hw_sa_write(void *device, struct rte_security_session *sess,
-			     void *data, uint32_t len);
+			     union rte_pmd_cnxk_ipsec_hw_sa *data, uint32_t len);
 
 /**
  * Get pointer to CPT result info for inline inbound processed pkt.
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 03/14] net/cnxk: add transport mode to security capability on cn9k
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 02/14] net/cnxk: add IPsec SA defines for PMD API Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 04/14] common/cnxk: dump selected SQ entries Nithin Dabilpuram
                     ` (10 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Add transport mode to the security capabilities since it
is supported by UCODE.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
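
For reference, a hedged sketch of an IPsec transform that the new capability
entries cover (egress ESP in transport mode with ESN and UDP encapsulation);
the SPI value is arbitrary and only for illustration:

#include <rte_security.h>

static const struct rte_security_ipsec_xform transport_esp_egress = {
	.spi = 0x100,
	.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
	.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
	.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
	.options = {
		.udp_encap = 1,
		.esn = 1,
	},
};
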
 drivers/net/cnxk/cn9k_ethdev_sec.c | 33 ++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
index 688b13ae1e..a0e0a73639 100644
--- a/drivers/net/cnxk/cn9k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -351,6 +351,39 @@ static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
 		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
 		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
 	},
+	{	/* IPsec Inline Protocol ESP Transport Ingress */
+		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+			.replay_win_sz_max = CNXK_ON_AR_WIN_SIZE_MAX,
+			.options = {
+				.udp_encap = 1,
+				.esn = 1,
+			},
+		},
+		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
+		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+	},
+	{	/* IPsec Inline Protocol ESP Transport Egress */
+		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+			.replay_win_sz_max = CNXK_ON_AR_WIN_SIZE_MAX,
+			.options = {
+				.iv_gen_disable = 1,
+				.udp_encap = 1,
+				.esn = 1,
+			},
+		},
+		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
+		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+	},
 	{
 		.action = RTE_SECURITY_ACTION_TYPE_NONE
 	}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 04/14] common/cnxk: dump selected SQ entries
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 02/14] net/cnxk: add IPsec SA defines for PMD API Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 03/14] net/cnxk: add transport mode to security capability on cn9k Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 05/14] net/cnxk: added Tx descriptor dump API Nithin Dabilpuram
                     ` (9 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

New API to dump detailed SQ entries.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
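
A hedged usage sketch of the new API, assuming an initialized roc_nix handle
and the usual common/cnxk umbrella header:

#include <stdio.h>
#include "roc_api.h"

static void
dump_sq0_head(struct roc_nix *roc_nix)
{
	/* q = 0, offset = 0 (start from SQ head), dump up to 32 entries */
	if (roc_nix_sq_desc_dump(roc_nix, 0, 0, 32, stderr))
		fprintf(stderr, "SQ descriptor dump failed\n");
}
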
 drivers/common/cnxk/roc_nix.h       |   2 +
 drivers/common/cnxk/roc_nix_debug.c | 172 ++++++++++++++++++++++++++++
 drivers/common/cnxk/version.map     |   1 +
 3 files changed, 175 insertions(+)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 84e6fc3df5..9d57ca0be7 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -553,6 +553,8 @@ void __roc_api roc_nix_cqe_dump(FILE *file, const struct nix_cqe_hdr_s *cq);
 void __roc_api roc_nix_rq_dump(struct roc_nix_rq *rq, FILE *file);
 void __roc_api roc_nix_cq_dump(struct roc_nix_cq *cq, FILE *file);
 void __roc_api roc_nix_sq_dump(struct roc_nix_sq *sq, FILE *file);
+int __roc_api roc_nix_sq_desc_dump(struct roc_nix *roc_nix, uint16_t q, uint16_t offset,
+				   uint16_t num, FILE *file);
 void __roc_api roc_nix_tm_dump(struct roc_nix *roc_nix, FILE *file);
 void __roc_api roc_nix_dump(struct roc_nix *roc_nix, FILE *file);
 
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index 8962a76097..26546f9297 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -1362,3 +1362,175 @@ roc_nix_inl_outb_cpt_lfs_dump(struct roc_nix *roc_nix, FILE *file)
 		cpt_lf_print(&lf_base[i]);
 	}
 }
+
+static void
+nix_tm_sqe_dump(uint64_t *sqe, int head_off, int end_off, int instr_sz, FILE *file, int full,
+		uint16_t *num)
+{
+	int i, j, inc = (8 * (0x2 >> instr_sz)), segs;
+	uint64_t *ptr;
+
+	if (!sqe || !(*num))
+		return;
+
+	ptr = sqe + (head_off * inc);
+	for (i = head_off; i < end_off; i++) {
+		if (!(*num))
+			return;
+		ptr = sqe + (i * inc);
+		nix_dump(file, "Entry : %d >>>>>\n", i);
+		nix_dump(file, "\t\tSEND_HDR[0]: 0x%016lx SEND_HDR[1]: 0x%016lx\n", *ptr,
+			 *(ptr + 1));
+		*num = *num - 1;
+		if (!full)
+			continue;
+		ptr += 2;
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_EXT) {
+			nix_dump(file, "\t\tSUBDC_EXT[0]: 0x%016lx SUBDC_EXT[1]: 0x%016lx\n", *ptr,
+				 *(ptr + 1));
+			ptr += 2;
+		}
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_AGE_AND_STATS) {
+			nix_dump(file,
+				 "\t\tSUBDC_AGE_STATS[0]: 0x%016lx SUBDC_AGE_STATS[1]: 0x%016lx\n",
+				 *ptr, *(ptr + 1));
+			ptr += 2;
+		}
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_JUMP) {
+			nix_dump(file, "\t\tSUBDC_JUMP: 0x%016lx\n", *ptr);
+			ptr += 1;
+			ptr = (uint64_t *)*ptr;
+		}
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_CRC) {
+			nix_dump(file, "\t\tSUBDC_CRC[0]: 0x%016lx SUBDC_CRC[1]: 0x%016lx\n", *ptr,
+				 *(ptr + 1));
+			ptr += 2;
+		}
+		/* We are not parsing immediate send descriptor */
+		if (((*ptr >> 60) & 0xF) == NIX_SUBDC_IMM) {
+			nix_dump(file, "\t\tSUBDC_IMM: 0x%016lx ", *ptr);
+			continue;
+		}
+		while (1) {
+			if (((*ptr >> 60) & 0xF) == NIX_SUBDC_SG) {
+				nix_dump(file, "\t\tSUBDC_SG: 0x%016lx   ", *ptr);
+				segs = (*ptr >> 48) & 0x3;
+				ptr += 1;
+				for (j = 0; j < segs; j++) {
+					nix_dump(file, "\t\t\t  0x%016lx   ", *ptr);
+					ptr += 1;
+				}
+				if (segs == 2)
+					ptr += 1;
+			} else if (((*ptr >> 60) & 0xF) == NIX_SUBDC_SG2) {
+				nix_dump(file, "\t\tSUBDC_SG2: 0x%016lx   ", *ptr);
+				ptr += 1;
+				nix_dump(file, "\t\t\t  0x%016lx   ", *ptr);
+				ptr += 1;
+			} else
+				break;
+		}
+	}
+}
+
+int
+roc_nix_sq_desc_dump(struct roc_nix *roc_nix, uint16_t q, uint16_t offset, uint16_t num, FILE *file)
+{
+	int head_off, count, rc = 0, tail_off, full = 0;
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	struct roc_nix_sq *sq = nix->sqs[q];
+	void *sqb_buf, *dat, *tail_sqb;
+	struct ndc_sync_op *ndc_req;
+	struct dev *dev = &nix->dev;
+	uint16_t sqes_per_sqb;
+	struct mbox *mbox;
+
+	mbox = dev->mbox;
+	/* Sync NDC-NIX-TX for LF */
+	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox_get(mbox));
+	if (ndc_req == NULL) {
+		mbox_put(mbox);
+		return -EFAULT;
+	}
+
+	ndc_req->nix_lf_tx_sync = 1;
+	if (mbox_process(mbox))
+		rc |= NIX_ERR_NDC_SYNC;
+	mbox_put(mbox);
+
+	if (rc)
+		plt_err("NDC_SYNC failed rc %d", rc);
+
+	rc = nix_q_ctx_get(dev, NIX_AQ_CTYPE_SQ, q, (void *)&dat);
+	if (rc)
+		return rc;
+	if (roc_model_is_cn9k()) {
+		volatile struct nix_sq_ctx_s *ctx = (struct nix_sq_ctx_s *)dat;
+
+		if (ctx->mnq_dis || ctx->lmt_dis)
+			full = 1;
+
+		count = ctx->sqb_count;
+		sqb_buf = (void *)ctx->head_sqb;
+		tail_sqb = (void *)ctx->tail_sqb;
+		head_off = ctx->head_offset;
+		tail_off = ctx->tail_offset;
+	} else {
+		volatile struct nix_cn10k_sq_ctx_s *ctx = (struct nix_cn10k_sq_ctx_s *)dat;
+
+		if (ctx->mnq_dis || ctx->lmt_dis)
+			full = 1;
+
+		count = ctx->sqb_count;
+		sqb_buf = (void *)ctx->head_sqb;
+		tail_sqb = (void *)ctx->tail_sqb;
+		head_off = ctx->head_offset;
+		tail_off = ctx->tail_offset;
+	}
+	sqes_per_sqb = 1 << sq->sqes_per_sqb_log2;
+	while (count) {
+		void *next_sqb;
+
+		if (sqb_buf == tail_sqb) {
+			if ((head_off + offset) >= tail_off) /* Nothing to dump */
+				return 0;
+			head_off += tail_off;
+			break;
+		} else if ((head_off + offset) >= sqes_per_sqb) {
+			next_sqb = *(void **)((uint64_t *)sqb_buf +
+					      (uint32_t)((sqes_per_sqb - 1) *
+							 (0x2 >> sq->max_sqe_sz) * 8));
+			/* While traffic is running, HW may have freed/reused this SQE */
+			if (!next_sqb)
+				return 0;
+			sqb_buf = next_sqb;
+			head_off = 0;
+			count--;
+		} else {
+			head_off += offset;
+			break;
+		}
+	}
+	while (count) {
+		void *next_sqb;
+
+		if (sqb_buf == tail_sqb)
+			nix_tm_sqe_dump(sqb_buf, head_off, tail_off, sq->max_sqe_sz, file, full,
+					&num);
+		else
+			nix_tm_sqe_dump(sqb_buf, head_off, (sqes_per_sqb - 1), sq->max_sqe_sz, file,
+					full, &num);
+		if (!num)
+			break;
+		next_sqb = *(void **)((uint64_t *)sqb_buf +
+				      (uint32_t)((sqes_per_sqb - 1) * (0x2 >> sq->max_sqe_sz) * 8));
+		/* While traffic is running, HW may have freed/reused this SQE */
+		if (!next_sqb)
+			return 0;
+		sqb_buf = next_sqb;
+		head_off = 0;
+		count--;
+	}
+
+	return 0;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index a7402d9941..05432ad245 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -340,6 +340,7 @@ INTERNAL {
 	roc_nix_rx_drop_re_set;
 	roc_nix_rx_queue_intr_disable;
 	roc_nix_rx_queue_intr_enable;
+	roc_nix_sq_desc_dump;
 	roc_nix_sq_dump;
 	roc_nix_sq_ena_dis;
 	roc_nix_sq_fini;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 05/14] net/cnxk: added Tx descriptor dump API
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (2 preceding siblings ...)
  2024-02-26 13:35   ` [PATCH v5 04/14] common/cnxk: dump selected SQ entries Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 06/14] net/cnxk: fix issue with buff size compute Nithin Dabilpuram
                     ` (8 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

New API to dump selected descriptor entries from the SQE list.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
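
With the dev op wired up, applications can reach it through the generic ethdev
helper; a hedged usage sketch, assuming rte_eth_tx_descriptor_dump() with the
(port, queue, offset, num, file) signature available in recent DPDK releases:

#include <stdio.h>
#include <rte_ethdev.h>

static void
dump_txq0_descs(uint16_t port_id)
{
	/* queue 0, start at offset 0, dump up to 16 descriptors to stdout */
	if (rte_eth_tx_descriptor_dump(port_id, 0, 0, 16, stdout))
		printf("Tx descriptor dump not supported or failed\n");
}
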
 drivers/net/cnxk/cnxk_ethdev.c     |  1 +
 drivers/net/cnxk/cnxk_ethdev.h     |  2 ++
 drivers/net/cnxk/cnxk_ethdev_ops.c | 10 ++++++++++
 3 files changed, 13 insertions(+)

diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 2372a4e793..7640910782 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1821,6 +1821,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
 	.cman_config_init = cnxk_nix_cman_config_init,
 	.cman_config_set = cnxk_nix_cman_config_set,
 	.cman_config_get = cnxk_nix_cman_config_get,
+	.eth_tx_descriptor_dump = cnxk_nix_tx_descriptor_dump,
 };
 
 void
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 37b6395b93..45b9055234 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -557,6 +557,8 @@ int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
 int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
 			     int mark_yellow, int mark_red,
 			     struct rte_tm_error *error);
+int cnxk_nix_tx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t offset,
+				uint16_t num, FILE *file);
 
 /* MTR */
 int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 5de2919047..e9ab8da781 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -1313,3 +1313,13 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
 exit:
 	return rc;
 }
+
+int
+cnxk_nix_tx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t offset,
+			    uint16_t num, FILE *file)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *nix = &dev->nix;
+
+	return roc_nix_sq_desc_dump(nix, qid, offset, num, file);
+}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 06/14] net/cnxk: fix issue with buff size compute
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (3 preceding siblings ...)
  2024-02-26 13:35   ` [PATCH v5 05/14] net/cnxk: added Tx descriptor dump API Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 07/14] common/cnxk: fix Tx MTU configuration Nithin Dabilpuram
                     ` (7 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev, stable

In the case where cnxk_nix_mtu_set() is called before
data->min_rx_buf_size is set, use the buffer size from the first RQ's
mempool.

Fixes: 34b46320f446 ("net/cnxk: perform early MTU setup for event mode")
Cc: stable@dpdk.org

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
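
A hedged sketch of the fallback this fix introduces: when data->min_rx_buf_size
is not yet populated (MTU configured before Rx queue setup), derive the usable
buffer size from the first Rx queue's mempool; a return of 0 means nothing is
known yet and the buffer-size validation is skipped. "mp0" stands in for the
first RQ's mempool:

#include <rte_mbuf.h>

static uint32_t
rx_buf_size_for_mtu_check(uint32_t min_rx_buf_size, struct rte_mempool *mp0)
{
	uint32_t buffsz = 0;

	if (min_rx_buf_size)
		buffsz = min_rx_buf_size;
	else if (mp0)
		buffsz = rte_pktmbuf_data_room_size(mp0);

	/* Mirror the driver: headroom is not usable for packet data */
	return buffsz ? buffsz - RTE_PKTMBUF_HEADROOM : 0;
}
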
 drivers/net/cnxk/cnxk_ethdev_ops.c | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index e9ab8da781..e816884d47 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -544,8 +544,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	struct rte_eth_dev_data *data = eth_dev->data;
 	struct roc_nix *nix = &dev->nix;
+	struct cnxk_eth_rxq_sp *rxq_sp;
+	uint32_t buffsz = 0;
 	int rc = -EINVAL;
-	uint32_t buffsz;
 
 	frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en;
 
@@ -561,8 +562,24 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 		goto exit;
 	}
 
-	buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
-	old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;
+	if (!eth_dev->data->nb_rx_queues)
+		goto skip_buffsz_check;
+
+	/* Perform buff size check */
+	if (data->min_rx_buf_size) {
+		buffsz = data->min_rx_buf_size;
+	} else if (eth_dev->data->rx_queues && eth_dev->data->rx_queues[0]) {
+		rxq_sp = cnxk_eth_rxq_to_sp(data->rx_queues[0]);
+
+		if (rxq_sp->qconf.mp)
+			buffsz = rte_pktmbuf_data_room_size(rxq_sp->qconf.mp);
+	}
+
+	/* Skip validation if RQs are not yet set up */
+	if (!buffsz)
+		goto skip_buffsz_check;
+
+	buffsz -= RTE_PKTMBUF_HEADROOM;
 
 	/* Refuse MTU that requires the support of scattered packets
 	 * when this feature has not been enabled before.
@@ -580,6 +597,8 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 		goto exit;
 	}
 
+skip_buffsz_check:
+	old_frame_size = data->mtu + CNXK_NIX_L2_OVERHEAD;
 	/* if new MTU was smaller than old one, then flush all SQs before MTU change */
 	if (old_frame_size > frame_size) {
 		if (data->dev_started) {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 07/14] common/cnxk: fix Tx MTU configuration
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (4 preceding siblings ...)
  2024-02-26 13:35   ` [PATCH v5 06/14] net/cnxk: fix issue with buff size compute Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 08/14] net/cnxk: fix max MTU limit Nithin Dabilpuram
                     ` (6 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Skip setting the Tx MTU separately, as the Tx credit configuration
is now based on the maximum MTU possible for that link.
Also initialize the MTU with the maximum value for that port.

Fixes: 8589ec212e80 ("net/cnxk: support MTU set")

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/common/cnxk/roc_nix.c      |  2 +-
 drivers/common/cnxk/roc_nix.h      |  2 --
 drivers/net/cnxk/cnxk_ethdev_ops.c | 12 +-----------
 3 files changed, 2 insertions(+), 14 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 97c0ae3e25..90ccb260fb 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -484,7 +484,7 @@ roc_nix_dev_init(struct roc_nix *roc_nix)
 	sdp_lbk_id_update(pci_dev, nix);
 	nix->pci_dev = pci_dev;
 	nix->reta_sz = reta_sz;
-	nix->mtu = ROC_NIX_DEFAULT_HW_FRS;
+	nix->mtu = roc_nix_max_pkt_len(roc_nix);
 	nix->dmac_flt_idx = -1;
 
 	/* Register error and ras interrupts */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 9d57ca0be7..3799b551f2 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -267,8 +267,6 @@ struct roc_nix_eeprom_info {
 #define ROC_NIX_RSS_KEY_LEN	     48 /* 352 Bits */
 #define ROC_NIX_RSS_MCAM_IDX_DEFAULT (-1)
 
-#define ROC_NIX_DEFAULT_HW_FRS 1514
-
 #define ROC_NIX_VWQE_MAX_SIZE_LOG2 11
 #define ROC_NIX_VWQE_MIN_SIZE_LOG2 2
 
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index e816884d47..4962f3bced 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -610,19 +610,9 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 
 	frame_size -= RTE_ETHER_CRC_LEN;
 
-	/* Update mtu on Tx */
-	rc = roc_nix_mac_mtu_set(nix, frame_size);
-	if (rc) {
-		plt_err("Failed to set MTU, rc=%d", rc);
-		goto exit;
-	}
-
-	/* Sync same frame size on Rx */
+	/* Set frame size on Rx */
 	rc = roc_nix_mac_max_rx_len_set(nix, frame_size);
 	if (rc) {
-		/* Rollback to older mtu */
-		roc_nix_mac_mtu_set(nix,
-				    old_frame_size - RTE_ETHER_CRC_LEN);
 		plt_err("Failed to max Rx frame length, rc=%d", rc);
 		goto exit;
 	}
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 08/14] net/cnxk: fix max MTU limit
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (5 preceding siblings ...)
  2024-02-26 13:35   ` [PATCH v5 07/14] common/cnxk: fix Tx MTU configuration Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 09/14] common/cnxk: fix RETA table config API Nithin Dabilpuram
                     ` (5 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev, stable

From: Sunil Kumar Kori <skori@marvell.com>

The device can support a maximum frame size of up to 9212 bytes. While
configuring the MTU, the overhead is taken as the Ethernet header size,
CRC and 2 * (VLAN tags), i.e. 14 + 4 + 2 * 4 = 26 bytes.

The overhead exposed to the user via rte_eth_dev_info() was only 18 bytes
(Ethernet header + CRC), which led to a wrong Rx frame size being set.

Fixes: 8589ec212e80 ("net/cnxk: support MTU set")
Cc: stable@dpdk.org

Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
 drivers/net/cnxk/cnxk_ethdev_ops.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 4962f3bced..56049c5dd2 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -20,8 +20,7 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
 	devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
 	devinfo->max_mac_addrs = dev->max_mac_entries;
 	devinfo->max_vfs = pci_dev->max_vfs;
-	devinfo->max_mtu = devinfo->max_rx_pktlen -
-				(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
+	devinfo->max_mtu = devinfo->max_rx_pktlen - CNXK_NIX_L2_OVERHEAD;
 	devinfo->min_mtu = devinfo->min_rx_bufsize - CNXK_NIX_L2_OVERHEAD;
 
 	devinfo->rx_offload_capa = dev->rx_offload_capa;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 09/14] common/cnxk: fix RETA table config API
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (6 preceding siblings ...)
  2024-02-26 13:35   ` [PATCH v5 08/14] net/cnxk: fix max MTU limit Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 10/14] net/cnxk: fix indirect mbuf handling in Tx path Nithin Dabilpuram
                     ` (4 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, Kommula Shiva Shankar

From: Kommula Shiva Shankar <kshankar@marvell.com>

This patch updates the copy of queue entries in the RETA table based on
the entry data type: the table holds uint16_t entries, so the copy length
must be sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX rather than
ROC_NIX_RSS_RETA_MAX bytes.

Fixes: 1bf6746e653b ("common/cnxk: support NIX RSS")

Signed-off-by: Kommula Shiva Shankar <kshankar@marvell.com>
---
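[Editor note, not part of the patch: a generic sketch of the sizing
pattern fixed below. RETA_MAX here is an illustrative size, not the real
ROC_NIX_RSS_RETA_MAX value.]

    #include <stdint.h>
    #include <string.h>

    #define RETA_MAX 64                     /* illustrative size only */

    static void
    reta_copy(uint16_t *dst, const uint16_t *src)
    {
            /* Wrong: copies only RETA_MAX bytes, i.e. half the table. */
            /* memcpy(dst, src, RETA_MAX); */

            /* Right: copies all RETA_MAX 16-bit entries. */
            memcpy(dst, src, RETA_MAX * sizeof(uint16_t));
    }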
 drivers/common/cnxk/roc_nix_rss.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_rss.c b/drivers/common/cnxk/roc_nix_rss.c
index 3599eb9bae..2b88e1360d 100644
--- a/drivers/common/cnxk/roc_nix_rss.c
+++ b/drivers/common/cnxk/roc_nix_rss.c
@@ -196,7 +196,7 @@ roc_nix_rss_reta_set(struct roc_nix *roc_nix, uint8_t group,
 	if (rc)
 		return rc;
 
-	memcpy(&nix->reta[group], reta, ROC_NIX_RSS_RETA_MAX);
+	memcpy(&nix->reta[group], reta, sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX);
 	return 0;
 }
 
@@ -209,7 +209,7 @@ roc_nix_rss_reta_get(struct roc_nix *roc_nix, uint8_t group,
 	if (group >= ROC_NIX_RSS_GRPS)
 		return NIX_ERR_PARAM;
 
-	memcpy(reta, &nix->reta[group], ROC_NIX_RSS_RETA_MAX);
+	memcpy(reta, &nix->reta[group], sizeof(uint16_t) * ROC_NIX_RSS_RETA_MAX);
 	return 0;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 10/14] net/cnxk: fix indirect mbuf handling in Tx path
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (7 preceding siblings ...)
  2024-02-26 13:35   ` [PATCH v5 09/14] common/cnxk: fix RETA table config API Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 11/14] net/cnxk: fix check cookies for multi-seg offload Nithin Dabilpuram
                     ` (3 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, stable, Rahul Bhansali

An indirect mbuf can point to data from a different pool. Use the right
aura in the NIX send header in both the SG2 and SG cases.

Fixes: 862e28128707 ("net/cnxk: add vector Tx for CN9K")
Fixes: f71b7dbbf04b ("net/cnxk: add vector Tx for CN10K")
Fixes: 7e95c11df4f1 ("net/cnxk: add multi-segment Tx for CN9K")
Fixes: 3626d5195d49 ("net/cnxk: add multi-segment Tx for CN10K")
Cc: stable@dpdk.org

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
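[Editor note, not part of the patch: a minimal sketch of the idea only.
For an indirect mbuf, the buffer that the hardware frees belongs to the
parent (direct) mbuf's mempool, so the aura programmed into the send
descriptor must come from that pool, not from m->pool. The helpers used
here appear in the diff below; tx_buf_aura() itself is hypothetical.]

    #include <rte_mbuf.h>

    /* Return the NPA aura of the pool that actually owns the data buffer:
     * for an indirect mbuf that is the parent mbuf's pool.
     */
    static inline uint64_t
    tx_buf_aura(struct rte_mbuf *m)
    {
            struct rte_mbuf *md;

            md = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
            return roc_npa_aura_handle_to_aura(md->pool->pool_id);
    }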
 drivers/net/cnxk/cn10k_ethdev.c   |   6 +
 drivers/net/cnxk/cn10k_rxtx.h     |   1 +
 drivers/net/cnxk/cn10k_tx.h       | 269 +++++++++++++++++++--------
 drivers/net/cnxk/cn9k_ethdev.c    |   6 +
 drivers/net/cnxk/cn9k_ethdev.h    |   1 +
 drivers/net/cnxk/cn9k_tx.h        | 299 +++++++++++++++++++++---------
 drivers/net/cnxk/cnxk_ethdev_dp.h |  10 +-
 7 files changed, 414 insertions(+), 178 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index a2e943a3d0..a5696c092a 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -389,7 +389,13 @@ cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
 		struct roc_nix_sq *sq = &dev->sqs[qidx];
 		do {
 			handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
+			/* Check if SQ is empty */
 			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
+			if (head != tail)
+				continue;
+
+			/* Check if completion CQ is empty */
+			roc_nix_cq_head_tail_get(nix, sq->cqid, &head, &tail);
 		} while (head != tail);
 	}
 
diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h
index aeffc4ac92..9f33d0192e 100644
--- a/drivers/net/cnxk/cn10k_rxtx.h
+++ b/drivers/net/cnxk/cn10k_rxtx.h
@@ -177,6 +177,7 @@ handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe)
 			m = m_next;
 		}
 		rte_pktmbuf_free_seg(m);
+		txq->tx_compl.ptr[tx_compl_s0->sqe_id] = NULL;
 
 		head++;
 		head &= qmask;
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 467f0ccc65..025eff2913 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -786,8 +786,9 @@ cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr,
 
 static __rte_always_inline uint64_t
 cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq,
-		struct nix_send_hdr_s *send_hdr)
+		      struct nix_send_hdr_s *send_hdr, uint64_t *aura)
 {
+	struct rte_mbuf *prev = NULL;
 	uint32_t sqe_id;
 
 	if (RTE_MBUF_HAS_EXTBUF(m)) {
@@ -796,7 +797,10 @@ cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq,
 			return 1;
 		}
 		if (send_hdr->w0.pnc) {
-			txq->tx_compl.ptr[send_hdr->w1.sqe_id]->next = m;
+			sqe_id = send_hdr->w1.sqe_id;
+			prev = txq->tx_compl.ptr[sqe_id];
+			m->next = prev;
+			txq->tx_compl.ptr[sqe_id] = m;
 		} else {
 			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
 			send_hdr->w0.pnc = 1;
@@ -806,10 +810,155 @@ cn10k_nix_prefree_seg(struct rte_mbuf *m, struct cn10k_eth_txq *txq,
 		}
 		return 1;
 	} else {
-		return cnxk_nix_prefree_seg(m);
+		return cnxk_nix_prefree_seg(m, aura);
 	}
 }
 
+#if defined(RTE_ARCH_ARM64)
+/* Only called for first segments of single segmented mbufs */
+static __rte_always_inline void
+cn10k_nix_prefree_seg_vec(struct rte_mbuf **mbufs, struct cn10k_eth_txq *txq,
+			  uint64x2_t *senddesc01_w0, uint64x2_t *senddesc23_w0,
+			  uint64x2_t *senddesc01_w1, uint64x2_t *senddesc23_w1)
+{
+	struct rte_mbuf **tx_compl_ptr = txq->tx_compl.ptr;
+	uint32_t nb_desc_mask = txq->tx_compl.nb_desc_mask;
+	bool tx_compl_ena = txq->tx_compl.ena;
+	struct rte_mbuf *m0, *m1, *m2, *m3;
+	struct rte_mbuf *cookie;
+	uint64_t w0, w1, aura;
+	uint64_t sqe_id;
+
+	m0 = mbufs[0];
+	m1 = mbufs[1];
+	m2 = mbufs[2];
+	m3 = mbufs[3];
+
+	/* mbuf 0 */
+	w0 = vgetq_lane_u64(*senddesc01_w0, 0);
+	if (RTE_MBUF_HAS_EXTBUF(m0)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc01_w1, 0);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m0);
+		} else {
+			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
+							       rte_memory_order_relaxed);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m0;
+			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 0);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m0) ? m0 : rte_mbuf_from_indirect(m0);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m0, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 0);
+
+	/* mbuf1 */
+	w0 = vgetq_lane_u64(*senddesc01_w0, 1);
+	if (RTE_MBUF_HAS_EXTBUF(m1)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc01_w1, 1);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m1);
+		} else {
+			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
+							       rte_memory_order_relaxed);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m1;
+			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 1);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m1) ? m1 : rte_mbuf_from_indirect(m1);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m1, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 1);
+
+	/* mbuf 2 */
+	w0 = vgetq_lane_u64(*senddesc23_w0, 0);
+	if (RTE_MBUF_HAS_EXTBUF(m2)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc23_w1, 0);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m2);
+		} else {
+			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
+							       rte_memory_order_relaxed);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m2;
+			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 0);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m2) ? m2 : rte_mbuf_from_indirect(m2);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m2, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 0);
+
+	/* mbuf3 */
+	w0 = vgetq_lane_u64(*senddesc23_w0, 1);
+	if (RTE_MBUF_HAS_EXTBUF(m3)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc23_w1, 1);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m3);
+		} else {
+			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
+							       rte_memory_order_relaxed);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m3;
+			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 1);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m3) ? m3 : rte_mbuf_from_indirect(m3);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m3, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 1);
+#ifndef RTE_LIBRTE_MEMPOOL_DEBUG
+	RTE_SET_USED(cookie);
+#endif
+}
+#endif
+
 static __rte_always_inline void
 cn10k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
 {
@@ -889,6 +1038,9 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq,
 		sg = (union nix_send_sg_s *)(cmd + 2);
 	}
 
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+		send_hdr->w0.pnc = 0;
+
 	if (flags & (NIX_TX_NEED_SEND_HDR_W1 | NIX_TX_OFFLOAD_SECURITY_F)) {
 		ol_flags = m->ol_flags;
 		w1.u = 0;
@@ -1049,19 +1201,30 @@ cn10k_nix_xmit_prepare(struct cn10k_eth_txq *txq,
 		send_hdr->w1.u = w1.u;
 
 	if (!(flags & NIX_TX_MULTI_SEG_F)) {
+		struct rte_mbuf *cookie;
+
 		sg->seg1_size = send_hdr->w0.total;
 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+			uint64_t aura;
+
 			/* DF bit = 1 if refcount of current mbuf or parent mbuf
 			 *		is greater than 1
 			 * DF bit = 0 otherwise
 			 */
-			send_hdr->w0.df = cn10k_nix_prefree_seg(m, txq, send_hdr);
+			aura = send_hdr->w0.aura;
+			send_hdr->w0.df = cn10k_nix_prefree_seg(m, txq, send_hdr, &aura);
+			send_hdr->w0.aura = aura;
 		}
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		/* Mark mempool object as "put" since it is freed by NIX */
 		if (!send_hdr->w0.df)
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+#else
+		RTE_SET_USED(cookie);
+#endif
 	} else {
 		sg->seg1_size = m->data_len;
 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
@@ -1135,6 +1298,7 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 	struct nix_send_hdr_s *send_hdr;
 	union nix_send_sg_s *sg, l_sg;
 	union nix_send_sg2_s l_sg2;
+	struct rte_mbuf *cookie;
 	struct rte_mbuf *m_next;
 	uint8_t off, is_sg2;
 	uint64_t len, dlen;
@@ -1163,21 +1327,26 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 	len -= dlen;
 	nb_segs = m->nb_segs - 1;
 	m_next = m->next;
+	m->next = NULL;
 	slist = &cmd[3 + off + 1];
 
+	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 	/* Set invert df if buffer is not to be freed by H/W */
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-		prefree = cn10k_nix_prefree_seg(m, txq, send_hdr);
+		aura = send_hdr->w0.aura;
+		prefree = cn10k_nix_prefree_seg(m, txq, send_hdr, &aura);
+		send_hdr->w0.aura = aura;
 		l_sg.i1 = prefree;
 	}
 
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	/* Mark mempool object as "put" since it is freed by NIX */
 	if (!prefree)
-		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+		RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 	rte_io_wmb();
+#else
+	RTE_SET_USED(cookie);
 #endif
-	m->next = NULL;
 
 	/* Quickly handle single segmented packets. With this if-condition
 	 * compiler will completely optimize out the below do-while loop
@@ -1207,9 +1376,12 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 		aura = aura0;
 		prefree = 0;
 
+		m->next = NULL;
+
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
 			aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
-			prefree = cn10k_nix_prefree_seg(m, txq, send_hdr);
+			prefree = cn10k_nix_prefree_seg(m, txq, send_hdr, &aura);
 			is_sg2 = aura != aura0 && !prefree;
 		}
 
@@ -1259,13 +1431,14 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 			l_sg.subdc = NIX_SUBDC_SG;
 			slist++;
 		}
-		m->next = NULL;
 
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		/* Mark mempool object as "put" since it is freed by NIX
 		 */
 		if (!prefree)
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+#else
+		RTE_SET_USED(cookie);
 #endif
 		m = m_next;
 	} while (nb_segs);
@@ -1997,13 +2170,10 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 	uint64x2_t sgdesc01_w0, sgdesc23_w0;
 	uint64x2_t sgdesc01_w1, sgdesc23_w1;
 	struct cn10k_eth_txq *txq = tx_queue;
-	uint64x2_t xmask01_w0, xmask23_w0;
-	uint64x2_t xmask01_w1, xmask23_w1;
 	rte_iova_t io_addr = txq->io_addr;
 	uint8_t lnum, shift = 0, loff = 0;
 	uintptr_t laddr = txq->lmt_base;
 	uint8_t c_lnum, c_shft, c_loff;
-	struct nix_send_hdr_s send_hdr;
 	uint64x2_t ltypes01, ltypes23;
 	uint64x2_t xtmp128, ytmp128;
 	uint64x2_t xmask01, xmask23;
@@ -2153,7 +2323,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		}
 		/* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
 		senddesc01_w0 =
-			vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF));
+			vbicq_u64(senddesc01_w0, vdupq_n_u64(0x800FFFFFFFF));
 		sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF));
 
 		senddesc23_w0 = senddesc01_w0;
@@ -2859,73 +3029,8 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 		    !(flags & NIX_TX_MULTI_SEG_F) &&
 		    !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
 			/* Set don't free bit if reference count > 1 */
-			xmask01_w0 = vdupq_n_u64(0);
-			xmask01_w1 = vdupq_n_u64(0);
-			xmask23_w0 = xmask01_w0;
-			xmask23_w1 = xmask01_w1;
-
-			/* Move mbufs to iova */
-			mbuf0 = (uint64_t *)tx_pkts[0];
-			mbuf1 = (uint64_t *)tx_pkts[1];
-			mbuf2 = (uint64_t *)tx_pkts[2];
-			mbuf3 = (uint64_t *)tx_pkts[3];
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf0, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 0);
-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 0);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf0)->pool,
-					(void **)&mbuf0, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf1, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 1);
-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 1);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf1)->pool,
-					(void **)&mbuf1, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf2, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 0);
-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 0);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf2)->pool,
-					(void **)&mbuf2, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn10k_nix_prefree_seg((struct rte_mbuf *)mbuf3, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 1);
-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 1);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf3)->pool,
-					(void **)&mbuf3, 1, 0);
-			}
-
-			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01_w0);
-			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23_w0);
-			senddesc01_w1 = vorrq_u64(senddesc01_w1, xmask01_w1);
-			senddesc23_w1 = vorrq_u64(senddesc23_w1, xmask23_w1);
+			cn10k_nix_prefree_seg_vec(tx_pkts, txq, &senddesc01_w0, &senddesc23_w0,
+						  &senddesc01_w1, &senddesc23_w1);
 		} else if (!(flags & NIX_TX_MULTI_SEG_F) &&
 			   !(flags & NIX_TX_OFFLOAD_SECURITY_F)) {
 			/* Move mbufs to iova */
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 67f21a9c7f..ea92b1dcb6 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -347,7 +347,13 @@ cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
 		struct roc_nix_sq *sq = &dev->sqs[qidx];
 		do {
 			handle_tx_completion_pkts(txq, 0);
+			/* Check if SQ is empty */
 			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
+			if (head != tail)
+				continue;
+
+			/* Check if completion CQ is empty */
+			roc_nix_cq_head_tail_get(nix, sq->cqid, &head, &tail);
 		} while (head != tail);
 	}
 
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index 9e0a3c5bb2..6ae0db62ca 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -169,6 +169,7 @@ handle_tx_completion_pkts(struct cn9k_eth_txq *txq, uint8_t mt_safe)
 			m = m_next;
 		}
 		rte_pktmbuf_free_seg(m);
+		txq->tx_compl.ptr[tx_compl_s0->sqe_id] = NULL;
 
 		head++;
 		head &= qmask;
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index fba4bb4215..3596651cc2 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -83,9 +83,10 @@ cn9k_nix_tx_skeleton(struct cn9k_eth_txq *txq, uint64_t *cmd,
 }
 
 static __rte_always_inline uint64_t
-cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq,
-		struct nix_send_hdr_s *send_hdr)
+cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq, struct nix_send_hdr_s *send_hdr,
+		     uint64_t *aura)
 {
+	struct rte_mbuf *prev;
 	uint32_t sqe_id;
 
 	if (RTE_MBUF_HAS_EXTBUF(m)) {
@@ -94,7 +95,10 @@ cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq,
 			return 1;
 		}
 		if (send_hdr->w0.pnc) {
-			txq->tx_compl.ptr[send_hdr->w1.sqe_id]->next = m;
+			sqe_id = send_hdr->w1.sqe_id;
+			prev = txq->tx_compl.ptr[sqe_id];
+			m->next = prev;
+			txq->tx_compl.ptr[sqe_id] = m;
 		} else {
 			sqe_id = __atomic_fetch_add(&txq->tx_compl.sqe_id, 1, __ATOMIC_RELAXED);
 			send_hdr->w0.pnc = 1;
@@ -104,10 +108,155 @@ cn9k_nix_prefree_seg(struct rte_mbuf *m, struct cn9k_eth_txq *txq,
 		}
 		return 1;
 	} else {
-		return cnxk_nix_prefree_seg(m);
+		return cnxk_nix_prefree_seg(m, aura);
 	}
 }
 
+#if defined(RTE_ARCH_ARM64)
+/* Only called for first segments of single segmented mbufs */
+static __rte_always_inline void
+cn9k_nix_prefree_seg_vec(struct rte_mbuf **mbufs, struct cn9k_eth_txq *txq,
+			 uint64x2_t *senddesc01_w0, uint64x2_t *senddesc23_w0,
+			 uint64x2_t *senddesc01_w1, uint64x2_t *senddesc23_w1)
+{
+	struct rte_mbuf **tx_compl_ptr = txq->tx_compl.ptr;
+	uint32_t nb_desc_mask = txq->tx_compl.nb_desc_mask;
+	bool tx_compl_ena = txq->tx_compl.ena;
+	struct rte_mbuf *m0, *m1, *m2, *m3;
+	struct rte_mbuf *cookie;
+	uint64_t w0, w1, aura;
+	uint64_t sqe_id;
+
+	m0 = mbufs[0];
+	m1 = mbufs[1];
+	m2 = mbufs[2];
+	m3 = mbufs[3];
+
+	/* mbuf 0 */
+	w0 = vgetq_lane_u64(*senddesc01_w0, 0);
+	if (RTE_MBUF_HAS_EXTBUF(m0)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc01_w1, 0);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m0);
+		} else {
+			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
+							       rte_memory_order_relaxed);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m0;
+			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 0);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m0) ? m0 : rte_mbuf_from_indirect(m0);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m0, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 0);
+
+	/* mbuf1 */
+	w0 = vgetq_lane_u64(*senddesc01_w0, 1);
+	if (RTE_MBUF_HAS_EXTBUF(m1)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc01_w1, 1);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m1);
+		} else {
+			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
+							       rte_memory_order_relaxed);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m1;
+			*senddesc01_w1 = vsetq_lane_u64(w1, *senddesc01_w1, 1);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m1) ? m1 : rte_mbuf_from_indirect(m1);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m1, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc01_w0 = vsetq_lane_u64(w0, *senddesc01_w0, 1);
+
+	/* mbuf 2 */
+	w0 = vgetq_lane_u64(*senddesc23_w0, 0);
+	if (RTE_MBUF_HAS_EXTBUF(m2)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc23_w1, 0);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m2);
+		} else {
+			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
+							       rte_memory_order_relaxed);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m2;
+			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 0);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m2) ? m2 : rte_mbuf_from_indirect(m2);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m2, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 0);
+
+	/* mbuf3 */
+	w0 = vgetq_lane_u64(*senddesc23_w0, 1);
+	if (RTE_MBUF_HAS_EXTBUF(m3)) {
+		w0 |= BIT_ULL(19);
+		w1 = vgetq_lane_u64(*senddesc23_w1, 1);
+		w1 &= ~0xFFFF000000000000UL;
+		if (unlikely(!tx_compl_ena)) {
+			rte_pktmbuf_free_seg(m3);
+		} else {
+			sqe_id = rte_atomic_fetch_add_explicit(&txq->tx_compl.sqe_id, 1,
+							       rte_memory_order_relaxed);
+			sqe_id = sqe_id & nb_desc_mask;
+			/* Set PNC */
+			w0 |= BIT_ULL(43);
+			w1 |= sqe_id << 48;
+			tx_compl_ptr[sqe_id] = m3;
+			*senddesc23_w1 = vsetq_lane_u64(w1, *senddesc23_w1, 1);
+		}
+	} else {
+		cookie = RTE_MBUF_DIRECT(m3) ? m3 : rte_mbuf_from_indirect(m3);
+		aura = (w0 >> 20) & 0xFFFFF;
+		w0 &= ~0xFFFFF00000UL;
+		w0 |= cnxk_nix_prefree_seg(m3, &aura) << 19;
+		w0 |= aura << 20;
+
+		if ((w0 & BIT_ULL(19)) == 0)
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+	}
+	*senddesc23_w0 = vsetq_lane_u64(w0, *senddesc23_w0, 1);
+#ifndef RTE_LIBRTE_MEMPOOL_DEBUG
+	RTE_SET_USED(cookie);
+#endif
+}
+#endif
+
 static __rte_always_inline void
 cn9k_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
 {
@@ -191,6 +340,8 @@ cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq,
 		ol_flags = m->ol_flags;
 		w1.u = 0;
 	}
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+		send_hdr->w0.pnc = 0;
 
 	if (!(flags & NIX_TX_MULTI_SEG_F))
 		send_hdr->w0.total = m->data_len;
@@ -345,23 +496,33 @@ cn9k_nix_xmit_prepare(struct cn9k_eth_txq *txq,
 		send_hdr->w1.u = w1.u;
 
 	if (!(flags & NIX_TX_MULTI_SEG_F)) {
+		struct rte_mbuf *cookie;
+
 		sg->seg1_size = m->data_len;
 		*(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m);
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+			uint64_t aura;
 			/* DF bit = 1 if refcount of current mbuf or parent mbuf
 			 *		is greater than 1
 			 * DF bit = 0 otherwise
 			 */
-			send_hdr->w0.df = cn9k_nix_prefree_seg(m, txq, send_hdr);
+			aura = send_hdr->w0.aura;
+			send_hdr->w0.df = cn9k_nix_prefree_seg(m, txq, send_hdr, &aura);
+			send_hdr->w0.aura = aura;
 			/* Ensuring mbuf fields which got updated in
 			 * cnxk_nix_prefree_seg are written before LMTST.
 			 */
 			rte_io_wmb();
 		}
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		/* Mark mempool object as "put" since it is freed by NIX */
 		if (!send_hdr->w0.df)
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
+#else
+		RTE_SET_USED(cookie);
+#endif
 	} else {
 		sg->seg1_size = m->data_len;
 		*(rte_iova_t *)(sg + 1) = rte_mbuf_data_iova(m);
@@ -443,6 +604,8 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 		      struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
 {
 	struct nix_send_hdr_s *send_hdr;
+	uint64_t prefree = 0, aura;
+	struct rte_mbuf *cookie;
 	union nix_send_sg_s *sg;
 	struct rte_mbuf *m_next;
 	uint64_t *slist, sg_u;
@@ -467,17 +630,23 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 	m_next = m->next;
 	slist = &cmd[3 + off + 1];
 
+	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 	/* Set invert df if buffer is not to be freed by H/W */
 	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-		sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << 55);
+		aura = send_hdr->w0.aura;
+		prefree = (cn9k_nix_prefree_seg(m, txq, send_hdr, &aura) << 55);
+		send_hdr->w0.aura = aura;
+		sg_u |= prefree;
 		rte_io_wmb();
 	}
 
 	/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	if (!(sg_u & (1ULL << 55)))
-		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+		RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 	rte_io_wmb();
+#else
+	RTE_SET_USED(cookie);
 #endif
 	m = m_next;
 	if (!m)
@@ -488,16 +657,17 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 		m_next = m->next;
 		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
 		*slist = rte_mbuf_data_iova(m);
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 		/* Set invert df if buffer is not to be freed by H/W */
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
-			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << (i + 55));
+			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr, NULL) << (i + 55));
 			/* Commit changes to mbuf */
 			rte_io_wmb();
 		}
 		/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		if (!(sg_u & (1ULL << (i + 55))))
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 		rte_io_wmb();
 #endif
 		slist++;
@@ -709,8 +879,8 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 			       struct nix_send_hdr_s *send_hdr,
 			       union nix_send_sg_s *sg, const uint32_t flags)
 {
-	struct rte_mbuf *m_next;
-	uint64_t *slist, sg_u;
+	struct rte_mbuf *m_next, *cookie;
+	uint64_t *slist, sg_u, aura;
 	uint16_t nb_segs;
 	uint64_t segdw;
 	int i = 1;
@@ -727,13 +897,19 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 	m_next = m->next;
 
 	/* Set invert df if buffer is not to be freed by H/W */
-	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
-		sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << 55);
-		/* Mark mempool object as "put" since it is freed by NIX */
+	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
+	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+		aura = send_hdr->w0.aura;
+		sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr, &aura) << 55);
+		send_hdr->w0.aura = aura;
+	}
+	/* Mark mempool object as "put" since it is freed by NIX */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	if (!(sg_u & (1ULL << 55)))
-		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+		RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 	rte_io_wmb();
+#else
+	RTE_SET_USED(cookie);
 #endif
 
 	m = m_next;
@@ -742,14 +918,15 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 		m_next = m->next;
 		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
 		*slist = rte_mbuf_data_iova(m);
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 		/* Set invert df if buffer is not to be freed by H/W */
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
-			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr) << (i + 55));
+			sg_u |= (cn9k_nix_prefree_seg(m, txq, send_hdr, &aura) << (i + 55));
 			/* Mark mempool object as "put" since it is freed by NIX
 			 */
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		if (!(sg_u & (1ULL << (i + 55))))
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 		rte_io_wmb();
 #endif
 		slist++;
@@ -789,15 +966,20 @@ cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq,
 			  uint64x2_t *cmd1, const uint32_t flags)
 {
 	struct nix_send_hdr_s send_hdr;
+	struct rte_mbuf *cookie;
 	union nix_send_sg_s sg;
+	uint64_t aura;
 	uint8_t ret;
 
 	if (m->nb_segs == 1) {
+		cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
 		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
 			send_hdr.w0.u = vgetq_lane_u64(cmd0[0], 0);
 			send_hdr.w1.u = vgetq_lane_u64(cmd0[0], 1);
 			sg.u = vgetq_lane_u64(cmd1[0], 0);
-			sg.u |= (cn9k_nix_prefree_seg(m, txq, &send_hdr) << 55);
+			aura = send_hdr.w0.aura;
+			sg.u |= (cn9k_nix_prefree_seg(m, txq, &send_hdr, &aura) << 55);
+			send_hdr.w0.aura = aura;
 			cmd1[0] = vsetq_lane_u64(sg.u, cmd1[0], 0);
 			cmd0[0] = vsetq_lane_u64(send_hdr.w0.u, cmd0[0], 0);
 			cmd0[0] = vsetq_lane_u64(send_hdr.w1.u, cmd0[0], 1);
@@ -806,8 +988,10 @@ cn9k_nix_prepare_mseg_vec(struct cn9k_eth_txq *txq,
 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 		sg.u = vgetq_lane_u64(cmd1[0], 0);
 		if (!(sg.u & (1ULL << 55)))
-			RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(cookie->pool, (void **)&cookie, 1, 0);
 		rte_io_wmb();
+#else
+		RTE_SET_USED(cookie);
 #endif
 		return 2 + !!(flags & NIX_TX_NEED_EXT_HDR) +
 		       !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
@@ -962,10 +1146,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64x2_t sgdesc01_w1, sgdesc23_w1;
 	struct cn9k_eth_txq *txq = tx_queue;
 	uint64_t *lmt_addr = txq->lmt_addr;
-	uint64x2_t xmask01_w0, xmask23_w0;
-	uint64x2_t xmask01_w1, xmask23_w1;
 	rte_iova_t io_addr = txq->io_addr;
-	struct nix_send_hdr_s send_hdr;
 	uint64x2_t ltypes01, ltypes23;
 	uint64x2_t xtmp128, ytmp128;
 	uint64x2_t xmask01, xmask23;
@@ -1028,7 +1209,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 	for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
 		/* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
 		senddesc01_w0 =
-			vbicq_u64(senddesc01_w0, vdupq_n_u64(0xFFFFFFFF));
+			vbicq_u64(senddesc01_w0, vdupq_n_u64(0x800FFFFFFFF));
 		sgdesc01_w0 = vbicq_u64(sgdesc01_w0, vdupq_n_u64(0xFFFFFFFF));
 
 		senddesc23_w0 = senddesc01_w0;
@@ -1732,74 +1913,8 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
 		if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
 		    !(flags & NIX_TX_MULTI_SEG_F)) {
 			/* Set don't free bit if reference count > 1 */
-			xmask01_w0 = vdupq_n_u64(0);
-			xmask01_w1 = vdupq_n_u64(0);
-			xmask23_w0 = xmask01_w0;
-			xmask23_w1 = xmask01_w1;
-
-			/* Move mbufs to iova */
-			mbuf0 = (uint64_t *)tx_pkts[0];
-			mbuf1 = (uint64_t *)tx_pkts[1];
-			mbuf2 = (uint64_t *)tx_pkts[2];
-			mbuf3 = (uint64_t *)tx_pkts[3];
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf0, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 0);
-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 0);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf0)->pool,
-					(void **)&mbuf0, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf1, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask01_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask01_w0, 1);
-				xmask01_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask01_w1, 1);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf1)->pool,
-					(void **)&mbuf1, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf2, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 0);
-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 0);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf2)->pool,
-					(void **)&mbuf2, 1, 0);
-			}
-
-			send_hdr.w0.u = 0;
-			send_hdr.w1.u = 0;
-
-			if (cn9k_nix_prefree_seg((struct rte_mbuf *)mbuf3, txq, &send_hdr)) {
-				send_hdr.w0.df = 1;
-				xmask23_w0 = vsetq_lane_u64(send_hdr.w0.u, xmask23_w0, 1);
-				xmask23_w1 = vsetq_lane_u64(send_hdr.w1.u, xmask23_w1, 1);
-			} else {
-				RTE_MEMPOOL_CHECK_COOKIES(
-					((struct rte_mbuf *)mbuf3)->pool,
-					(void **)&mbuf3, 1, 0);
-			}
-
-			senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01_w0);
-			senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23_w0);
-			senddesc01_w1 = vorrq_u64(senddesc01_w1, xmask01_w1);
-			senddesc23_w1 = vorrq_u64(senddesc23_w1, xmask23_w1);
-
+			cn9k_nix_prefree_seg_vec(tx_pkts, txq, &senddesc01_w0, &senddesc23_w0,
+						 &senddesc01_w1, &senddesc23_w1);
 			/* Ensuring mbuf fields which got updated in
 			 * cnxk_nix_prefree_seg are written before LMTST.
 			 */
diff --git a/drivers/net/cnxk/cnxk_ethdev_dp.h b/drivers/net/cnxk/cnxk_ethdev_dp.h
index c1f99a2616..67f40b8e25 100644
--- a/drivers/net/cnxk/cnxk_ethdev_dp.h
+++ b/drivers/net/cnxk/cnxk_ethdev_dp.h
@@ -84,7 +84,7 @@ struct cnxk_timesync_info {
 
 /* Inlines */
 static __rte_always_inline uint64_t
-cnxk_pktmbuf_detach(struct rte_mbuf *m)
+cnxk_pktmbuf_detach(struct rte_mbuf *m, uint64_t *aura)
 {
 	struct rte_mempool *mp = m->pool;
 	uint32_t mbuf_size, buf_len;
@@ -94,6 +94,8 @@ cnxk_pktmbuf_detach(struct rte_mbuf *m)
 
 	/* Update refcount of direct mbuf */
 	md = rte_mbuf_from_indirect(m);
+	if (aura)
+		*aura = roc_npa_aura_handle_to_aura(md->pool->pool_id);
 	refcount = rte_mbuf_refcnt_update(md, -1);
 
 	priv_size = rte_pktmbuf_priv_size(mp);
@@ -126,18 +128,18 @@ cnxk_pktmbuf_detach(struct rte_mbuf *m)
 }
 
 static __rte_always_inline uint64_t
-cnxk_nix_prefree_seg(struct rte_mbuf *m)
+cnxk_nix_prefree_seg(struct rte_mbuf *m, uint64_t *aura)
 {
 	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
 		if (!RTE_MBUF_DIRECT(m))
-			return cnxk_pktmbuf_detach(m);
+			return cnxk_pktmbuf_detach(m, aura);
 
 		m->next = NULL;
 		m->nb_segs = 1;
 		return 0;
 	} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
 		if (!RTE_MBUF_DIRECT(m))
-			return cnxk_pktmbuf_detach(m);
+			return cnxk_pktmbuf_detach(m, aura);
 
 		rte_mbuf_refcnt_set(m, 1);
 		m->next = NULL;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 11/14] net/cnxk: fix check cookies for multi-seg offload
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (8 preceding siblings ...)
  2024-02-26 13:35   ` [PATCH v5 10/14] net/cnxk: fix indirect mbuf handling in Tx path Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 12/14] common/cnxk: fix mbox struct attributes Nithin Dabilpuram
                     ` (2 subsequent siblings)
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, Rahul Bhansali, stable

From: Rahul Bhansali <rbhansali@marvell.com>

Fix missing mempool check cookies in the multi-seg offload case.

Fixes: 3626d5195d49 ("net/cnxk: add multi-segment Tx for CN10K")
Cc: stable@dpdk.org

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 025eff2913..84d71d0818 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -1867,6 +1867,9 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 	len -= dlen;
 	sg_u = sg_u | ((uint64_t)dlen);
 
+	/* Mark mempool object as "put" since it is freed by NIX */
+	RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+
 	nb_segs = m->nb_segs - 1;
 	m_next = m->next;
 	m->next = NULL;
@@ -1892,6 +1895,9 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 			slist++;
 		}
 		m->next = NULL;
+		/* Mark mempool object as "put" since it is freed by NIX */
+		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+
 		m = m_next;
 	} while (nb_segs);
 
@@ -1915,8 +1921,11 @@ cn10k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0,
 	union nix_send_hdr_w0_u sh;
 	union nix_send_sg_s sg;
 
-	if (m->nb_segs == 1)
+	if (m->nb_segs == 1) {
+		/* Mark mempool object as "put" since it is freed by NIX */
+		RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
 		return;
+	}
 
 	sh.u = vgetq_lane_u64(cmd0[0], 0);
 	sg.u = vgetq_lane_u64(cmd1[0], 0);
@@ -1976,6 +1985,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq,
 			*data128 |= ((__uint128_t)7) << *shift;
 			*shift += 3;
 
+			/* Mark mempool object as "put" since it is freed by NIX */
+			RTE_MEMPOOL_CHECK_COOKIES(mbufs[0]->pool, (void **)&mbufs[0], 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(mbufs[1]->pool, (void **)&mbufs[1], 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(mbufs[2]->pool, (void **)&mbufs[2], 1, 0);
+			RTE_MEMPOOL_CHECK_COOKIES(mbufs[3]->pool, (void **)&mbufs[3], 1, 0);
 			return 1;
 		}
 	}
@@ -1994,6 +2008,11 @@ cn10k_nix_prep_lmt_mseg_vector(struct cn10k_eth_txq *txq,
 				vst1q_u64(lmt_addr + 10, cmd2[j + 1]);
 				vst1q_u64(lmt_addr + 12, cmd1[j + 1]);
 				vst1q_u64(lmt_addr + 14, cmd3[j + 1]);
+
+				/* Mark mempool object as "put" since it is freed by NIX */
+				RTE_MEMPOOL_CHECK_COOKIES(mbufs[j]->pool, (void **)&mbufs[j], 1, 0);
+				RTE_MEMPOOL_CHECK_COOKIES(mbufs[j + 1]->pool,
+							  (void **)&mbufs[j + 1], 1, 0);
 			} else if (flags & NIX_TX_NEED_EXT_HDR) {
 				/* EXT header take 3 each, space for 2 segs.*/
 				cn10k_nix_prepare_mseg_vec(mbufs[j],
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 12/14] common/cnxk: fix mbox struct attributes
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (9 preceding siblings ...)
  2024-02-26 13:35   ` [PATCH v5 11/14] net/cnxk: fix check cookies for multi-seg offload Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 13/14] common/cnxk: use SQ enable and disable API Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 14/14] net/cnxk: fix mbuf fields in multi-seg Tx path Nithin Dabilpuram
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev, stable

The IO attribute is needed on mbox structs to avoid unaligned or paired
accesses caused by compiler optimization. Add it to the structs where it
is missing.

Fixes: 503b82de2cbf ("common/cnxk: add mbox request and response definitions")
Fixes: ddf955d3917e ("common/cnxk: support CPT second pass")
Cc: stable@dpdk.org

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
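[Editor note, not part of the patch: a small sketch of the failure mode,
under the assumption that __io expands to a volatile qualifier, as in
other Marvell mbox headers; the struct below is illustrative only.]

    #include <stdint.h>

    #define __io volatile   /* assumed definition, for illustration only */

    /* With each word marked volatile the compiler must issue one access
     * per member instead of merging neighbouring mbox words into a wider
     * or paired load/store (e.g. ldp/stp on arm64).
     */
    struct example_mbox_rsp {
            uint64_t __io w0;
            uint64_t __io w1;
    };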
 drivers/common/cnxk/roc_mbox.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 4b4f48e372..d8a8494ac4 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -1427,12 +1427,12 @@ struct nix_cn10k_aq_enq_req {
 struct nix_cn10k_aq_enq_rsp {
 	struct mbox_msghdr hdr;
 	union {
-		struct nix_cn10k_rq_ctx_s rq;
-		struct nix_cn10k_sq_ctx_s sq;
-		struct nix_cq_ctx_s cq;
-		struct nix_rsse_s rss;
-		struct nix_rx_mce_s mce;
-		struct nix_band_prof_s prof;
+		__io struct nix_cn10k_rq_ctx_s rq;
+		__io struct nix_cn10k_sq_ctx_s sq;
+		__io struct nix_cq_ctx_s cq;
+		__io struct nix_rsse_s rss;
+		__io struct nix_rx_mce_s mce;
+		__io struct nix_band_prof_s prof;
 	};
 };
 
@@ -1668,11 +1668,11 @@ struct nix_rq_cpt_field_mask_cfg_req {
 #define RQ_CTX_MASK_MAX 6
 	union {
 		uint64_t __io rq_ctx_word_set[RQ_CTX_MASK_MAX];
-		struct nix_cn10k_rq_ctx_s rq_set;
+		__io struct nix_cn10k_rq_ctx_s rq_set;
 	};
 	union {
 		uint64_t __io rq_ctx_word_mask[RQ_CTX_MASK_MAX];
-		struct nix_cn10k_rq_ctx_s rq_mask;
+		__io struct nix_cn10k_rq_ctx_s rq_mask;
 	};
 	struct nix_lf_rx_ipec_cfg1_req {
 		uint32_t __io spb_cpt_aura;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 13/14] common/cnxk: use SQ enable and disable API
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (10 preceding siblings ...)
  2024-02-26 13:35   ` [PATCH v5 12/14] common/cnxk: fix mbox struct attributes Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-26 13:35   ` [PATCH v5 14/14] net/cnxk: fix mbuf fields in multi-seg Tx path Nithin Dabilpuram
  12 siblings, 0 replies; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

Use the SQ enable and disable API in TM setup to effect the state
change. This is needed since the SQ state now helps avoid repeated
SQ enables / disables when the SQ is already in that state; otherwise,
the extra mbox messages slow down bootup / teardown.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
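[Editor note, not part of the patch: a generic sketch of why tracking the
state saves mbox traffic. The struct and its field are hypothetical and
do not reflect the real roc_nix_sq layout.]

    #include <stdbool.h>

    struct sq_state {
            bool enabled;           /* hypothetical cached SQ state */
    };

    /* Skip the mailbox round trip when the SQ is already in the requested
     * state; only issue the (slow) mbox request on a real transition.
     */
    static int
    sq_ena_dis_sketch(struct sq_state *sq, bool enable)
    {
            if (sq->enabled == enable)
                    return 0;       /* nothing to do, no mbox message */

            /* ... send the enable/disable mbox request here ... */
            sq->enabled = enable;
            return 0;
    }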
 drivers/common/cnxk/roc_nix_tm_ops.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 900b182c76..9f3870a311 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -495,7 +495,7 @@ roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
 		if (!sq)
 			continue;
 
-		rc = roc_nix_tm_sq_aura_fc(sq, false);
+		rc = roc_nix_sq_ena_dis(sq, false);
 		if (rc) {
 			plt_err("Failed to disable sqb aura fc, rc=%d", rc);
 			goto cleanup;
@@ -607,7 +607,7 @@ roc_nix_tm_hierarchy_xmit_enable(struct roc_nix *roc_nix, enum roc_nix_tm_tree t
 		sq_id = node->id;
 		sq = nix->sqs[sq_id];
 
-		rc = roc_nix_tm_sq_aura_fc(sq, true);
+		rc = roc_nix_sq_ena_dis(sq, true);
 		if (rc) {
 			plt_err("TM sw xon failed on SQ %u, rc=%d", node->id,
 				rc);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* [PATCH v5 14/14] net/cnxk: fix mbuf fields in multi-seg Tx path
  2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
                     ` (11 preceding siblings ...)
  2024-02-26 13:35   ` [PATCH v5 13/14] common/cnxk: use SQ enable and disable API Nithin Dabilpuram
@ 2024-02-26 13:35   ` Nithin Dabilpuram
  2024-02-29 18:23     ` Jerin Jacob
  12 siblings, 1 reply; 43+ messages in thread
From: Nithin Dabilpuram @ 2024-02-26 13:35 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, Rahul Bhansali

From: Rahul Bhansali <rbhansali@marvell.com>

Currently, in debug mode, when a buffer is allocated in SW, nb_segs
can hold an invalid value since the mbuf did not come from the driver
Rx path. Hence reset the mbuf next and nb_segs fields in the multi-seg
Tx path.

Fixes: 3626d5195d49 ("net/cnxk: add multi-segment Tx for CN10K")

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h |  2 ++
 drivers/net/cnxk/cn9k_tx.h  | 20 ++++++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 84d71d0818..cc480d24e8 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -1328,6 +1328,7 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
 	nb_segs = m->nb_segs - 1;
 	m_next = m->next;
 	m->next = NULL;
+	m->nb_segs = 1;
 	slist = &cmd[3 + off + 1];
 
 	cookie = RTE_MBUF_DIRECT(m) ? m : rte_mbuf_from_indirect(m);
@@ -1873,6 +1874,7 @@ cn10k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd,
 	nb_segs = m->nb_segs - 1;
 	m_next = m->next;
 	m->next = NULL;
+	m->nb_segs = 1;
 	m = m_next;
 	/* Fill mbuf segments */
 	do {
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index 3596651cc2..94acbe64fa 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -647,6 +647,10 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 	rte_io_wmb();
 #else
 	RTE_SET_USED(cookie);
+#endif
+#ifdef RTE_ENABLE_ASSERT
+	m->next = NULL;
+	m->nb_segs = 1;
 #endif
 	m = m_next;
 	if (!m)
@@ -683,6 +687,9 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 			sg_u = sg->u;
 			slist++;
 		}
+#ifdef RTE_ENABLE_ASSERT
+		m->next = NULL;
+#endif
 		m = m_next;
 	} while (nb_segs);
 
@@ -696,6 +703,9 @@ cn9k_nix_prepare_mseg(struct cn9k_eth_txq *txq,
 	segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
 	send_hdr->w0.sizem1 = segdw - 1;
 
+#ifdef RTE_ENABLE_ASSERT
+	rte_io_wmb();
+#endif
 	return segdw;
 }
 
@@ -912,6 +922,10 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 	RTE_SET_USED(cookie);
 #endif
 
+#ifdef RTE_ENABLE_ASSERT
+	m->next = NULL;
+	m->nb_segs = 1;
+#endif
 	m = m_next;
 	/* Fill mbuf segments */
 	do {
@@ -942,6 +956,9 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 			sg_u = sg->u;
 			slist++;
 		}
+#ifdef RTE_ENABLE_ASSERT
+		m->next = NULL;
+#endif
 		m = m_next;
 	} while (nb_segs);
 
@@ -957,6 +974,9 @@ cn9k_nix_prepare_mseg_vec_list(struct cn9k_eth_txq *txq,
 		 !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
 	send_hdr->w0.sizem1 = segdw - 1;
 
+#ifdef RTE_ENABLE_ASSERT
+	rte_io_wmb();
+#endif
 	return segdw;
 }
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 43+ messages in thread

* Re: [PATCH v5 14/14] net/cnxk: fix mbuf fields in multi-seg Tx path
  2024-02-26 13:35   ` [PATCH v5 14/14] net/cnxk: fix mbuf fields in multi-seg Tx path Nithin Dabilpuram
@ 2024-02-29 18:23     ` Jerin Jacob
  0 siblings, 0 replies; 43+ messages in thread
From: Jerin Jacob @ 2024-02-29 18:23 UTC (permalink / raw)
  To: Nithin Dabilpuram
  Cc: Kiran Kumar K, Sunil Kumar Kori, Satha Rao, dev, Rahul Bhansali

On Mon, Feb 26, 2024 at 7:07 PM Nithin Dabilpuram
<ndabilpuram@marvell.com> wrote:
>
> From: Rahul Bhansali <rbhansali@marvell.com>
>
> Currently in debug mode when a buffer is allocated in SW,
> nb_segs will have invalid values as it didn't come from driver
> Rx path. Hence reset mbuf next and nb_segs fields in multi-seg Tx path.
>
> Fixes: 3626d5195d49 ("net/cnxk: add multi-segment Tx for CN10K")
>
> Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>


Series applied to dpdk-next-net-mrvl/for-main. Thanks

^ permalink raw reply	[flat|nested] 43+ messages in thread

end of thread, other threads:[~2024-02-29 18:23 UTC | newest]

Thread overview: 43+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-02-08  8:59 [PATCH 01/13] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
2024-02-08  8:59 ` [PATCH 02/13] net/cnxk: add IPsec SA defines for PMD API Nithin Dabilpuram
2024-02-08  8:59 ` [PATCH 03/13] net/cnxk: add transport mode to security capability on cn9k Nithin Dabilpuram
2024-02-08  8:59 ` [PATCH 04/13] common/cnxk: dump selected SQ entries Nithin Dabilpuram
2024-02-08  8:59 ` [PATCH 05/13] net/cnxk: added Tx descriptor dump API Nithin Dabilpuram
2024-02-08  8:59 ` [PATCH 06/13] net/cnxk: fix issue with buff size compute Nithin Dabilpuram
2024-02-08  8:59 ` [PATCH 07/13] common/cnxk: skip setting Tx MTU separately Nithin Dabilpuram
2024-02-08  8:59 ` [PATCH 08/13] net/cnxk: fix max MTU limit Nithin Dabilpuram
2024-02-08  8:59 ` [PATCH 09/13] common/cnxk: update queue entries copy in RETA table Nithin Dabilpuram
2024-02-08  8:59 ` [PATCH 10/13] net/cnxk: fix indirect mbuf handling in Tx path Nithin Dabilpuram
2024-02-08  8:59 ` [PATCH 11/13] net/cnxk: fix check cookies for multi-seg offload Nithin Dabilpuram
2024-02-08  8:59 ` [PATCH 12/13] common/cnxk: add IO attribute to mbox structs Nithin Dabilpuram
2024-02-08  8:59 ` [PATCH 13/13] common/cnxk: use SQ enable and disable API Nithin Dabilpuram
2024-02-09  9:28   ` Jerin Jacob
2024-02-22 11:02 ` [PATCH v4 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 02/14] net/cnxk: add IPsec SA defines for PMD API Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 03/14] net/cnxk: add transport mode to security capability on cn9k Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 04/14] common/cnxk: dump selected SQ entries Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 05/14] net/cnxk: added Tx descriptor dump API Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 06/14] net/cnxk: fix issue with buff size compute Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 07/14] common/cnxk: skip setting Tx MTU separately Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 08/14] net/cnxk: fix max MTU limit Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 09/14] common/cnxk: update queue entries copy in RETA table Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 10/14] net/cnxk: fix indirect mbuf handling in Tx path Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 11/14] net/cnxk: fix check cookies for multi-seg offload Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 12/14] common/cnxk: add IO attribute to mbox structs Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 13/14] common/cnxk: use SQ enable and disable API Nithin Dabilpuram
2024-02-22 11:02   ` [PATCH v4 14/14] net/cnxk: reset mbuf fields in multi-seg Tx path Nithin Dabilpuram
2024-02-26 13:35 ` [PATCH v5 01/14] common/cnxk: remove cn9k Inline IPsec FP opcode defines Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 02/14] net/cnxk: add IPsec SA defines for PMD API Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 03/14] net/cnxk: add transport mode to security capability on cn9k Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 04/14] common/cnxk: dump selected SQ entries Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 05/14] net/cnxk: added Tx descriptor dump API Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 06/14] net/cnxk: fix issue with buff size compute Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 07/14] common/cnxk: fix Tx MTU configuration Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 08/14] net/cnxk: fix max MTU limit Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 09/14] common/cnxk: fix RETA table config API Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 10/14] net/cnxk: fix indirect mbuf handling in Tx path Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 11/14] net/cnxk: fix check cookies for multi-seg offload Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 12/14] common/cnxk: fix mbox struct attributes Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 13/14] common/cnxk: use SQ enable and disable API Nithin Dabilpuram
2024-02-26 13:35   ` [PATCH v5 14/14] net/cnxk: fix mbuf fields in multi-seg Tx path Nithin Dabilpuram
2024-02-29 18:23     ` Jerin Jacob
