DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH 1/3] common/cnxk: enable packet marking
@ 2022-02-24  9:57 skoteshwar
  2022-02-24  9:57 ` [PATCH 2/3] net/cnxk: event/cnxk: enable packet marking callbacks skoteshwar
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: skoteshwar @ 2022-02-24  9:57 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

cnxk platforms support packet marking when TM is enabled with
valid shaper rates. The VLAN DEI, IP ECN, or IP DSCP fields inside
the packet will be updated based on the mark flags selected.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/meson.build       |   1 +
 drivers/common/cnxk/roc_nix.h         |  21 +++
 drivers/common/cnxk/roc_nix_priv.h    |  23 ++-
 drivers/common/cnxk/roc_nix_tm.c      |   4 +
 drivers/common/cnxk/roc_nix_tm_mark.c | 295 ++++++++++++++++++++++++++++++++++
 drivers/common/cnxk/version.map       |   2 +
 6 files changed, 343 insertions(+), 3 deletions(-)
 create mode 100644 drivers/common/cnxk/roc_nix_tm_mark.c

diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 2834846..6f80827 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -44,6 +44,7 @@ sources = files(
         'roc_nix_rss.c',
         'roc_nix_stats.c',
         'roc_nix_tm.c',
+        'roc_nix_tm_mark.c',
         'roc_nix_tm_ops.c',
         'roc_nix_tm_utils.c',
         'roc_nix_vlan.c',
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 10e8375..5e6eb58 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -570,6 +570,22 @@ struct roc_nix_tm_node_stats {
 	uint64_t stats[ROC_NIX_TM_NODE_STATS_MAX];
 };
 
+enum roc_nix_tm_mark {
+	ROC_NIX_TM_MARK_VLAN_DEI,
+	ROC_NIX_TM_MARK_IPV4_DSCP,
+	ROC_NIX_TM_MARK_IPV4_ECN,
+	ROC_NIX_TM_MARK_IPV6_DSCP,
+	ROC_NIX_TM_MARK_IPV6_ECN,
+	ROC_NIX_TM_MARK_MAX
+};
+
+enum roc_nix_tm_mark_color {
+	ROC_NIX_TM_MARK_COLOR_Y,
+	ROC_NIX_TM_MARK_COLOR_R,
+	ROC_NIX_TM_MARK_COLOR_Y_R,
+	ROC_NIX_TM_MARK_COLOR_MAX
+};
+
 int __roc_api roc_nix_tm_node_add(struct roc_nix *roc_nix,
 				  struct roc_nix_tm_node *roc_node);
 int __roc_api roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
@@ -646,6 +662,11 @@ int __roc_api roc_nix_tm_node_name_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
+int __roc_api roc_nix_tm_mark_config(struct roc_nix *roc_nix,
+				     enum roc_nix_tm_mark type, int mark_yellow,
+				     int mark_red);
+uint64_t __roc_api roc_nix_tm_mark_format_get(struct roc_nix *roc_nix,
+					      uint64_t *flags);
 
 /* Ingress Policer API */
 int __roc_api roc_nix_bpf_timeunit_get(struct roc_nix *roc_nix,
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 2bc228c..d77c905 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -36,9 +36,22 @@ struct nix_qint {
 #define NIX_TM_CHAN_INVALID UINT16_MAX
 
 /* TM flags */
-#define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
-#define NIX_TM_TL1_NO_SP     BIT_ULL(1)
-#define NIX_TM_TL1_ACCESS    BIT_ULL(2)
+#define NIX_TM_HIERARCHY_ENA	BIT_ULL(0)
+#define NIX_TM_TL1_NO_SP	BIT_ULL(1)
+#define NIX_TM_TL1_ACCESS	BIT_ULL(2)
+#define NIX_TM_MARK_VLAN_DEI_EN BIT_ULL(3)
+#define NIX_TM_MARK_IP_DSCP_EN	BIT_ULL(4)
+#define NIX_TM_MARK_IP_ECN_EN	BIT_ULL(5)
+
+#define NIX_TM_MARK_EN_MASK                                                    \
+	(NIX_TM_MARK_IP_DSCP_EN | NIX_TM_MARK_IP_ECN_EN |                      \
+	 NIX_TM_MARK_VLAN_DEI_EN)
+
+#define NIX_TM_MARK_VLAN_DEI_SHIFT  0 /* Leave 16b for VLAN for FP logic */
+#define NIX_TM_MARK_IPV4_DSCP_SHIFT 16
+#define NIX_TM_MARK_IPV6_DSCP_SHIFT 24
+#define NIX_TM_MARK_IPV4_ECN_SHIFT  32
+#define NIX_TM_MARK_IPV6_ECN_SHIFT  40
 
 struct nix_tm_tb {
 	/** Token bucket rate (bytes per second) */
@@ -170,6 +183,9 @@ struct nix {
 	uint16_t tm_link_cfg_lvl;
 	uint16_t contig_rsvd[NIX_TXSCH_LVL_CNT];
 	uint16_t discontig_rsvd[NIX_TXSCH_LVL_CNT];
+	uint64_t tm_markfmt_en;
+	uint8_t tm_markfmt_null;
+	uint8_t tm_markfmt[ROC_NIX_TM_MARK_MAX][ROC_NIX_TM_MARK_COLOR_MAX];
 
 	/* Ipsec info */
 	uint16_t cpt_msixoff[MAX_RVU_BLKLF_CNT];
@@ -384,6 +400,7 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
 			 bool enable);
 void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
+int nix_tm_mark_init(struct nix *nix);
 
 /*
  * TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index ecf3edf..5b23ecd 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -1692,6 +1692,10 @@
 		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
 	}
 
+	rc = nix_tm_mark_init(nix);
+	if (rc)
+		goto exit;
+
 	/* Disable TL1 Static Priority when VF's are enabled
 	 * as otherwise VF's TL2 reallocation will be needed
 	 * runtime to support a specific topology of PF.
diff --git a/drivers/common/cnxk/roc_nix_tm_mark.c b/drivers/common/cnxk/roc_nix_tm_mark.c
new file mode 100644
index 0000000..64cf679
--- /dev/null
+++ b/drivers/common/cnxk/roc_nix_tm_mark.c
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+static const uint8_t y_mask_val[ROC_NIX_TM_MARK_MAX][2] = {
+	[ROC_NIX_TM_MARK_VLAN_DEI] = {0x0, 0x8},
+	[ROC_NIX_TM_MARK_IPV4_DSCP] = {0x1, 0x2},
+	[ROC_NIX_TM_MARK_IPV4_ECN] = {0x0, 0xc},
+	[ROC_NIX_TM_MARK_IPV6_DSCP] = {0x1, 0x2},
+	[ROC_NIX_TM_MARK_IPV6_ECN] = {0x0, 0x3},
+};
+
+static const uint8_t r_mask_val[ROC_NIX_TM_MARK_MAX][2] = {
+	[ROC_NIX_TM_MARK_VLAN_DEI] = {0x0, 0x8},
+	[ROC_NIX_TM_MARK_IPV4_DSCP] = {0x0, 0x3},
+	[ROC_NIX_TM_MARK_IPV4_ECN] = {0x0, 0xc},
+	[ROC_NIX_TM_MARK_IPV6_DSCP] = {0x0, 0x3},
+	[ROC_NIX_TM_MARK_IPV6_ECN] = {0x0, 0x3},
+};
+
+static const uint8_t mark_off[ROC_NIX_TM_MARK_MAX] = {
+	[ROC_NIX_TM_MARK_VLAN_DEI] = 0x3,  /* Byte 14 Bit[4:1] */
+	[ROC_NIX_TM_MARK_IPV4_DSCP] = 0x1, /* Byte 1 Bit[6:3] */
+	[ROC_NIX_TM_MARK_IPV4_ECN] = 0x6, /* Byte 1 Bit[1:0], Byte 2 Bit[7:6] */
+	[ROC_NIX_TM_MARK_IPV6_DSCP] = 0x5, /* Byte 0 Bit[2:0], Byte 1 Bit[7] */
+	[ROC_NIX_TM_MARK_IPV6_ECN] = 0x0,  /* Byte 1 Bit[7:4] */
+};
+
+static const uint64_t mark_flag[ROC_NIX_TM_MARK_MAX] = {
+	[ROC_NIX_TM_MARK_VLAN_DEI] = NIX_TM_MARK_VLAN_DEI_EN,
+	[ROC_NIX_TM_MARK_IPV4_DSCP] = NIX_TM_MARK_IP_DSCP_EN,
+	[ROC_NIX_TM_MARK_IPV4_ECN] = NIX_TM_MARK_IP_ECN_EN,
+	[ROC_NIX_TM_MARK_IPV6_DSCP] = NIX_TM_MARK_IP_DSCP_EN,
+	[ROC_NIX_TM_MARK_IPV6_ECN] = NIX_TM_MARK_IP_ECN_EN,
+};
+
+static uint8_t
+prepare_tm_shaper_red_algo(struct nix_tm_node *tm_node, volatile uint64_t *reg,
+			   volatile uint64_t *regval,
+			   volatile uint64_t *regval_mask)
+{
+	uint32_t schq = tm_node->hw_id;
+	uint8_t k = 0;
+
+	plt_tm_dbg("Shaper read alg node %s(%u) lvl %u id %u, red_alg %x (%p)",
+		   nix_tm_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
+		   tm_node->id, tm_node->red_algo, tm_node);
+
+	/* Configure just RED algo */
+	regval[k] = ((uint64_t)tm_node->red_algo << 9);
+	regval_mask[k] = ~(BIT_ULL(10) | BIT_ULL(9));
+
+	switch (tm_node->hw_lvl) {
+	case NIX_TXSCH_LVL_SMQ:
+		reg[k] = NIX_AF_MDQX_SHAPE(schq);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL4:
+		reg[k] = NIX_AF_TL4X_SHAPE(schq);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL3:
+		reg[k] = NIX_AF_TL3X_SHAPE(schq);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL2:
+		reg[k] = NIX_AF_TL2X_SHAPE(schq);
+		k++;
+		break;
+	default:
+		break;
+	}
+
+	return k;
+}
+
+/* Only called while device is stopped */
+static int
+nix_tm_update_red_algo(struct nix *nix, bool red_send)
+{
+	struct mbox *mbox = (&nix->dev)->mbox;
+	struct nix_txschq_config *req;
+	struct nix_tm_node_list *list;
+	struct nix_tm_node *tm_node;
+	uint8_t k;
+	int rc;
+
+	list = nix_tm_node_list(nix, nix->tm_tree);
+	TAILQ_FOREACH(tm_node, list, node) {
+		/* Skip leaf nodes */
+		if (nix_tm_is_leaf(nix, tm_node->lvl))
+			continue;
+
+		if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1)
+			continue;
+
+		/* Skip if no update of red_algo is needed */
+		if ((red_send && (tm_node->red_algo == NIX_REDALG_SEND)) ||
+		    (!red_send && (tm_node->red_algo != NIX_REDALG_SEND)))
+			continue;
+
+		/* Update Red algo */
+		if (red_send)
+			tm_node->red_algo = NIX_REDALG_SEND;
+		else
+			tm_node->red_algo = NIX_REDALG_STD;
+
+		/* Update txschq config  */
+		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+		req->lvl = tm_node->hw_lvl;
+		k = prepare_tm_shaper_red_algo(tm_node, req->reg, req->regval,
+					       req->regval_mask);
+		req->num_regs = k;
+
+		rc = mbox_process(mbox);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/* Returns true if queue reconfig is needed */
+static bool
+nix_tm_update_markfmt(struct nix *nix, enum roc_nix_tm_mark type,
+		      int mark_yellow, int mark_red)
+{
+	uint64_t new_markfmt, old_markfmt;
+	uint8_t *tm_markfmt;
+	uint8_t en_shift;
+	uint64_t mask;
+
+	if (type >= ROC_NIX_TM_MARK_MAX)
+		return false;
+
+	/* Pre-allocated mark formats for type:color combinations */
+	tm_markfmt = nix->tm_markfmt[type];
+
+	if (!mark_yellow && !mark_red) {
+		/* Null format to disable */
+		new_markfmt = nix->tm_markfmt_null;
+	} else {
+		/* Marking enabled with combination of yellow and red */
+		if (mark_yellow && mark_red)
+			new_markfmt = tm_markfmt[ROC_NIX_TM_MARK_COLOR_Y_R];
+		else if (mark_yellow)
+			new_markfmt = tm_markfmt[ROC_NIX_TM_MARK_COLOR_Y];
+		else
+			new_markfmt = tm_markfmt[ROC_NIX_TM_MARK_COLOR_R];
+	}
+
+	mask = 0xFFull;
+	/* Format of fast path markfmt
+	 * ipv6_ecn[8]:ipv4_ecn[8]:ipv6_dscp[8]:ipv4_dscp[8]:vlan_dei[16]
+	 * fmt[7] = ptr offset for IPv4/IPv6 on l2_len.
+	 * fmt[6:0] = markfmt idx.
+	 */
+	switch (type) {
+	case ROC_NIX_TM_MARK_VLAN_DEI:
+		en_shift = NIX_TM_MARK_VLAN_DEI_SHIFT;
+		mask = 0xFFFFull;
+		new_markfmt |= new_markfmt << 8;
+		break;
+	case ROC_NIX_TM_MARK_IPV4_DSCP:
+		new_markfmt |= BIT_ULL(7);
+		en_shift = NIX_TM_MARK_IPV4_DSCP_SHIFT;
+		break;
+	case ROC_NIX_TM_MARK_IPV4_ECN:
+		new_markfmt |= BIT_ULL(7);
+		en_shift = NIX_TM_MARK_IPV4_ECN_SHIFT;
+		break;
+	case ROC_NIX_TM_MARK_IPV6_DSCP:
+		en_shift = NIX_TM_MARK_IPV6_DSCP_SHIFT;
+		break;
+	case ROC_NIX_TM_MARK_IPV6_ECN:
+		new_markfmt |= BIT_ULL(7);
+		en_shift = NIX_TM_MARK_IPV6_ECN_SHIFT;
+		break;
+	default:
+		return false;
+	}
+
+	/* Skip if same as old config */
+	old_markfmt = (nix->tm_markfmt_en >> en_shift) & mask;
+	if (old_markfmt == new_markfmt)
+		return false;
+
+	/* Need queue reconfig */
+	nix->tm_markfmt_en &= ~(mask << en_shift);
+	nix->tm_markfmt_en |= (new_markfmt << en_shift);
+
+	return true;
+}
+
+int
+nix_tm_mark_init(struct nix *nix)
+{
+	struct mbox *mbox = (&nix->dev)->mbox;
+	struct nix_mark_format_cfg_rsp *rsp;
+	struct nix_mark_format_cfg *req;
+	int rc, i, j;
+
+	/* Check for supported revisions */
+	if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
+		return 0;
+
+	/* Null mark format */
+	req = mbox_alloc_msg_nix_mark_format_cfg(mbox);
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc) {
+		plt_err("TM failed to alloc null mark format, rc=%d", rc);
+		goto exit;
+	}
+
+	nix->tm_markfmt_null = rsp->mark_format_idx;
+
+	/* Alloc vlan, dscp, ecn mark formats */
+	for (i = 0; i < ROC_NIX_TM_MARK_MAX; i++) {
+		for (j = 0; j < ROC_NIX_TM_MARK_COLOR_MAX; j++) {
+			req = mbox_alloc_msg_nix_mark_format_cfg(mbox);
+			req->offset = mark_off[i];
+
+			switch (j) {
+			case ROC_NIX_TM_MARK_COLOR_Y:
+				req->y_mask = y_mask_val[i][0];
+				req->y_val = y_mask_val[i][1];
+				break;
+			case ROC_NIX_TM_MARK_COLOR_R:
+				req->r_mask = r_mask_val[i][0];
+				req->r_val = r_mask_val[i][1];
+				break;
+			case ROC_NIX_TM_MARK_COLOR_Y_R:
+				req->y_mask = y_mask_val[i][0];
+				req->y_val = y_mask_val[i][1];
+				req->r_mask = r_mask_val[i][0];
+				req->r_val = r_mask_val[i][1];
+				break;
+			}
+
+			rc = mbox_process_msg(mbox, (void *)&rsp);
+			if (rc) {
+				plt_err("TM failed to alloc mark fmt "
+					"type %u color %u, rc=%d",
+					i, j, rc);
+				goto exit;
+			}
+
+			nix->tm_markfmt[i][j] = rsp->mark_format_idx;
+			plt_tm_dbg("Mark type: %u, Mark Color:%u, id:%u\n", i,
+				   j, nix->tm_markfmt[i][j]);
+		}
+	}
+	/* Update null mark format as default */
+	nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_VLAN_DEI, 0, 0);
+	nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV4_DSCP, 0, 0);
+	nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV4_ECN, 0, 0);
+	nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV6_DSCP, 0, 0);
+	nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV6_ECN, 0, 0);
+exit:
+	return rc;
+}
+
+int
+roc_nix_tm_mark_config(struct roc_nix *roc_nix, enum roc_nix_tm_mark type,
+		       int mark_yellow, int mark_red)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	int rc;
+
+	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
+		return -EINVAL;
+
+	rc = nix_tm_update_markfmt(nix, type, mark_yellow, mark_red);
+	if (!rc)
+		return 0;
+
+	if (!mark_yellow && !mark_red)
+		nix->tm_flags &= ~mark_flag[type];
+	else
+		nix->tm_flags |= mark_flag[type];
+
+	/* Update red algo for change in mark_red */
+	return nix_tm_update_red_algo(nix, !!mark_red);
+}
+
+uint64_t
+roc_nix_tm_mark_format_get(struct roc_nix *roc_nix, uint64_t *flags)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	*flags = ((nix->tm_flags & NIX_TM_MARK_EN_MASK) >> 3);
+	return nix->tm_markfmt_en;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 21f94e8..a41dc26 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -253,6 +253,8 @@ INTERNAL {
 	roc_nix_tm_leaf_cnt;
 	roc_nix_tm_lvl_have_link_access;
 	roc_nix_tm_lvl_is_leaf;
+	roc_nix_tm_mark_config;
+	roc_nix_tm_mark_format_get;
 	roc_nix_tm_max_prio;
 	roc_nix_tm_node_add;
 	roc_nix_tm_node_delete;
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 2/3] net/cnxk: event/cnxk: enable packet marking callbacks
  2022-02-24  9:57 [PATCH 1/3] common/cnxk: enable packet marking skoteshwar
@ 2022-02-24  9:57 ` skoteshwar
  2022-02-24  9:57 ` [PATCH 3/3] common/cnxk: check SQ node before setting bp config skoteshwar
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 8+ messages in thread
From: skoteshwar @ 2022-02-24  9:57 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton, Nithin Dabilpuram,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

The cnxk platform supports red/yellow packet marking based on TM
configuration. This patch sets hooks to enable/disable packet
marking for VLAN DEI, IP DSCP and IP ECN. Marking is enabled only
in scalar mode.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/event/cnxk/cn10k_worker.h  |   3 +-
 drivers/event/cnxk/cn9k_worker.h   |   3 +-
 drivers/net/cnxk/cn10k_ethdev.c    | 138 +++++++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cn10k_ethdev.h    |   2 +
 drivers/net/cnxk/cn10k_tx.h        |  45 +++++++++++-
 drivers/net/cnxk/cn10k_tx_select.c |   2 +-
 drivers/net/cnxk/cn9k_ethdev.c     | 137 ++++++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cn9k_ethdev.h     |   2 +
 drivers/net/cnxk/cn9k_tx.c         |   2 +-
 drivers/net/cnxk/cn9k_tx.h         |  48 +++++++++++--
 drivers/net/cnxk/cn9k_tx_select.c  |   2 +-
 drivers/net/cnxk/cnxk_ethdev.h     |  23 +++++++
 drivers/net/cnxk/cnxk_tm.c         | 125 +++++++++++++++++++++++++++++++--
 13 files changed, 514 insertions(+), 18 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index e8255ea..0a41a2e 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -499,7 +499,8 @@ uint16_t __rte_hot cn10k_sso_hws_ca_enq(void *port, struct rte_event ev[],
 	if (flags & NIX_TX_OFFLOAD_TSO_F)
 		cn10k_nix_xmit_prepare_tso(m, flags);
 
-	cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec);
+	cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec,
+			       txq->mark_flag, txq->mark_fmt);
 
 	laddr = lmt_addr;
 	/* Prepare CPT instruction and get nixtx addr if
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 2107ff7..24ce05b 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -762,7 +762,8 @@ uint16_t __rte_hot cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[],
 		rte_io_wmb();
 	txq = cn9k_sso_hws_xtract_meta(m, txq_data);
 	cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
-	cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
+	cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag,
+			      txq->mark_fmt);
 
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
 		uint64_t ol_flags = m->ol_flags;
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index c6890f3..24193dc 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -110,6 +110,9 @@
 	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
+	if (dev->tx_mark)
+		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+
 	return flags;
 }
 
@@ -169,6 +172,7 @@
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	struct roc_nix *nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
 	struct roc_cpt_lf *inl_lf;
 	struct cn10k_eth_txq *txq;
 	struct roc_nix_sq *sq;
@@ -206,6 +210,11 @@
 		PLT_STATIC_ASSERT(ROC_NIX_INL_SA_BASE_ALIGN == BIT_ULL(16));
 	}
 
+	/* Restore marking flag from roc */
+	mark_fmt = roc_nix_tm_mark_format_get(nix, &mark_flag);
+	txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+	txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+
 	nix_form_default_desc(dev, txq, qid);
 	txq->lso_tun_fmt = dev->lso_tun_fmt;
 	return 0;
@@ -478,6 +487,118 @@
 	return 0;
 }
 
+static int
+cn10k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+			   int mark_yellow, int mark_red,
+			   struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	int rc, i;
+
+	rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
+				       mark_red, error);
+
+	if (rc)
+		goto exit;
+
+	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+	if (mark_flag) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+		dev->tx_mark = true;
+	} else {
+		dev->tx_mark = false;
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+	}
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+	}
+	cn10k_eth_set_tx_function(eth_dev);
+exit:
+	return rc;
+}
+
+static int
+cn10k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+			 int mark_yellow, int mark_red,
+			 struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	int rc, i;
+
+	rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
+				     error);
+	if (rc)
+		goto exit;
+
+	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+	if (mark_flag) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+		dev->tx_mark = true;
+	} else {
+		dev->tx_mark = false;
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+	}
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+	}
+	cn10k_eth_set_tx_function(eth_dev);
+exit:
+	return rc;
+}
+
+static int
+cn10k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+			  int mark_yellow, int mark_red,
+			  struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	int rc, i;
+
+	rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
+				      mark_red, error);
+	if (rc)
+		goto exit;
+
+	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+	if (mark_flag) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+		dev->tx_mark = true;
+	} else {
+		dev->tx_mark = false;
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+	}
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+	}
+	cn10k_eth_set_tx_function(eth_dev);
+exit:
+	return rc;
+}
+
 /* Update platform specific eth dev ops */
 static void
 nix_eth_dev_ops_override(void)
@@ -501,6 +622,22 @@
 		cn10k_nix_rx_metadata_negotiate;
 }
 
+/* Update platform specific tm ops */
+static void
+nix_tm_ops_override(void)
+{
+	static int init_once;
+
+	if (init_once)
+		return;
+	init_once = 1;
+
+	/* Update platform specific ops */
+	cnxk_tm_ops.mark_vlan_dei = cn10k_nix_tm_mark_vlan_dei;
+	cnxk_tm_ops.mark_ip_ecn = cn10k_nix_tm_mark_ip_ecn;
+	cnxk_tm_ops.mark_ip_dscp = cn10k_nix_tm_mark_ip_dscp;
+}
+
 static void
 npc_flow_ops_override(void)
 {
@@ -540,6 +677,7 @@
 	}
 
 	nix_eth_dev_ops_override();
+	nix_tm_ops_override();
 	npc_flow_ops_override();
 
 	cn10k_eth_sec_ops_override();
diff --git a/drivers/net/cnxk/cn10k_ethdev.h b/drivers/net/cnxk/cn10k_ethdev.h
index fd72730..a3447a0 100644
--- a/drivers/net/cnxk/cn10k_ethdev.h
+++ b/drivers/net/cnxk/cn10k_ethdev.h
@@ -21,6 +21,8 @@ struct cn10k_eth_txq {
 	uint16_t cpt_desc;
 	uint64_t lso_tun_fmt;
 	uint64_t ts_mem;
+	uint64_t mark_flag : 8;
+	uint64_t mark_fmt : 48;
 } __plt_cache_aligned;
 
 struct cn10k_eth_rxq {
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index ec63661..542b64d 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -511,13 +511,16 @@
 
 static __rte_always_inline void
 cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
-		       const uint64_t lso_tun_fmt, bool *sec)
+		       const uint64_t lso_tun_fmt, bool *sec, uint8_t mark_flag,
+		       uint64_t mark_fmt)
 {
+	uint8_t mark_off = 0, mark_vlan = 0, markptr = 0;
 	struct nix_send_ext_s *send_hdr_ext;
 	struct nix_send_hdr_s *send_hdr;
 	uint64_t ol_flags = 0, mask;
 	union nix_send_hdr_w1_u w1;
 	union nix_send_sg_s *sg;
+	uint16_t mark_form = 0;
 
 	send_hdr = (struct nix_send_hdr_s *)cmd;
 	if (flags & NIX_TX_NEED_EXT_HDR) {
@@ -525,7 +528,9 @@
 		sg = (union nix_send_sg_s *)(cmd + 4);
 		/* Clear previous markings */
 		send_hdr_ext->w0.lso = 0;
+		send_hdr_ext->w0.mark_en = 0;
 		send_hdr_ext->w1.u = 0;
+		ol_flags = m->ol_flags;
 	} else {
 		sg = (union nix_send_sg_s *)(cmd + 2);
 	}
@@ -621,6 +626,10 @@
 	}
 
 	if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+		const uint8_t ipv6 = !!(ol_flags & RTE_MBUF_F_TX_IPV6);
+		const uint8_t ip = !!(ol_flags & (RTE_MBUF_F_TX_IPV4 |
+						  RTE_MBUF_F_TX_IPV6));
+
 		send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
 		/* HW will update ptr after vlan0 update */
 		send_hdr_ext->w1.vlan1_ins_ptr = 12;
@@ -630,6 +639,22 @@
 		/* 2B before end of l2 header */
 		send_hdr_ext->w1.vlan0_ins_ptr = 12;
 		send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
+		/* Fill for VLAN marking only when VLAN insertion enabled */
+		mark_vlan = ((mark_flag & CNXK_TM_MARK_VLAN_DEI) &
+			     (send_hdr_ext->w1.vlan1_ins_ena ||
+			      send_hdr_ext->w1.vlan0_ins_ena));
+
+		/* Mask requested flags with packet data information */
+		mark_off = mark_flag & ((ip << 2) | (ip << 1) | mark_vlan);
+		mark_off = ffs(mark_off & CNXK_TM_MARK_MASK);
+
+		mark_form = (mark_fmt >> ((mark_off - !!mark_off) << 4));
+		mark_form = (mark_form >> (ipv6 << 3)) & 0xFF;
+		markptr = m->l2_len + (mark_form >> 7) - (mark_vlan << 2);
+
+		send_hdr_ext->w0.mark_en = !!mark_off;
+		send_hdr_ext->w0.markform = mark_form & 0x7F;
+		send_hdr_ext->w0.markptr = markptr;
 	}
 
 	if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
@@ -841,6 +866,8 @@
 	uintptr_t pa, lbase = txq->lmt_base;
 	uint16_t lmt_id, burst, left, i;
 	uintptr_t c_lbase = lbase;
+	uint64_t mark_fmt = 0;
+	uint8_t mark_flag = 0;
 	rte_iova_t c_io_addr;
 	uint64_t lso_tun_fmt;
 	uint16_t c_lmt_id;
@@ -860,6 +887,11 @@
 	if (flags & NIX_TX_OFFLOAD_TSO_F)
 		lso_tun_fmt = txq->lso_tun_fmt;
 
+	if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+		mark_fmt = txq->mark_fmt;
+		mark_flag = txq->mark_flag;
+	}
+
 	/* Get LMT base address and LMT ID as lcore id */
 	ROC_LMT_BASE_ID_GET(lbase, lmt_id);
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
@@ -887,7 +919,7 @@
 			cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
 
 		cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
-				       &sec);
+				       &sec, mark_flag, mark_fmt);
 
 		laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
 
@@ -967,6 +999,8 @@
 	uint16_t segdw, lmt_id, burst, left, i;
 	uint8_t lnum, c_lnum, c_loff;
 	uintptr_t c_lbase = lbase;
+	uint64_t mark_fmt = 0;
+	uint8_t mark_flag = 0;
 	uint64_t data0, data1;
 	rte_iova_t c_io_addr;
 	uint64_t lso_tun_fmt;
@@ -988,6 +1022,11 @@
 	if (flags & NIX_TX_OFFLOAD_TSO_F)
 		lso_tun_fmt = txq->lso_tun_fmt;
 
+	if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+		mark_fmt = txq->mark_fmt;
+		mark_flag = txq->mark_flag;
+	}
+
 	/* Get LMT base address and LMT ID as lcore id */
 	ROC_LMT_BASE_ID_GET(lbase, lmt_id);
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
@@ -1017,7 +1056,7 @@
 			cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
 
 		cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
-				       &sec);
+				       &sec, mark_flag, mark_fmt);
 
 		laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
 
diff --git a/drivers/net/cnxk/cn10k_tx_select.c b/drivers/net/cnxk/cn10k_tx_select.c
index 9fdf014..54023c4 100644
--- a/drivers/net/cnxk/cn10k_tx_select.c
+++ b/drivers/net/cnxk/cn10k_tx_select.c
@@ -53,7 +53,7 @@
 #undef T
 	};
 
-	if (dev->scalar_ena) {
+	if (dev->scalar_ena || dev->tx_mark) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
 		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index d81f9ac..9db66c6 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -110,6 +110,9 @@
 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
+	if (dev->tx_mark)
+		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+
 	return flags;
 }
 
@@ -168,6 +171,7 @@
 			const struct rte_eth_txconf *tx_conf)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint64_t mark_fmt, mark_flag;
 	struct roc_cpt_lf *inl_lf;
 	struct cn9k_eth_txq *txq;
 	struct roc_nix_sq *sq;
@@ -204,6 +208,10 @@
 		PLT_STATIC_ASSERT(BIT_ULL(16) == ROC_NIX_INL_SA_BASE_ALIGN);
 	}
 
+	mark_fmt = roc_nix_tm_mark_format_get(&dev->nix, &mark_flag);
+	txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+	txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+
 	nix_form_default_desc(dev, txq, qid);
 	txq->lso_tun_fmt = dev->lso_tun_fmt;
 	return 0;
@@ -471,6 +479,118 @@
 	return 0;
 }
 
+static int
+cn9k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+			  int mark_yellow, int mark_red,
+			  struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	int rc, i;
+
+	rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
+				       mark_red, error);
+
+	if (rc)
+		goto exit;
+
+	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+	if (mark_flag) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+		dev->tx_mark = true;
+	} else {
+		dev->tx_mark = false;
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+	}
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+	}
+	cn9k_eth_set_tx_function(eth_dev);
+exit:
+	return rc;
+}
+
+static int
+cn9k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+			int mark_yellow, int mark_red,
+			struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	int rc, i;
+
+	rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
+				     error);
+	if (rc)
+		goto exit;
+
+	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+	if (mark_flag) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+		dev->tx_mark = true;
+	} else {
+		dev->tx_mark = false;
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+	}
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+	}
+	cn9k_eth_set_tx_function(eth_dev);
+exit:
+	return rc;
+}
+
+static int
+cn9k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+			 int mark_yellow, int mark_red,
+			 struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	int rc, i;
+
+	rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
+				      mark_red, error);
+	if (rc)
+		goto exit;
+
+	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+	if (mark_flag) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+		dev->tx_mark = true;
+	} else {
+		dev->tx_mark = false;
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+	}
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+	}
+	cn9k_eth_set_tx_function(eth_dev);
+exit:
+	return rc;
+}
+
 /* Update platform specific eth dev ops */
 static void
 nix_eth_dev_ops_override(void)
@@ -494,6 +614,22 @@
 	cnxk_eth_dev_ops.rx_metadata_negotiate = cn9k_nix_rx_metadata_negotiate;
 }
 
+/* Update platform specific eth dev ops */
+static void
+nix_tm_ops_override(void)
+{
+	static int init_once;
+
+	if (init_once)
+		return;
+	init_once = 1;
+
+	/* Update platform specific ops */
+	cnxk_tm_ops.mark_vlan_dei = cn9k_nix_tm_mark_vlan_dei;
+	cnxk_tm_ops.mark_ip_ecn = cn9k_nix_tm_mark_ip_ecn;
+	cnxk_tm_ops.mark_ip_dscp = cn9k_nix_tm_mark_ip_dscp;
+}
+
 static void
 npc_flow_ops_override(void)
 {
@@ -533,6 +669,7 @@
 	}
 
 	nix_eth_dev_ops_override();
+	nix_tm_ops_override();
 	npc_flow_ops_override();
 
 	cn9k_eth_sec_ops_override();
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index 8ab9249..449729f 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -22,6 +22,8 @@ struct cn9k_eth_txq {
 	uint64_t sa_base;
 	uint64_t *cpt_fc;
 	uint16_t cpt_desc;
+	uint64_t mark_flag : 8;
+	uint64_t mark_fmt : 48;
 } __plt_cache_aligned;
 
 struct cn9k_eth_rxq {
diff --git a/drivers/net/cnxk/cn9k_tx.c b/drivers/net/cnxk/cn9k_tx.c
index f99e5d3..b6d224e 100644
--- a/drivers/net/cnxk/cn9k_tx.c
+++ b/drivers/net/cnxk/cn9k_tx.c
@@ -79,7 +79,7 @@
 #undef T
 	};
 
-	if (dev->scalar_ena) {
+	if (dev->scalar_ena || dev->tx_mark) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
 		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index d23e4b6..f55cd4b 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -135,13 +135,16 @@
 
 static __rte_always_inline void
 cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
-		      const uint64_t lso_tun_fmt)
+		      const uint64_t lso_tun_fmt, uint8_t mark_flag,
+		      uint64_t mark_fmt)
 {
+	uint8_t mark_off = 0, mark_vlan = 0, markptr = 0;
 	struct nix_send_ext_s *send_hdr_ext;
 	struct nix_send_hdr_s *send_hdr;
 	uint64_t ol_flags = 0, mask;
 	union nix_send_hdr_w1_u w1;
 	union nix_send_sg_s *sg;
+	uint16_t mark_form = 0;
 
 	send_hdr = (struct nix_send_hdr_s *)cmd;
 	if (flags & NIX_TX_NEED_EXT_HDR) {
@@ -149,7 +152,9 @@
 		sg = (union nix_send_sg_s *)(cmd + 4);
 		/* Clear previous markings */
 		send_hdr_ext->w0.lso = 0;
+		send_hdr_ext->w0.mark_en = 0;
 		send_hdr_ext->w1.u = 0;
+		ol_flags = m->ol_flags;
 	} else {
 		sg = (union nix_send_sg_s *)(cmd + 2);
 	}
@@ -245,6 +250,10 @@
 	}
 
 	if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+		const uint8_t ipv6 = !!(ol_flags & RTE_MBUF_F_TX_IPV6);
+		const uint8_t ip = !!(ol_flags & (RTE_MBUF_F_TX_IPV4 |
+						  RTE_MBUF_F_TX_IPV6));
+
 		send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
 		/* HW will update ptr after vlan0 update */
 		send_hdr_ext->w1.vlan1_ins_ptr = 12;
@@ -254,6 +263,21 @@
 		/* 2B before end of l2 header */
 		send_hdr_ext->w1.vlan0_ins_ptr = 12;
 		send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
+		/* Fill for VLAN marking only when VLAN insertion enabled */
+		mark_vlan = ((mark_flag & CNXK_TM_MARK_VLAN_DEI) &
+			     (send_hdr_ext->w1.vlan1_ins_ena ||
+			      send_hdr_ext->w1.vlan0_ins_ena));
+		/* Mask requested flags with packet data information */
+		mark_off = mark_flag & ((ip << 2) | (ip << 1) | mark_vlan);
+		mark_off = ffs(mark_off & CNXK_TM_MARK_MASK);
+
+		mark_form = (mark_fmt >> ((mark_off - !!mark_off) << 4));
+		mark_form = (mark_form >> (ipv6 << 3)) & 0xFF;
+		markptr = m->l2_len + (mark_form >> 7) - (mark_vlan << 2);
+
+		send_hdr_ext->w0.mark_en = !!mark_off;
+		send_hdr_ext->w0.markform = mark_form & 0x7F;
+		send_hdr_ext->w0.markptr = markptr;
 	}
 
 	if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
@@ -502,8 +526,9 @@
 {
 	struct cn9k_eth_txq *txq = tx_queue;
 	const rte_iova_t io_addr = txq->io_addr;
+	uint64_t lso_tun_fmt, mark_fmt = 0;
 	void *lmt_addr = txq->lmt_addr;
-	uint64_t lso_tun_fmt;
+	uint8_t mark_flag = 0;
 	uint16_t i;
 
 	NIX_XMIT_FC_OR_RETURN(txq, pkts);
@@ -518,6 +543,11 @@
 			cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
 	}
 
+	if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+		mark_fmt = txq->mark_fmt;
+		mark_flag = txq->mark_flag;
+	}
+
 	/* Lets commit any changes in the packet here as no further changes
 	 * to the packet will be done unless no fast free is enabled.
 	 */
@@ -525,7 +555,8 @@
 		rte_io_wmb();
 
 	for (i = 0; i < pkts; i++) {
-		cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
+		cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
+				      mark_flag, mark_fmt);
 		cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags, 4,
 					     flags);
 		cn9k_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
@@ -543,8 +574,9 @@
 {
 	struct cn9k_eth_txq *txq = tx_queue;
 	const rte_iova_t io_addr = txq->io_addr;
+	uint64_t lso_tun_fmt, mark_fmt = 0;
 	void *lmt_addr = txq->lmt_addr;
-	uint64_t lso_tun_fmt;
+	uint8_t mark_flag = 0;
 	uint16_t segdw;
 	uint64_t i;
 
@@ -560,6 +592,11 @@
 			cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
 	}
 
+	if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+		mark_fmt = txq->mark_fmt;
+		mark_flag = txq->mark_flag;
+	}
+
 	/* Lets commit any changes in the packet here as no further changes
 	 * to the packet will be done unless no fast free is enabled.
 	 */
@@ -567,7 +604,8 @@
 		rte_io_wmb();
 
 	for (i = 0; i < pkts; i++) {
-		cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
+		cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
+				      mark_flag, mark_fmt);
 		segdw = cn9k_nix_prepare_mseg(tx_pkts[i], cmd, flags);
 		cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags,
 					     segdw, flags);
diff --git a/drivers/net/cnxk/cn9k_tx_select.c b/drivers/net/cnxk/cn9k_tx_select.c
index 407ede9..62beb1b 100644
--- a/drivers/net/cnxk/cn9k_tx_select.c
+++ b/drivers/net/cnxk/cn9k_tx_select.c
@@ -49,7 +49,7 @@
 #undef T
 	};
 
-	if (dev->scalar_ena) {
+	if (dev->scalar_ena || dev->tx_mark) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
 		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index ce42b86..22be3e1 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -18,6 +18,7 @@
 #include <rte_security_driver.h>
 #include <rte_tailq.h>
 #include <rte_time.h>
+#include <rte_tm_driver.h>
 
 #include "roc_api.h"
 
@@ -139,6 +140,15 @@
 
 #define CNXK_NIX_PFC_CHAN_COUNT 16
 
+#define CNXK_TM_MARK_VLAN_DEI BIT_ULL(0)
+#define CNXK_TM_MARK_IP_DSCP  BIT_ULL(1)
+#define CNXK_TM_MARK_IP_ECN   BIT_ULL(2)
+
+#define CNXK_TM_MARK_MASK                                                      \
+	(CNXK_TM_MARK_VLAN_DEI | CNXK_TM_MARK_IP_DSCP | CNXK_TM_MARK_IP_ECN)
+
+#define CNXK_TX_MARK_FMT_MASK (0xFFFFFFFFFFFFull)
+
 struct cnxk_fc_cfg {
 	enum rte_eth_fc_mode mode;
 	uint8_t rx_pause;
@@ -350,6 +360,7 @@ struct cnxk_eth_dev {
 	uint16_t flags;
 	uint8_t ptype_disable;
 	bool scalar_ena;
+	bool tx_mark;
 	bool ptp_en;
 	bool rx_mark_update; /* Enable/Disable mark update to mbuf */
 
@@ -460,6 +471,9 @@ struct cnxk_eth_txq_sp {
 /* Common security ops */
 extern struct rte_security_ops cnxk_eth_sec_ops;
 
+/* Common tm ops */
+extern struct rte_tm_ops cnxk_tm_ops;
+
 /* Ops */
 int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
 		   struct rte_pci_device *pci_dev);
@@ -536,6 +550,15 @@ int cnxk_nix_timesync_write_time(struct rte_eth_dev *eth_dev,
 int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
 int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
 				     uint16_t queue_idx, uint16_t tx_rate);
+int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+			      int mark_yellow, int mark_red,
+			      struct rte_tm_error *error);
+int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+			    int mark_yellow, int mark_red,
+			    struct rte_tm_error *error);
+int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+			     int mark_yellow, int mark_red,
+			     struct rte_tm_error *error);
 
 /* MTR */
 int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
diff --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c
index 9015a45..d45e70a 100644
--- a/drivers/net/cnxk/cnxk_tm.c
+++ b/drivers/net/cnxk/cnxk_tm.c
@@ -88,10 +88,16 @@
 			  RTE_TM_STATS_N_PKTS_RED_DROPPED |
 			  RTE_TM_STATS_N_BYTES_RED_DROPPED;
 
-	for (i = 0; i < RTE_COLORS; i++) {
-		cap->mark_vlan_dei_supported[i] = false;
-		cap->mark_ip_ecn_tcp_supported[i] = false;
-		cap->mark_ip_dscp_supported[i] = false;
+	cap->mark_vlan_dei_supported[RTE_COLOR_GREEN] = false;
+	cap->mark_ip_ecn_tcp_supported[RTE_COLOR_GREEN] = false;
+	cap->mark_ip_ecn_sctp_supported[RTE_COLOR_GREEN] = false;
+	cap->mark_ip_dscp_supported[RTE_COLOR_GREEN] = false;
+
+	for (i = RTE_COLOR_YELLOW; i < RTE_COLORS; i++) {
+		cap->mark_vlan_dei_supported[i] = true;
+		cap->mark_ip_ecn_tcp_supported[i] = true;
+		cap->mark_ip_ecn_sctp_supported[i] = true;
+		cap->mark_ip_dscp_supported[i] = true;
 	}
 
 	return 0;
@@ -599,7 +605,112 @@
 	return rc;
 }
 
-const struct rte_tm_ops cnxk_tm_ops = {
+int
+cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+			  int mark_yellow, int mark_red,
+			  struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	int rc;
+
+	if (mark_green) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "Green VLAN marking not supported";
+		return -EINVAL;
+	}
+
+	if (eth_dev->data->dev_started) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "VLAN DEI mark for running ports not "
+				 "supported";
+		return -EBUSY;
+	}
+
+	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_VLAN_DEI,
+				    mark_yellow, mark_red);
+	if (rc) {
+		error->type = roc_nix_tm_err_to_rte_err(rc);
+		error->message = roc_error_msg_get(rc);
+	}
+	return rc;
+}
+
+int
+cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+			int mark_yellow, int mark_red,
+			struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	int rc;
+
+	if (mark_green) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "Green IP ECN marking not supported";
+		return -EINVAL;
+	}
+
+	if (eth_dev->data->dev_started) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "IP ECN mark for running ports not "
+				 "supported";
+		return -EBUSY;
+	}
+
+	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV4_ECN,
+				    mark_yellow, mark_red);
+	if (rc < 0)
+		goto exit;
+
+	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV6_ECN,
+				    mark_yellow, mark_red);
+exit:
+	if (rc < 0) {
+		error->type = roc_nix_tm_err_to_rte_err(rc);
+		error->message = roc_error_msg_get(rc);
+	}
+	return rc;
+}
+
+int
+cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+			 int mark_yellow, int mark_red,
+			 struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	int rc;
+
+	if (mark_green) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "Green IP DSCP marking not supported";
+		return -EINVAL;
+	}
+
+	if (eth_dev->data->dev_started) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "IP DSCP mark for running ports not "
+				 "supported";
+		return -EBUSY;
+	}
+
+	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV4_DSCP,
+				    mark_yellow, mark_red);
+	if (rc < 0)
+		goto exit;
+
+	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV6_DSCP,
+				    mark_yellow, mark_red);
+exit:
+	if (rc < 0) {
+		error->type = roc_nix_tm_err_to_rte_err(rc);
+		error->message = roc_error_msg_get(rc);
+	}
+	return rc;
+}
+
+struct rte_tm_ops cnxk_tm_ops = {
 	.node_type_get = cnxk_nix_tm_node_type_get,
 	.capabilities_get = cnxk_nix_tm_capa_get,
 	.level_capabilities_get = cnxk_nix_tm_level_capa_get,
@@ -617,6 +728,10 @@
 	.node_shaper_update = cnxk_nix_tm_node_shaper_update,
 	.node_parent_update = cnxk_nix_tm_node_parent_update,
 	.node_stats_read = cnxk_nix_tm_node_stats_read,
+
+	.mark_vlan_dei = cnxk_nix_tm_mark_vlan_dei,
+	.mark_ip_ecn = cnxk_nix_tm_mark_ip_ecn,
+	.mark_ip_dscp = cnxk_nix_tm_mark_ip_dscp,
 };
 
 int
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 3/3] common/cnxk: check SQ node before setting bp config
  2022-02-24  9:57 [PATCH 1/3] common/cnxk: enable packet marking skoteshwar
  2022-02-24  9:57 ` [PATCH 2/3] net/cnxk: event/cnxk: enable packet marking callbacks skoteshwar
@ 2022-02-24  9:57 ` skoteshwar
  2022-02-24 19:36 ` [PATCH 1/3] common/cnxk: enable packet marking Jerin Jacob
  2022-02-25  4:59 ` [PATCH v2 " skoteshwar
  3 siblings, 0 replies; 8+ messages in thread
From: skoteshwar @ 2022-02-24  9:57 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

Validate sq_node and parent before accessing their fields.
An SQ can be created without any associated TM node; this is a valid
negative case, so return success when stopping TM without an SQ node.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix_tm.c     | 8 ++++++--
 drivers/common/cnxk/roc_nix_tm_ops.c | 2 +-
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 5b23ecd..7a17780 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -325,14 +325,17 @@
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	uint16_t link = nix->tx_link;
 	struct nix_tm_node *sq_node;
 	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
-	uint16_t link;
 	int rc = 0;
 
 	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	if (!sq_node)
+		return -ENOENT;
+
 	parent = sq_node->parent;
 	while (parent) {
 		if (parent->lvl == ROC_TM_LVL_SCH2)
@@ -340,9 +343,10 @@
 
 		parent = parent->parent;
 	}
+	if (!parent)
+		return -ENOENT;
 
 	list = nix_tm_node_list(nix, tree);
-	link = nix->tx_link;
 
 	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
 		rc = -EINVAL;
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 5a25b3e..1d9a02b 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -474,7 +474,7 @@
 			continue;
 
 		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
-		if (rc) {
+		if (rc && rc != -ENOENT) {
 			plt_err("Failed to disable backpressure, rc=%d", rc);
 			goto cleanup;
 		}
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 1/3] common/cnxk: enable packet marking
  2022-02-24  9:57 [PATCH 1/3] common/cnxk: enable packet marking skoteshwar
  2022-02-24  9:57 ` [PATCH 2/3] net/cnxk: event/cnxk: enable packet marking callbacks skoteshwar
  2022-02-24  9:57 ` [PATCH 3/3] common/cnxk: check SQ node before setting bp config skoteshwar
@ 2022-02-24 19:36 ` Jerin Jacob
  2022-02-25  4:59 ` [PATCH v2 " skoteshwar
  3 siblings, 0 replies; 8+ messages in thread
From: Jerin Jacob @ 2022-02-24 19:36 UTC (permalink / raw)
  To: Satha Koteswara Rao Kottidi
  Cc: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Ray Kinsella,
	dpdk-dev

On Thu, Feb 24, 2022 at 3:28 PM <skoteshwar@marvell.com> wrote:
>
> From: Satha Rao <skoteshwar@marvell.com>
>
> cnxk platforms supports packet marking when TM enabled with
> valid shaper rates. VLAN DEI, IP ECN, or IP DSCP inside
> packet will be updated based on mark flags selected.
>
> Signed-off-by: Satha Rao <skoteshwar@marvell.com>

Please rebase to next-net-mrvl as it has apply issues.

[for-next-net]dell[dpdk-next-net-mrvl] $ git pw series apply 21853
Failed to apply patch:
Applying: common/cnxk: enable packet marking
Applying: net/cnxk: event/cnxk: enable packet marking callbacks
Using index info to reconstruct a base tree...
M       drivers/event/cnxk/cn10k_worker.h
M       drivers/event/cnxk/cn9k_worker.h
M       drivers/net/cnxk/cn10k_ethdev.c
M       drivers/net/cnxk/cn10k_ethdev.h
M       drivers/net/cnxk/cn10k_tx.h
M       drivers/net/cnxk/cn9k_ethdev.c
A       drivers/net/cnxk/cn9k_tx.c
M       drivers/net/cnxk/cnxk_ethdev.h
Falling back to patching base and 3-way merge...
Auto-merging drivers/net/cnxk/cnxk_ethdev.h
CONFLICT (modify/delete): drivers/net/cnxk/cn9k_tx.c deleted in HEAD
and modified in net/cnxk: event/cnxk: enable packet marking callbacks.
Version net/cnxk: event/cnxk: enable packet marking callbacks of
drivers/net/cnxk/cn9k_tx.c left in
tree.
Auto-merging drivers/net/cnxk/cn9k_ethdev.c
Auto-merging drivers/net/cnxk/cn10k_tx.h
Auto-merging drivers/net/cnxk/cn10k_ethdev.h
Auto-merging drivers/net/cnxk/cn10k_ethdev.c
Auto-merging drivers/event/cnxk/cn9k_worker.h
Auto-merging drivers/event/cnxk/cn10k_worker.h
error: Failed to merge in the changes.
hint: Use 'git am --show-current-patch=diff' to see the failed patch

> ---
>  drivers/common/cnxk/meson.build       |   1 +
>  drivers/common/cnxk/roc_nix.h         |  21 +++
>  drivers/common/cnxk/roc_nix_priv.h    |  23 ++-
>  drivers/common/cnxk/roc_nix_tm.c      |   4 +
>  drivers/common/cnxk/roc_nix_tm_mark.c | 295 ++++++++++++++++++++++++++++++++++
>  drivers/common/cnxk/version.map       |   2 +
>  6 files changed, 343 insertions(+), 3 deletions(-)
>  create mode 100644 drivers/common/cnxk/roc_nix_tm_mark.c
>
> diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
> index 2834846..6f80827 100644
> --- a/drivers/common/cnxk/meson.build
> +++ b/drivers/common/cnxk/meson.build
> @@ -44,6 +44,7 @@ sources = files(
>          'roc_nix_rss.c',
>          'roc_nix_stats.c',
>          'roc_nix_tm.c',
> +        'roc_nix_tm_mark.c',
>          'roc_nix_tm_ops.c',
>          'roc_nix_tm_utils.c',
>          'roc_nix_vlan.c',
> diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
> index 10e8375..5e6eb58 100644
> --- a/drivers/common/cnxk/roc_nix.h
> +++ b/drivers/common/cnxk/roc_nix.h
> @@ -570,6 +570,22 @@ struct roc_nix_tm_node_stats {
>         uint64_t stats[ROC_NIX_TM_NODE_STATS_MAX];
>  };
>
> +enum roc_nix_tm_mark {
> +       ROC_NIX_TM_MARK_VLAN_DEI,
> +       ROC_NIX_TM_MARK_IPV4_DSCP,
> +       ROC_NIX_TM_MARK_IPV4_ECN,
> +       ROC_NIX_TM_MARK_IPV6_DSCP,
> +       ROC_NIX_TM_MARK_IPV6_ECN,
> +       ROC_NIX_TM_MARK_MAX
> +};
> +
> +enum roc_nix_tm_mark_color {
> +       ROC_NIX_TM_MARK_COLOR_Y,
> +       ROC_NIX_TM_MARK_COLOR_R,
> +       ROC_NIX_TM_MARK_COLOR_Y_R,
> +       ROC_NIX_TM_MARK_COLOR_MAX
> +};
> +
>  int __roc_api roc_nix_tm_node_add(struct roc_nix *roc_nix,
>                                   struct roc_nix_tm_node *roc_node);
>  int __roc_api roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
> @@ -646,6 +662,11 @@ int __roc_api roc_nix_tm_node_name_get(struct roc_nix *roc_nix,
>  int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
>  bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
>  int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
> +int __roc_api roc_nix_tm_mark_config(struct roc_nix *roc_nix,
> +                                    enum roc_nix_tm_mark type, int mark_yellow,
> +                                    int mark_red);
> +uint64_t __roc_api roc_nix_tm_mark_format_get(struct roc_nix *roc_nix,
> +                                             uint64_t *flags);
>
>  /* Ingress Policer API */
>  int __roc_api roc_nix_bpf_timeunit_get(struct roc_nix *roc_nix,
> diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
> index 2bc228c..d77c905 100644
> --- a/drivers/common/cnxk/roc_nix_priv.h
> +++ b/drivers/common/cnxk/roc_nix_priv.h
> @@ -36,9 +36,22 @@ struct nix_qint {
>  #define NIX_TM_CHAN_INVALID UINT16_MAX
>
>  /* TM flags */
> -#define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
> -#define NIX_TM_TL1_NO_SP     BIT_ULL(1)
> -#define NIX_TM_TL1_ACCESS    BIT_ULL(2)
> +#define NIX_TM_HIERARCHY_ENA   BIT_ULL(0)
> +#define NIX_TM_TL1_NO_SP       BIT_ULL(1)
> +#define NIX_TM_TL1_ACCESS      BIT_ULL(2)
> +#define NIX_TM_MARK_VLAN_DEI_EN BIT_ULL(3)
> +#define NIX_TM_MARK_IP_DSCP_EN BIT_ULL(4)
> +#define NIX_TM_MARK_IP_ECN_EN  BIT_ULL(5)
> +
> +#define NIX_TM_MARK_EN_MASK                                                    \
> +       (NIX_TM_MARK_IP_DSCP_EN | NIX_TM_MARK_IP_ECN_EN |                      \
> +        NIX_TM_MARK_VLAN_DEI_EN)
> +
> +#define NIX_TM_MARK_VLAN_DEI_SHIFT  0 /* Leave 16b for VLAN for FP logic */
> +#define NIX_TM_MARK_IPV4_DSCP_SHIFT 16
> +#define NIX_TM_MARK_IPV6_DSCP_SHIFT 24
> +#define NIX_TM_MARK_IPV4_ECN_SHIFT  32
> +#define NIX_TM_MARK_IPV6_ECN_SHIFT  40
>
>  struct nix_tm_tb {
>         /** Token bucket rate (bytes per second) */
> @@ -170,6 +183,9 @@ struct nix {
>         uint16_t tm_link_cfg_lvl;
>         uint16_t contig_rsvd[NIX_TXSCH_LVL_CNT];
>         uint16_t discontig_rsvd[NIX_TXSCH_LVL_CNT];
> +       uint64_t tm_markfmt_en;
> +       uint8_t tm_markfmt_null;
> +       uint8_t tm_markfmt[ROC_NIX_TM_MARK_MAX][ROC_NIX_TM_MARK_COLOR_MAX];
>
>         /* Ipsec info */
>         uint16_t cpt_msixoff[MAX_RVU_BLKLF_CNT];
> @@ -384,6 +400,7 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
>  int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
>                          bool enable);
>  void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
> +int nix_tm_mark_init(struct nix *nix);
>
>  /*
>   * TM priv utils.
> diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
> index ecf3edf..5b23ecd 100644
> --- a/drivers/common/cnxk/roc_nix_tm.c
> +++ b/drivers/common/cnxk/roc_nix_tm.c
> @@ -1692,6 +1692,10 @@
>                 bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
>         }
>
> +       rc = nix_tm_mark_init(nix);
> +       if (rc)
> +               goto exit;
> +
>         /* Disable TL1 Static Priority when VF's are enabled
>          * as otherwise VF's TL2 reallocation will be needed
>          * runtime to support a specific topology of PF.
> diff --git a/drivers/common/cnxk/roc_nix_tm_mark.c b/drivers/common/cnxk/roc_nix_tm_mark.c
> new file mode 100644
> index 0000000..64cf679
> --- /dev/null
> +++ b/drivers/common/cnxk/roc_nix_tm_mark.c
> @@ -0,0 +1,295 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2022 Marvell.
> + */
> +
> +#include "roc_api.h"
> +#include "roc_priv.h"
> +
> +static const uint8_t y_mask_val[ROC_NIX_TM_MARK_MAX][2] = {
> +       [ROC_NIX_TM_MARK_VLAN_DEI] = {0x0, 0x8},
> +       [ROC_NIX_TM_MARK_IPV4_DSCP] = {0x1, 0x2},
> +       [ROC_NIX_TM_MARK_IPV4_ECN] = {0x0, 0xc},
> +       [ROC_NIX_TM_MARK_IPV6_DSCP] = {0x1, 0x2},
> +       [ROC_NIX_TM_MARK_IPV6_ECN] = {0x0, 0x3},
> +};
> +
> +static const uint8_t r_mask_val[ROC_NIX_TM_MARK_MAX][2] = {
> +       [ROC_NIX_TM_MARK_VLAN_DEI] = {0x0, 0x8},
> +       [ROC_NIX_TM_MARK_IPV4_DSCP] = {0x0, 0x3},
> +       [ROC_NIX_TM_MARK_IPV4_ECN] = {0x0, 0xc},
> +       [ROC_NIX_TM_MARK_IPV6_DSCP] = {0x0, 0x3},
> +       [ROC_NIX_TM_MARK_IPV6_ECN] = {0x0, 0x3},
> +};
> +
> +static const uint8_t mark_off[ROC_NIX_TM_MARK_MAX] = {
> +       [ROC_NIX_TM_MARK_VLAN_DEI] = 0x3,  /* Byte 14 Bit[4:1] */
> +       [ROC_NIX_TM_MARK_IPV4_DSCP] = 0x1, /* Byte 1 Bit[6:3] */
> +       [ROC_NIX_TM_MARK_IPV4_ECN] = 0x6, /* Byte 1 Bit[1:0], Byte 2 Bit[7:6] */
> +       [ROC_NIX_TM_MARK_IPV6_DSCP] = 0x5, /* Byte 0 Bit[2:0], Byte 1 Bit[7] */
> +       [ROC_NIX_TM_MARK_IPV6_ECN] = 0x0,  /* Byte 1 Bit[7:4] */
> +};
> +
> +static const uint64_t mark_flag[ROC_NIX_TM_MARK_MAX] = {
> +       [ROC_NIX_TM_MARK_VLAN_DEI] = NIX_TM_MARK_VLAN_DEI_EN,
> +       [ROC_NIX_TM_MARK_IPV4_DSCP] = NIX_TM_MARK_IP_DSCP_EN,
> +       [ROC_NIX_TM_MARK_IPV4_ECN] = NIX_TM_MARK_IP_ECN_EN,
> +       [ROC_NIX_TM_MARK_IPV6_DSCP] = NIX_TM_MARK_IP_DSCP_EN,
> +       [ROC_NIX_TM_MARK_IPV6_ECN] = NIX_TM_MARK_IP_ECN_EN,
> +};
> +
> +static uint8_t
> +prepare_tm_shaper_red_algo(struct nix_tm_node *tm_node, volatile uint64_t *reg,
> +                          volatile uint64_t *regval,
> +                          volatile uint64_t *regval_mask)
> +{
> +       uint32_t schq = tm_node->hw_id;
> +       uint8_t k = 0;
> +
> +       plt_tm_dbg("Shaper read alg node %s(%u) lvl %u id %u, red_alg %x (%p)",
> +                  nix_tm_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
> +                  tm_node->id, tm_node->red_algo, tm_node);
> +
> +       /* Configure just RED algo */
> +       regval[k] = ((uint64_t)tm_node->red_algo << 9);
> +       regval_mask[k] = ~(BIT_ULL(10) | BIT_ULL(9));
> +
> +       switch (tm_node->hw_lvl) {
> +       case NIX_TXSCH_LVL_SMQ:
> +               reg[k] = NIX_AF_MDQX_SHAPE(schq);
> +               k++;
> +               break;
> +       case NIX_TXSCH_LVL_TL4:
> +               reg[k] = NIX_AF_TL4X_SHAPE(schq);
> +               k++;
> +               break;
> +       case NIX_TXSCH_LVL_TL3:
> +               reg[k] = NIX_AF_TL3X_SHAPE(schq);
> +               k++;
> +               break;
> +       case NIX_TXSCH_LVL_TL2:
> +               reg[k] = NIX_AF_TL2X_SHAPE(schq);
> +               k++;
> +               break;
> +       default:
> +               break;
> +       }
> +
> +       return k;
> +}
> +
> +/* Only called while device is stopped */
> +static int
> +nix_tm_update_red_algo(struct nix *nix, bool red_send)
> +{
> +       struct mbox *mbox = (&nix->dev)->mbox;
> +       struct nix_txschq_config *req;
> +       struct nix_tm_node_list *list;
> +       struct nix_tm_node *tm_node;
> +       uint8_t k;
> +       int rc;
> +
> +       list = nix_tm_node_list(nix, nix->tm_tree);
> +       TAILQ_FOREACH(tm_node, list, node) {
> +               /* Skip leaf nodes */
> +               if (nix_tm_is_leaf(nix, tm_node->lvl))
> +                       continue;
> +
> +               if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1)
> +                       continue;
> +
> +               /* Skip if no update of red_algo is needed */
> +               if ((red_send && (tm_node->red_algo == NIX_REDALG_SEND)) ||
> +                   (!red_send && (tm_node->red_algo != NIX_REDALG_SEND)))
> +                       continue;
> +
> +               /* Update Red algo */
> +               if (red_send)
> +                       tm_node->red_algo = NIX_REDALG_SEND;
> +               else
> +                       tm_node->red_algo = NIX_REDALG_STD;
> +
> +               /* Update txschq config  */
> +               req = mbox_alloc_msg_nix_txschq_cfg(mbox);
> +               req->lvl = tm_node->hw_lvl;
> +               k = prepare_tm_shaper_red_algo(tm_node, req->reg, req->regval,
> +                                              req->regval_mask);
> +               req->num_regs = k;
> +
> +               rc = mbox_process(mbox);
> +               if (rc)
> +                       return rc;
> +       }
> +       return 0;
> +}
> +
> +/* Return's true if queue reconfig is needed */
> +static bool
> +nix_tm_update_markfmt(struct nix *nix, enum roc_nix_tm_mark type,
> +                     int mark_yellow, int mark_red)
> +{
> +       uint64_t new_markfmt, old_markfmt;
> +       uint8_t *tm_markfmt;
> +       uint8_t en_shift;
> +       uint64_t mask;
> +
> +       if (type >= ROC_NIX_TM_MARK_MAX)
> +               return false;
> +
> +       /* Pre-allocated mark formats for type:color combinations */
> +       tm_markfmt = nix->tm_markfmt[type];
> +
> +       if (!mark_yellow && !mark_red) {
> +               /* Null format to disable */
> +               new_markfmt = nix->tm_markfmt_null;
> +       } else {
> +               /* Marking enabled with combination of yellow and red */
> +               if (mark_yellow && mark_red)
> +                       new_markfmt = tm_markfmt[ROC_NIX_TM_MARK_COLOR_Y_R];
> +               else if (mark_yellow)
> +                       new_markfmt = tm_markfmt[ROC_NIX_TM_MARK_COLOR_Y];
> +               else
> +                       new_markfmt = tm_markfmt[ROC_NIX_TM_MARK_COLOR_R];
> +       }
> +
> +       mask = 0xFFull;
> +       /* Format of fast path markfmt
> +        * ipv6_ecn[8]:ipv4_ecn[8]:ipv6_dscp[8]:ipv4_dscp[8]:vlan_dei[16]
> +        * fmt[7] = ptr offset for IPv4/IPv6 on l2_len.
> +        * fmt[6:0] = markfmt idx.
> +        */
> +       switch (type) {
> +       case ROC_NIX_TM_MARK_VLAN_DEI:
> +               en_shift = NIX_TM_MARK_VLAN_DEI_SHIFT;
> +               mask = 0xFFFFull;
> +               new_markfmt |= new_markfmt << 8;
> +               break;
> +       case ROC_NIX_TM_MARK_IPV4_DSCP:
> +               new_markfmt |= BIT_ULL(7);
> +               en_shift = NIX_TM_MARK_IPV4_DSCP_SHIFT;
> +               break;
> +       case ROC_NIX_TM_MARK_IPV4_ECN:
> +               new_markfmt |= BIT_ULL(7);
> +               en_shift = NIX_TM_MARK_IPV4_ECN_SHIFT;
> +               break;
> +       case ROC_NIX_TM_MARK_IPV6_DSCP:
> +               en_shift = NIX_TM_MARK_IPV6_DSCP_SHIFT;
> +               break;
> +       case ROC_NIX_TM_MARK_IPV6_ECN:
> +               new_markfmt |= BIT_ULL(7);
> +               en_shift = NIX_TM_MARK_IPV6_ECN_SHIFT;
> +               break;
> +       default:
> +               return false;
> +       }
> +
> +       /* Skip if same as old config */
> +       old_markfmt = (nix->tm_markfmt_en >> en_shift) & mask;
> +       if (old_markfmt == new_markfmt)
> +               return false;
> +
> +       /* Need queue reconfig */
> +       nix->tm_markfmt_en &= ~(mask << en_shift);
> +       nix->tm_markfmt_en |= (new_markfmt << en_shift);
> +
> +       return true;
> +}
> +
> +int
> +nix_tm_mark_init(struct nix *nix)
> +{
> +       struct mbox *mbox = (&nix->dev)->mbox;
> +       struct nix_mark_format_cfg_rsp *rsp;
> +       struct nix_mark_format_cfg *req;
> +       int rc, i, j;
> +
> +       /* Check for supported revisions */
> +       if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
> +               return 0;
> +
> +       /* Null mark format */
> +       req = mbox_alloc_msg_nix_mark_format_cfg(mbox);
> +       rc = mbox_process_msg(mbox, (void *)&rsp);
> +       if (rc) {
> +               plt_err("TM failed to alloc null mark format, rc=%d", rc);
> +               goto exit;
> +       }
> +
> +       nix->tm_markfmt_null = rsp->mark_format_idx;
> +
> +       /* Alloc vlan, dscp, ecn mark formats */
> +       for (i = 0; i < ROC_NIX_TM_MARK_MAX; i++) {
> +               for (j = 0; j < ROC_NIX_TM_MARK_COLOR_MAX; j++) {
> +                       req = mbox_alloc_msg_nix_mark_format_cfg(mbox);
> +                       req->offset = mark_off[i];
> +
> +                       switch (j) {
> +                       case ROC_NIX_TM_MARK_COLOR_Y:
> +                               req->y_mask = y_mask_val[i][0];
> +                               req->y_val = y_mask_val[i][1];
> +                               break;
> +                       case ROC_NIX_TM_MARK_COLOR_R:
> +                               req->r_mask = r_mask_val[i][0];
> +                               req->r_val = r_mask_val[i][1];
> +                               break;
> +                       case ROC_NIX_TM_MARK_COLOR_Y_R:
> +                               req->y_mask = y_mask_val[i][0];
> +                               req->y_val = y_mask_val[i][1];
> +                               req->r_mask = r_mask_val[i][0];
> +                               req->r_val = r_mask_val[i][1];
> +                               break;
> +                       }
> +
> +                       rc = mbox_process_msg(mbox, (void *)&rsp);
> +                       if (rc) {
> +                               plt_err("TM failed to alloc mark fmt "
> +                                       "type %u color %u, rc=%d",
> +                                       i, j, rc);
> +                               goto exit;
> +                       }
> +
> +                       nix->tm_markfmt[i][j] = rsp->mark_format_idx;
> +                       plt_tm_dbg("Mark type: %u, Mark Color:%u, id:%u\n", i,
> +                                  j, nix->tm_markfmt[i][j]);
> +               }
> +       }
> +       /* Update null mark format as default */
> +       nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_VLAN_DEI, 0, 0);
> +       nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV4_DSCP, 0, 0);
> +       nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV4_ECN, 0, 0);
> +       nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV6_DSCP, 0, 0);
> +       nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV6_ECN, 0, 0);
> +exit:
> +       return rc;
> +}
> +
> +int
> +roc_nix_tm_mark_config(struct roc_nix *roc_nix, enum roc_nix_tm_mark type,
> +                      int mark_yellow, int mark_red)
> +{
> +       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> +       int rc;
> +
> +       if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
> +               return -EINVAL;
> +
> +       rc = nix_tm_update_markfmt(nix, type, mark_yellow, mark_red);
> +       if (!rc)
> +               return 0;
> +
> +       if (!mark_yellow && !mark_red)
> +               nix->tm_flags &= ~mark_flag[type];
> +       else
> +               nix->tm_flags |= mark_flag[type];
> +
> +       /* Update red algo for change in mark_red */
> +       return nix_tm_update_red_algo(nix, !!mark_red);
> +}
> +
> +uint64_t
> +roc_nix_tm_mark_format_get(struct roc_nix *roc_nix, uint64_t *flags)
> +{
> +       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> +
> +       *flags = ((nix->tm_flags & NIX_TM_MARK_EN_MASK) >> 3);
> +       return nix->tm_markfmt_en;
> +}
> diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
> index 21f94e8..a41dc26 100644
> --- a/drivers/common/cnxk/version.map
> +++ b/drivers/common/cnxk/version.map
> @@ -253,6 +253,8 @@ INTERNAL {
>         roc_nix_tm_leaf_cnt;
>         roc_nix_tm_lvl_have_link_access;
>         roc_nix_tm_lvl_is_leaf;
> +       roc_nix_tm_mark_config;
> +       roc_nix_tm_mark_format_get;
>         roc_nix_tm_max_prio;
>         roc_nix_tm_node_add;
>         roc_nix_tm_node_delete;
> --
> 1.8.3.1
>

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH v2 1/3] common/cnxk: enable packet marking
  2022-02-24  9:57 [PATCH 1/3] common/cnxk: enable packet marking skoteshwar
                   ` (2 preceding siblings ...)
  2022-02-24 19:36 ` [PATCH 1/3] common/cnxk: enable packet marking Jerin Jacob
@ 2022-02-25  4:59 ` skoteshwar
  2022-02-25  4:59   ` [PATCH v2 2/3] net/cnxk: event/cnxk: enable packet marking callbacks skoteshwar
                     ` (2 more replies)
  3 siblings, 3 replies; 8+ messages in thread
From: skoteshwar @ 2022-02-25  4:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Ray Kinsella
  Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

cnxk platforms support packet marking when TM is enabled with
valid shaper rates. VLAN DEI, IP ECN, or IP DSCP inside the
packet will be updated based on the mark flags selected.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---

v2:
- rebased to master, fixed conflicts

 drivers/common/cnxk/meson.build       |   1 +
 drivers/common/cnxk/roc_nix.h         |  21 +++
 drivers/common/cnxk/roc_nix_priv.h    |  23 ++-
 drivers/common/cnxk/roc_nix_tm.c      |   4 +
 drivers/common/cnxk/roc_nix_tm_mark.c | 295 ++++++++++++++++++++++++++++++++++
 drivers/common/cnxk/version.map       |   2 +
 6 files changed, 343 insertions(+), 3 deletions(-)
 create mode 100644 drivers/common/cnxk/roc_nix_tm_mark.c

diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 2834846..6f80827 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -44,6 +44,7 @@ sources = files(
         'roc_nix_rss.c',
         'roc_nix_stats.c',
         'roc_nix_tm.c',
+        'roc_nix_tm_mark.c',
         'roc_nix_tm_ops.c',
         'roc_nix_tm_utils.c',
         'roc_nix_vlan.c',
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 10e8375..5e6eb58 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -570,6 +570,22 @@ struct roc_nix_tm_node_stats {
 	uint64_t stats[ROC_NIX_TM_NODE_STATS_MAX];
 };
 
+enum roc_nix_tm_mark {
+	ROC_NIX_TM_MARK_VLAN_DEI,
+	ROC_NIX_TM_MARK_IPV4_DSCP,
+	ROC_NIX_TM_MARK_IPV4_ECN,
+	ROC_NIX_TM_MARK_IPV6_DSCP,
+	ROC_NIX_TM_MARK_IPV6_ECN,
+	ROC_NIX_TM_MARK_MAX
+};
+
+enum roc_nix_tm_mark_color {
+	ROC_NIX_TM_MARK_COLOR_Y,
+	ROC_NIX_TM_MARK_COLOR_R,
+	ROC_NIX_TM_MARK_COLOR_Y_R,
+	ROC_NIX_TM_MARK_COLOR_MAX
+};
+
 int __roc_api roc_nix_tm_node_add(struct roc_nix *roc_nix,
 				  struct roc_nix_tm_node *roc_node);
 int __roc_api roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
@@ -646,6 +662,11 @@ int __roc_api roc_nix_tm_node_name_get(struct roc_nix *roc_nix,
 int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
 int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
+int __roc_api roc_nix_tm_mark_config(struct roc_nix *roc_nix,
+				     enum roc_nix_tm_mark type, int mark_yellow,
+				     int mark_red);
+uint64_t __roc_api roc_nix_tm_mark_format_get(struct roc_nix *roc_nix,
+					      uint64_t *flags);
 
 /* Ingress Policer API */
 int __roc_api roc_nix_bpf_timeunit_get(struct roc_nix *roc_nix,
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 5d45f75..9b9ffae 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -36,9 +36,22 @@ struct nix_qint {
 #define NIX_TM_CHAN_INVALID UINT16_MAX
 
 /* TM flags */
-#define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
-#define NIX_TM_TL1_NO_SP     BIT_ULL(1)
-#define NIX_TM_TL1_ACCESS    BIT_ULL(2)
+#define NIX_TM_HIERARCHY_ENA	BIT_ULL(0)
+#define NIX_TM_TL1_NO_SP	BIT_ULL(1)
+#define NIX_TM_TL1_ACCESS	BIT_ULL(2)
+#define NIX_TM_MARK_VLAN_DEI_EN BIT_ULL(3)
+#define NIX_TM_MARK_IP_DSCP_EN	BIT_ULL(4)
+#define NIX_TM_MARK_IP_ECN_EN	BIT_ULL(5)
+
+#define NIX_TM_MARK_EN_MASK                                                    \
+	(NIX_TM_MARK_IP_DSCP_EN | NIX_TM_MARK_IP_ECN_EN |                      \
+	 NIX_TM_MARK_VLAN_DEI_EN)
+
+#define NIX_TM_MARK_VLAN_DEI_SHIFT  0 /* Leave 16b for VLAN for FP logic */
+#define NIX_TM_MARK_IPV4_DSCP_SHIFT 16
+#define NIX_TM_MARK_IPV6_DSCP_SHIFT 24
+#define NIX_TM_MARK_IPV4_ECN_SHIFT  32
+#define NIX_TM_MARK_IPV6_ECN_SHIFT  40
 
 struct nix_tm_tb {
 	/** Token bucket rate (bytes per second) */
@@ -170,6 +183,9 @@ struct nix {
 	uint16_t tm_link_cfg_lvl;
 	uint16_t contig_rsvd[NIX_TXSCH_LVL_CNT];
 	uint16_t discontig_rsvd[NIX_TXSCH_LVL_CNT];
+	uint64_t tm_markfmt_en;
+	uint8_t tm_markfmt_null;
+	uint8_t tm_markfmt[ROC_NIX_TM_MARK_MAX][ROC_NIX_TM_MARK_COLOR_MAX];
 
 	/* Ipsec info */
 	uint16_t cpt_msixoff[MAX_RVU_BLKLF_CNT];
@@ -386,6 +402,7 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
 			 bool enable);
 void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
+int nix_tm_mark_init(struct nix *nix);
 
 /*
  * TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index ecf3edf..5b23ecd 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -1692,6 +1692,10 @@
 		bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
 	}
 
+	rc = nix_tm_mark_init(nix);
+	if (rc)
+		goto exit;
+
 	/* Disable TL1 Static Priority when VF's are enabled
 	 * as otherwise VF's TL2 reallocation will be needed
 	 * runtime to support a specific topology of PF.
diff --git a/drivers/common/cnxk/roc_nix_tm_mark.c b/drivers/common/cnxk/roc_nix_tm_mark.c
new file mode 100644
index 0000000..64cf679
--- /dev/null
+++ b/drivers/common/cnxk/roc_nix_tm_mark.c
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell.
+ */
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+static const uint8_t y_mask_val[ROC_NIX_TM_MARK_MAX][2] = {
+	[ROC_NIX_TM_MARK_VLAN_DEI] = {0x0, 0x8},
+	[ROC_NIX_TM_MARK_IPV4_DSCP] = {0x1, 0x2},
+	[ROC_NIX_TM_MARK_IPV4_ECN] = {0x0, 0xc},
+	[ROC_NIX_TM_MARK_IPV6_DSCP] = {0x1, 0x2},
+	[ROC_NIX_TM_MARK_IPV6_ECN] = {0x0, 0x3},
+};
+
+static const uint8_t r_mask_val[ROC_NIX_TM_MARK_MAX][2] = {
+	[ROC_NIX_TM_MARK_VLAN_DEI] = {0x0, 0x8},
+	[ROC_NIX_TM_MARK_IPV4_DSCP] = {0x0, 0x3},
+	[ROC_NIX_TM_MARK_IPV4_ECN] = {0x0, 0xc},
+	[ROC_NIX_TM_MARK_IPV6_DSCP] = {0x0, 0x3},
+	[ROC_NIX_TM_MARK_IPV6_ECN] = {0x0, 0x3},
+};
+
+static const uint8_t mark_off[ROC_NIX_TM_MARK_MAX] = {
+	[ROC_NIX_TM_MARK_VLAN_DEI] = 0x3,  /* Byte 14 Bit[4:1] */
+	[ROC_NIX_TM_MARK_IPV4_DSCP] = 0x1, /* Byte 1 Bit[6:3] */
+	[ROC_NIX_TM_MARK_IPV4_ECN] = 0x6, /* Byte 1 Bit[1:0], Byte 2 Bit[7:6] */
+	[ROC_NIX_TM_MARK_IPV6_DSCP] = 0x5, /* Byte 0 Bit[2:0], Byte 1 Bit[7] */
+	[ROC_NIX_TM_MARK_IPV6_ECN] = 0x0,  /* Byte 1 Bit[7:4] */
+};
+
+static const uint64_t mark_flag[ROC_NIX_TM_MARK_MAX] = {
+	[ROC_NIX_TM_MARK_VLAN_DEI] = NIX_TM_MARK_VLAN_DEI_EN,
+	[ROC_NIX_TM_MARK_IPV4_DSCP] = NIX_TM_MARK_IP_DSCP_EN,
+	[ROC_NIX_TM_MARK_IPV4_ECN] = NIX_TM_MARK_IP_ECN_EN,
+	[ROC_NIX_TM_MARK_IPV6_DSCP] = NIX_TM_MARK_IP_DSCP_EN,
+	[ROC_NIX_TM_MARK_IPV6_ECN] = NIX_TM_MARK_IP_ECN_EN,
+};
+
+static uint8_t
+prepare_tm_shaper_red_algo(struct nix_tm_node *tm_node, volatile uint64_t *reg,
+			   volatile uint64_t *regval,
+			   volatile uint64_t *regval_mask)
+{
+	uint32_t schq = tm_node->hw_id;
+	uint8_t k = 0;
+
+	plt_tm_dbg("Shaper read alg node %s(%u) lvl %u id %u, red_alg %x (%p)",
+		   nix_tm_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
+		   tm_node->id, tm_node->red_algo, tm_node);
+
+	/* Configure just RED algo */
+	regval[k] = ((uint64_t)tm_node->red_algo << 9);
+	regval_mask[k] = ~(BIT_ULL(10) | BIT_ULL(9));
+
+	switch (tm_node->hw_lvl) {
+	case NIX_TXSCH_LVL_SMQ:
+		reg[k] = NIX_AF_MDQX_SHAPE(schq);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL4:
+		reg[k] = NIX_AF_TL4X_SHAPE(schq);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL3:
+		reg[k] = NIX_AF_TL3X_SHAPE(schq);
+		k++;
+		break;
+	case NIX_TXSCH_LVL_TL2:
+		reg[k] = NIX_AF_TL2X_SHAPE(schq);
+		k++;
+		break;
+	default:
+		break;
+	}
+
+	return k;
+}
+
+/* Only called while device is stopped */
+static int
+nix_tm_update_red_algo(struct nix *nix, bool red_send)
+{
+	struct mbox *mbox = (&nix->dev)->mbox;
+	struct nix_txschq_config *req;
+	struct nix_tm_node_list *list;
+	struct nix_tm_node *tm_node;
+	uint8_t k;
+	int rc;
+
+	list = nix_tm_node_list(nix, nix->tm_tree);
+	TAILQ_FOREACH(tm_node, list, node) {
+		/* Skip leaf nodes */
+		if (nix_tm_is_leaf(nix, tm_node->lvl))
+			continue;
+
+		if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1)
+			continue;
+
+		/* Skip if no update of red_algo is needed */
+		if ((red_send && (tm_node->red_algo == NIX_REDALG_SEND)) ||
+		    (!red_send && (tm_node->red_algo != NIX_REDALG_SEND)))
+			continue;
+
+		/* Update Red algo */
+		if (red_send)
+			tm_node->red_algo = NIX_REDALG_SEND;
+		else
+			tm_node->red_algo = NIX_REDALG_STD;
+
+		/* Update txschq config  */
+		req = mbox_alloc_msg_nix_txschq_cfg(mbox);
+		req->lvl = tm_node->hw_lvl;
+		k = prepare_tm_shaper_red_algo(tm_node, req->reg, req->regval,
+					       req->regval_mask);
+		req->num_regs = k;
+
+		rc = mbox_process(mbox);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+/* Returns true if queue reconfig is needed */
+static bool
+nix_tm_update_markfmt(struct nix *nix, enum roc_nix_tm_mark type,
+		      int mark_yellow, int mark_red)
+{
+	uint64_t new_markfmt, old_markfmt;
+	uint8_t *tm_markfmt;
+	uint8_t en_shift;
+	uint64_t mask;
+
+	if (type >= ROC_NIX_TM_MARK_MAX)
+		return false;
+
+	/* Pre-allocated mark formats for type:color combinations */
+	tm_markfmt = nix->tm_markfmt[type];
+
+	if (!mark_yellow && !mark_red) {
+		/* Null format to disable */
+		new_markfmt = nix->tm_markfmt_null;
+	} else {
+		/* Marking enabled with combination of yellow and red */
+		if (mark_yellow && mark_red)
+			new_markfmt = tm_markfmt[ROC_NIX_TM_MARK_COLOR_Y_R];
+		else if (mark_yellow)
+			new_markfmt = tm_markfmt[ROC_NIX_TM_MARK_COLOR_Y];
+		else
+			new_markfmt = tm_markfmt[ROC_NIX_TM_MARK_COLOR_R];
+	}
+
+	mask = 0xFFull;
+	/* Format of fast path markfmt
+	 * ipv6_ecn[8]:ipv4_ecn[8]:ipv6_dscp[8]:ipv4_dscp[8]:vlan_dei[16]
+	 * fmt[7] = ptr offset for IPv4/IPv6 on l2_len.
+	 * fmt[6:0] = markfmt idx.
+	 */
+	switch (type) {
+	case ROC_NIX_TM_MARK_VLAN_DEI:
+		en_shift = NIX_TM_MARK_VLAN_DEI_SHIFT;
+		mask = 0xFFFFull;
+		new_markfmt |= new_markfmt << 8;
+		break;
+	case ROC_NIX_TM_MARK_IPV4_DSCP:
+		new_markfmt |= BIT_ULL(7);
+		en_shift = NIX_TM_MARK_IPV4_DSCP_SHIFT;
+		break;
+	case ROC_NIX_TM_MARK_IPV4_ECN:
+		new_markfmt |= BIT_ULL(7);
+		en_shift = NIX_TM_MARK_IPV4_ECN_SHIFT;
+		break;
+	case ROC_NIX_TM_MARK_IPV6_DSCP:
+		en_shift = NIX_TM_MARK_IPV6_DSCP_SHIFT;
+		break;
+	case ROC_NIX_TM_MARK_IPV6_ECN:
+		new_markfmt |= BIT_ULL(7);
+		en_shift = NIX_TM_MARK_IPV6_ECN_SHIFT;
+		break;
+	default:
+		return false;
+	}
+
+	/* Skip if same as old config */
+	old_markfmt = (nix->tm_markfmt_en >> en_shift) & mask;
+	if (old_markfmt == new_markfmt)
+		return false;
+
+	/* Need queue reconfig */
+	nix->tm_markfmt_en &= ~(mask << en_shift);
+	nix->tm_markfmt_en |= (new_markfmt << en_shift);
+
+	return true;
+}
+
+int
+nix_tm_mark_init(struct nix *nix)
+{
+	struct mbox *mbox = (&nix->dev)->mbox;
+	struct nix_mark_format_cfg_rsp *rsp;
+	struct nix_mark_format_cfg *req;
+	int rc, i, j;
+
+	/* Check for supported revisions */
+	if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
+		return 0;
+
+	/* Null mark format */
+	req = mbox_alloc_msg_nix_mark_format_cfg(mbox);
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc) {
+		plt_err("TM failed to alloc null mark format, rc=%d", rc);
+		goto exit;
+	}
+
+	nix->tm_markfmt_null = rsp->mark_format_idx;
+
+	/* Alloc vlan, dscp, ecn mark formats */
+	for (i = 0; i < ROC_NIX_TM_MARK_MAX; i++) {
+		for (j = 0; j < ROC_NIX_TM_MARK_COLOR_MAX; j++) {
+			req = mbox_alloc_msg_nix_mark_format_cfg(mbox);
+			req->offset = mark_off[i];
+
+			switch (j) {
+			case ROC_NIX_TM_MARK_COLOR_Y:
+				req->y_mask = y_mask_val[i][0];
+				req->y_val = y_mask_val[i][1];
+				break;
+			case ROC_NIX_TM_MARK_COLOR_R:
+				req->r_mask = r_mask_val[i][0];
+				req->r_val = r_mask_val[i][1];
+				break;
+			case ROC_NIX_TM_MARK_COLOR_Y_R:
+				req->y_mask = y_mask_val[i][0];
+				req->y_val = y_mask_val[i][1];
+				req->r_mask = r_mask_val[i][0];
+				req->r_val = r_mask_val[i][1];
+				break;
+			}
+
+			rc = mbox_process_msg(mbox, (void *)&rsp);
+			if (rc) {
+				plt_err("TM failed to alloc mark fmt "
+					"type %u color %u, rc=%d",
+					i, j, rc);
+				goto exit;
+			}
+
+			nix->tm_markfmt[i][j] = rsp->mark_format_idx;
+			plt_tm_dbg("Mark type: %u, Mark Color:%u, id:%u\n", i,
+				   j, nix->tm_markfmt[i][j]);
+		}
+	}
+	/* Update null mark format as default */
+	nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_VLAN_DEI, 0, 0);
+	nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV4_DSCP, 0, 0);
+	nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV4_ECN, 0, 0);
+	nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV6_DSCP, 0, 0);
+	nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV6_ECN, 0, 0);
+exit:
+	return rc;
+}
+
+int
+roc_nix_tm_mark_config(struct roc_nix *roc_nix, enum roc_nix_tm_mark type,
+		       int mark_yellow, int mark_red)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+	int rc;
+
+	if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
+		return -EINVAL;
+
+	rc = nix_tm_update_markfmt(nix, type, mark_yellow, mark_red);
+	if (!rc)
+		return 0;
+
+	if (!mark_yellow && !mark_red)
+		nix->tm_flags &= ~mark_flag[type];
+	else
+		nix->tm_flags |= mark_flag[type];
+
+	/* Update red algo for change in mark_red */
+	return nix_tm_update_red_algo(nix, !!mark_red);
+}
+
+uint64_t
+roc_nix_tm_mark_format_get(struct roc_nix *roc_nix, uint64_t *flags)
+{
+	struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+	*flags = ((nix->tm_flags & NIX_TM_MARK_EN_MASK) >> 3);
+	return nix->tm_markfmt_en;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 44adf91..d346e6f 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -256,6 +256,8 @@ INTERNAL {
 	roc_nix_tm_leaf_cnt;
 	roc_nix_tm_lvl_have_link_access;
 	roc_nix_tm_lvl_is_leaf;
+	roc_nix_tm_mark_config;
+	roc_nix_tm_mark_format_get;
 	roc_nix_tm_max_prio;
 	roc_nix_tm_node_add;
 	roc_nix_tm_node_delete;
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH v2 2/3] net/cnxk: event/cnxk: enable packet marking callbacks
  2022-02-25  4:59 ` [PATCH v2 " skoteshwar
@ 2022-02-25  4:59   ` skoteshwar
  2022-02-25  4:59   ` [PATCH v2 3/3] common/cnxk: check SQ node before setting bp config skoteshwar
  2022-02-25  7:48   ` [PATCH v2 1/3] common/cnxk: enable packet marking Jerin Jacob
  2 siblings, 0 replies; 8+ messages in thread
From: skoteshwar @ 2022-02-25  4:59 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton, Nithin Dabilpuram,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

The cnxk platform supports red/yellow packet marking based on TM
configuration. This patch adds hooks to enable/disable packet
marking for VLAN DEI, IP DSCP and IP ECN. Marking is enabled only
in scalar mode.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/event/cnxk/cn10k_worker.h  |   3 +-
 drivers/event/cnxk/cn9k_worker.h   |   3 +-
 drivers/net/cnxk/cn10k_ethdev.c    | 138 +++++++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cn10k_ethdev.h    |   2 +
 drivers/net/cnxk/cn10k_tx.h        |  45 +++++++++++-
 drivers/net/cnxk/cn10k_tx_select.c |   2 +-
 drivers/net/cnxk/cn9k_ethdev.c     | 137 ++++++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cn9k_ethdev.h     |   2 +
 drivers/net/cnxk/cn9k_tx.h         |  48 +++++++++++--
 drivers/net/cnxk/cn9k_tx_select.c  |   2 +-
 drivers/net/cnxk/cnxk_ethdev.h     |  23 +++++++
 drivers/net/cnxk/cnxk_tm.c         | 125 +++++++++++++++++++++++++++++++--
 12 files changed, 513 insertions(+), 17 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 68f4d0e..fd8d8dd 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -547,7 +547,8 @@ uint16_t __rte_hot cn10k_sso_hws_ca_enq(void *port, struct rte_event ev[],
 	if (flags & NIX_TX_OFFLOAD_TSO_F)
 		cn10k_nix_xmit_prepare_tso(m, flags);
 
-	cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec);
+	cn10k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, &sec,
+			       txq->mark_flag, txq->mark_fmt);
 
 	laddr = lmt_addr;
 	/* Prepare CPT instruction and get nixtx addr if
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index c03b493..7f47b19 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -762,7 +762,8 @@ uint16_t __rte_hot cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[],
 		rte_io_wmb();
 	txq = cn9k_sso_hws_xtract_meta(m, txq_data);
 	cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
-	cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
+	cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt, txq->mark_flag,
+			      txq->mark_fmt);
 
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
 		uint64_t ol_flags = m->ol_flags;
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 9f2dc8a..15dbea2 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -110,6 +110,9 @@
 	if (conf & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
+	if (dev->tx_mark)
+		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+
 	return flags;
 }
 
@@ -169,6 +172,7 @@
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
 	struct roc_nix *nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
 	struct roc_cpt_lf *inl_lf;
 	struct cn10k_eth_txq *txq;
 	struct roc_nix_sq *sq;
@@ -206,6 +210,11 @@
 		PLT_STATIC_ASSERT(ROC_NIX_INL_SA_BASE_ALIGN == BIT_ULL(16));
 	}
 
+	/* Restore marking flag from roc */
+	mark_fmt = roc_nix_tm_mark_format_get(nix, &mark_flag);
+	txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+	txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+
 	nix_form_default_desc(dev, txq, qid);
 	txq->lso_tun_fmt = dev->lso_tun_fmt;
 	return 0;
@@ -546,6 +555,118 @@
 	return rc;
 }
 
+static int
+cn10k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+			   int mark_yellow, int mark_red,
+			   struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	int rc, i;
+
+	rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
+				       mark_red, error);
+
+	if (rc)
+		goto exit;
+
+	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+	if (mark_flag) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+		dev->tx_mark = true;
+	} else {
+		dev->tx_mark = false;
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+	}
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+	}
+	cn10k_eth_set_tx_function(eth_dev);
+exit:
+	return rc;
+}
+
+static int
+cn10k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+			 int mark_yellow, int mark_red,
+			 struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	int rc, i;
+
+	rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
+				     error);
+	if (rc)
+		goto exit;
+
+	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+	if (mark_flag) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+		dev->tx_mark = true;
+	} else {
+		dev->tx_mark = false;
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+	}
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+	}
+	cn10k_eth_set_tx_function(eth_dev);
+exit:
+	return rc;
+}
+
+static int
+cn10k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+			  int mark_yellow, int mark_red,
+			  struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	int rc, i;
+
+	rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
+				      mark_red, error);
+	if (rc)
+		goto exit;
+
+	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+	if (mark_flag) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+		dev->tx_mark = true;
+	} else {
+		dev->tx_mark = false;
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+	}
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+	}
+	cn10k_eth_set_tx_function(eth_dev);
+exit:
+	return rc;
+}
+
 /* Update platform specific eth dev ops */
 static void
 nix_eth_dev_ops_override(void)
@@ -575,6 +696,22 @@
 	cnxk_eth_dev_ops.ip_reassembly_conf_set = cn10k_nix_reassembly_conf_set;
 }
 
+/* Update platform specific tm ops */
+static void
+nix_tm_ops_override(void)
+{
+	static int init_once;
+
+	if (init_once)
+		return;
+	init_once = 1;
+
+	/* Update platform specific ops */
+	cnxk_tm_ops.mark_vlan_dei = cn10k_nix_tm_mark_vlan_dei;
+	cnxk_tm_ops.mark_ip_ecn = cn10k_nix_tm_mark_ip_ecn;
+	cnxk_tm_ops.mark_ip_dscp = cn10k_nix_tm_mark_ip_dscp;
+}
+
 static void
 npc_flow_ops_override(void)
 {
@@ -614,6 +751,7 @@
 	}
 
 	nix_eth_dev_ops_override();
+	nix_tm_ops_override();
 	npc_flow_ops_override();
 
 	cn10k_eth_sec_ops_override();
diff --git a/drivers/net/cnxk/cn10k_ethdev.h b/drivers/net/cnxk/cn10k_ethdev.h
index 62b434e..1e49d65 100644
--- a/drivers/net/cnxk/cn10k_ethdev.h
+++ b/drivers/net/cnxk/cn10k_ethdev.h
@@ -21,6 +21,8 @@ struct cn10k_eth_txq {
 	uint16_t cpt_desc;
 	uint64_t lso_tun_fmt;
 	uint64_t ts_mem;
+	uint64_t mark_flag : 8;
+	uint64_t mark_fmt : 48;
 } __plt_cache_aligned;
 
 struct cn10k_eth_rxq {
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 695e3ed..de88a21 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -511,13 +511,16 @@
 
 static __rte_always_inline void
 cn10k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
-		       const uint64_t lso_tun_fmt, bool *sec)
+		       const uint64_t lso_tun_fmt, bool *sec, uint8_t mark_flag,
+		       uint64_t mark_fmt)
 {
+	uint8_t mark_off = 0, mark_vlan = 0, markptr = 0;
 	struct nix_send_ext_s *send_hdr_ext;
 	struct nix_send_hdr_s *send_hdr;
 	uint64_t ol_flags = 0, mask;
 	union nix_send_hdr_w1_u w1;
 	union nix_send_sg_s *sg;
+	uint16_t mark_form = 0;
 
 	send_hdr = (struct nix_send_hdr_s *)cmd;
 	if (flags & NIX_TX_NEED_EXT_HDR) {
@@ -525,7 +528,9 @@
 		sg = (union nix_send_sg_s *)(cmd + 4);
 		/* Clear previous markings */
 		send_hdr_ext->w0.lso = 0;
+		send_hdr_ext->w0.mark_en = 0;
 		send_hdr_ext->w1.u = 0;
+		ol_flags = m->ol_flags;
 	} else {
 		sg = (union nix_send_sg_s *)(cmd + 2);
 	}
@@ -621,6 +626,10 @@
 	}
 
 	if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+		const uint8_t ipv6 = !!(ol_flags & RTE_MBUF_F_TX_IPV6);
+		const uint8_t ip = !!(ol_flags & (RTE_MBUF_F_TX_IPV4 |
+						  RTE_MBUF_F_TX_IPV6));
+
 		send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
 		/* HW will update ptr after vlan0 update */
 		send_hdr_ext->w1.vlan1_ins_ptr = 12;
@@ -630,6 +639,22 @@
 		/* 2B before end of l2 header */
 		send_hdr_ext->w1.vlan0_ins_ptr = 12;
 		send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
+		/* Fill for VLAN marking only when VLAN insertion enabled */
+		mark_vlan = ((mark_flag & CNXK_TM_MARK_VLAN_DEI) &
+			     (send_hdr_ext->w1.vlan1_ins_ena ||
+			      send_hdr_ext->w1.vlan0_ins_ena));
+
+		/* Mask requested flags with packet data information */
+		mark_off = mark_flag & ((ip << 2) | (ip << 1) | mark_vlan);
+		mark_off = ffs(mark_off & CNXK_TM_MARK_MASK);
+
+		mark_form = (mark_fmt >> ((mark_off - !!mark_off) << 4));
+		mark_form = (mark_form >> (ipv6 << 3)) & 0xFF;
+		markptr = m->l2_len + (mark_form >> 7) - (mark_vlan << 2);
+
+		send_hdr_ext->w0.mark_en = !!mark_off;
+		send_hdr_ext->w0.markform = mark_form & 0x7F;
+		send_hdr_ext->w0.markptr = markptr;
 	}
 
 	if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
@@ -841,6 +866,8 @@
 	uintptr_t pa, lbase = txq->lmt_base;
 	uint16_t lmt_id, burst, left, i;
 	uintptr_t c_lbase = lbase;
+	uint64_t mark_fmt = 0;
+	uint8_t mark_flag = 0;
 	rte_iova_t c_io_addr;
 	uint64_t lso_tun_fmt;
 	uint16_t c_lmt_id;
@@ -860,6 +887,11 @@
 	if (flags & NIX_TX_OFFLOAD_TSO_F)
 		lso_tun_fmt = txq->lso_tun_fmt;
 
+	if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+		mark_fmt = txq->mark_fmt;
+		mark_flag = txq->mark_flag;
+	}
+
 	/* Get LMT base address and LMT ID as lcore id */
 	ROC_LMT_BASE_ID_GET(lbase, lmt_id);
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
@@ -887,7 +919,7 @@
 			cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
 
 		cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
-				       &sec);
+				       &sec, mark_flag, mark_fmt);
 
 		laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
 
@@ -967,6 +999,8 @@
 	uint16_t segdw, lmt_id, burst, left, i;
 	uint8_t lnum, c_lnum, c_loff;
 	uintptr_t c_lbase = lbase;
+	uint64_t mark_fmt = 0;
+	uint8_t mark_flag = 0;
 	uint64_t data0, data1;
 	rte_iova_t c_io_addr;
 	uint64_t lso_tun_fmt;
@@ -988,6 +1022,11 @@
 	if (flags & NIX_TX_OFFLOAD_TSO_F)
 		lso_tun_fmt = txq->lso_tun_fmt;
 
+	if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+		mark_fmt = txq->mark_fmt;
+		mark_flag = txq->mark_flag;
+	}
+
 	/* Get LMT base address and LMT ID as lcore id */
 	ROC_LMT_BASE_ID_GET(lbase, lmt_id);
 	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
@@ -1017,7 +1056,7 @@
 			cn10k_nix_xmit_prepare_tso(tx_pkts[i], flags);
 
 		cn10k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
-				       &sec);
+				       &sec, mark_flag, mark_fmt);
 
 		laddr = (uintptr_t)LMT_OFF(lbase, lnum, 0);
 
diff --git a/drivers/net/cnxk/cn10k_tx_select.c b/drivers/net/cnxk/cn10k_tx_select.c
index 9fdf014..54023c4 100644
--- a/drivers/net/cnxk/cn10k_tx_select.c
+++ b/drivers/net/cnxk/cn10k_tx_select.c
@@ -53,7 +53,7 @@
 #undef T
 	};
 
-	if (dev->scalar_ena) {
+	if (dev->scalar_ena || dev->tx_mark) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
 		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 6b049b2..ca17cbe 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -110,6 +110,9 @@
 	if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
 		flags |= NIX_TX_OFFLOAD_SECURITY_F;
 
+	if (dev->tx_mark)
+		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+
 	return flags;
 }
 
@@ -168,6 +171,7 @@
 			const struct rte_eth_txconf *tx_conf)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint64_t mark_fmt, mark_flag;
 	struct roc_cpt_lf *inl_lf;
 	struct cn9k_eth_txq *txq;
 	struct roc_nix_sq *sq;
@@ -204,6 +208,10 @@
 		PLT_STATIC_ASSERT(BIT_ULL(16) == ROC_NIX_INL_SA_BASE_ALIGN);
 	}
 
+	mark_fmt = roc_nix_tm_mark_format_get(&dev->nix, &mark_flag);
+	txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+	txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+
 	nix_form_default_desc(dev, txq, qid);
 	txq->lso_tun_fmt = dev->lso_tun_fmt;
 	return 0;
@@ -490,6 +498,118 @@
 	return 0;
 }
 
+static int
+cn9k_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+			  int mark_yellow, int mark_red,
+			  struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	int rc, i;
+
+	rc = cnxk_nix_tm_mark_vlan_dei(eth_dev, mark_green, mark_yellow,
+				       mark_red, error);
+
+	if (rc)
+		goto exit;
+
+	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+	if (mark_flag) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+		dev->tx_mark = true;
+	} else {
+		dev->tx_mark = false;
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+	}
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+	}
+	cn9k_eth_set_tx_function(eth_dev);
+exit:
+	return rc;
+}
+
+static int
+cn9k_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+			int mark_yellow, int mark_red,
+			struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	int rc, i;
+
+	rc = cnxk_nix_tm_mark_ip_ecn(eth_dev, mark_green, mark_yellow, mark_red,
+				     error);
+	if (rc)
+		goto exit;
+
+	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+	if (mark_flag) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+		dev->tx_mark = true;
+	} else {
+		dev->tx_mark = false;
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+	}
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+	}
+	cn9k_eth_set_tx_function(eth_dev);
+exit:
+	return rc;
+}
+
+static int
+cn9k_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+			 int mark_yellow, int mark_red,
+			 struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	uint64_t mark_fmt, mark_flag;
+	int rc, i;
+
+	rc = cnxk_nix_tm_mark_ip_dscp(eth_dev, mark_green, mark_yellow,
+				      mark_red, error);
+	if (rc)
+		goto exit;
+
+	mark_fmt = roc_nix_tm_mark_format_get(roc_nix, &mark_flag);
+	if (mark_flag) {
+		dev->tx_offload_flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+		dev->tx_mark = true;
+	} else {
+		dev->tx_mark = false;
+		if (!(dev->tx_offloads & RTE_ETH_TX_OFFLOAD_VLAN_INSERT ||
+		      dev->tx_offloads & RTE_ETH_TX_OFFLOAD_QINQ_INSERT))
+			dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_VLAN_QINQ_F;
+	}
+
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[i];
+
+		txq->mark_flag = mark_flag & CNXK_TM_MARK_MASK;
+		txq->mark_fmt = mark_fmt & CNXK_TX_MARK_FMT_MASK;
+	}
+	cn9k_eth_set_tx_function(eth_dev);
+exit:
+	return rc;
+}
+
 /* Update platform specific eth dev ops */
 static void
 nix_eth_dev_ops_override(void)
@@ -515,6 +635,22 @@
 		cn9k_nix_timesync_read_tx_timestamp;
 }
 
+/* Update platform specific tm ops */
+static void
+nix_tm_ops_override(void)
+{
+	static int init_once;
+
+	if (init_once)
+		return;
+	init_once = 1;
+
+	/* Update platform specific ops */
+	cnxk_tm_ops.mark_vlan_dei = cn9k_nix_tm_mark_vlan_dei;
+	cnxk_tm_ops.mark_ip_ecn = cn9k_nix_tm_mark_ip_ecn;
+	cnxk_tm_ops.mark_ip_dscp = cn9k_nix_tm_mark_ip_dscp;
+}
+
 static void
 npc_flow_ops_override(void)
 {
@@ -554,6 +690,7 @@
 	}
 
 	nix_eth_dev_ops_override();
+	nix_tm_ops_override();
 	npc_flow_ops_override();
 
 	cn9k_eth_sec_ops_override();
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index 8ab9249..449729f 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -22,6 +22,8 @@ struct cn9k_eth_txq {
 	uint64_t sa_base;
 	uint64_t *cpt_fc;
 	uint16_t cpt_desc;
+	uint64_t mark_flag : 8;
+	uint64_t mark_fmt : 48;
 } __plt_cache_aligned;
 
 struct cn9k_eth_rxq {
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index d23e4b6..f55cd4b 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -135,13 +135,16 @@
 
 static __rte_always_inline void
 cn9k_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags,
-		      const uint64_t lso_tun_fmt)
+		      const uint64_t lso_tun_fmt, uint8_t mark_flag,
+		      uint64_t mark_fmt)
 {
+	uint8_t mark_off = 0, mark_vlan = 0, markptr = 0;
 	struct nix_send_ext_s *send_hdr_ext;
 	struct nix_send_hdr_s *send_hdr;
 	uint64_t ol_flags = 0, mask;
 	union nix_send_hdr_w1_u w1;
 	union nix_send_sg_s *sg;
+	uint16_t mark_form = 0;
 
 	send_hdr = (struct nix_send_hdr_s *)cmd;
 	if (flags & NIX_TX_NEED_EXT_HDR) {
@@ -149,7 +152,9 @@
 		sg = (union nix_send_sg_s *)(cmd + 4);
 		/* Clear previous markings */
 		send_hdr_ext->w0.lso = 0;
+		send_hdr_ext->w0.mark_en = 0;
 		send_hdr_ext->w1.u = 0;
+		ol_flags = m->ol_flags;
 	} else {
 		sg = (union nix_send_sg_s *)(cmd + 2);
 	}
@@ -245,6 +250,10 @@
 	}
 
 	if (flags & NIX_TX_NEED_EXT_HDR && flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+		const uint8_t ipv6 = !!(ol_flags & RTE_MBUF_F_TX_IPV6);
+		const uint8_t ip = !!(ol_flags & (RTE_MBUF_F_TX_IPV4 |
+						  RTE_MBUF_F_TX_IPV6));
+
 		send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
 		/* HW will update ptr after vlan0 update */
 		send_hdr_ext->w1.vlan1_ins_ptr = 12;
@@ -254,6 +263,21 @@
 		/* 2B before end of l2 header */
 		send_hdr_ext->w1.vlan0_ins_ptr = 12;
 		send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
+		/* Fill for VLAN marking only when VLAN insertion enabled */
+		mark_vlan = ((mark_flag & CNXK_TM_MARK_VLAN_DEI) &
+			     (send_hdr_ext->w1.vlan1_ins_ena ||
+			      send_hdr_ext->w1.vlan0_ins_ena));
+		/* Mask requested flags with packet data information */
+		mark_off = mark_flag & ((ip << 2) | (ip << 1) | mark_vlan);
+		mark_off = ffs(mark_off & CNXK_TM_MARK_MASK);
+
+		mark_form = (mark_fmt >> ((mark_off - !!mark_off) << 4));
+		mark_form = (mark_form >> (ipv6 << 3)) & 0xFF;
+		markptr = m->l2_len + (mark_form >> 7) - (mark_vlan << 2);
+
+		send_hdr_ext->w0.mark_en = !!mark_off;
+		send_hdr_ext->w0.markform = mark_form & 0x7F;
+		send_hdr_ext->w0.markptr = markptr;
 	}
 
 	if (flags & NIX_TX_OFFLOAD_TSO_F && (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
@@ -502,8 +526,9 @@
 {
 	struct cn9k_eth_txq *txq = tx_queue;
 	const rte_iova_t io_addr = txq->io_addr;
+	uint64_t lso_tun_fmt, mark_fmt = 0;
 	void *lmt_addr = txq->lmt_addr;
-	uint64_t lso_tun_fmt;
+	uint8_t mark_flag = 0;
 	uint16_t i;
 
 	NIX_XMIT_FC_OR_RETURN(txq, pkts);
@@ -518,6 +543,11 @@
 			cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
 	}
 
+	if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+		mark_fmt = txq->mark_fmt;
+		mark_flag = txq->mark_flag;
+	}
+
 	/* Lets commit any changes in the packet here as no further changes
 	 * to the packet will be done unless no fast free is enabled.
 	 */
@@ -525,7 +555,8 @@
 		rte_io_wmb();
 
 	for (i = 0; i < pkts; i++) {
-		cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
+		cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
+				      mark_flag, mark_fmt);
 		cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags, 4,
 					     flags);
 		cn9k_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
@@ -543,8 +574,9 @@
 {
 	struct cn9k_eth_txq *txq = tx_queue;
 	const rte_iova_t io_addr = txq->io_addr;
+	uint64_t lso_tun_fmt, mark_fmt = 0;
 	void *lmt_addr = txq->lmt_addr;
-	uint64_t lso_tun_fmt;
+	uint8_t mark_flag = 0;
 	uint16_t segdw;
 	uint64_t i;
 
@@ -560,6 +592,11 @@
 			cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
 	}
 
+	if (flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+		mark_fmt = txq->mark_fmt;
+		mark_flag = txq->mark_flag;
+	}
+
 	/* Lets commit any changes in the packet here as no further changes
 	 * to the packet will be done unless no fast free is enabled.
 	 */
@@ -567,7 +604,8 @@
 		rte_io_wmb();
 
 	for (i = 0; i < pkts; i++) {
-		cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
+		cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt,
+				      mark_flag, mark_fmt);
 		segdw = cn9k_nix_prepare_mseg(tx_pkts[i], cmd, flags);
 		cn9k_nix_xmit_prepare_tstamp(txq, cmd, tx_pkts[i]->ol_flags,
 					     segdw, flags);
diff --git a/drivers/net/cnxk/cn9k_tx_select.c b/drivers/net/cnxk/cn9k_tx_select.c
index 407ede9..62beb1b 100644
--- a/drivers/net/cnxk/cn9k_tx_select.c
+++ b/drivers/net/cnxk/cn9k_tx_select.c
@@ -49,7 +49,7 @@
 #undef T
 	};
 
-	if (dev->scalar_ena) {
+	if (dev->scalar_ena || dev->tx_mark) {
 		pick_tx_func(eth_dev, nix_eth_tx_burst);
 		if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
 			pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 445b7ab..a4d3b2f 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -18,6 +18,7 @@
 #include <rte_security_driver.h>
 #include <rte_tailq.h>
 #include <rte_time.h>
+#include <rte_tm_driver.h>
 
 #include "roc_api.h"
 
@@ -139,6 +140,15 @@
 
 #define CNXK_NIX_PFC_CHAN_COUNT 16
 
+#define CNXK_TM_MARK_VLAN_DEI BIT_ULL(0)
+#define CNXK_TM_MARK_IP_DSCP  BIT_ULL(1)
+#define CNXK_TM_MARK_IP_ECN   BIT_ULL(2)
+
+#define CNXK_TM_MARK_MASK                                                      \
+	(CNXK_TM_MARK_VLAN_DEI | CNXK_TM_MARK_IP_DSCP | CNXK_TM_MARK_IP_ECN)
+
+#define CNXK_TX_MARK_FMT_MASK (0xFFFFFFFFFFFFull)
+
 struct cnxk_fc_cfg {
 	enum rte_eth_fc_mode mode;
 	uint8_t rx_pause;
@@ -350,6 +360,7 @@ struct cnxk_eth_dev {
 	uint16_t flags;
 	uint8_t ptype_disable;
 	bool scalar_ena;
+	bool tx_mark;
 	bool ptp_en;
 	bool rx_mark_update; /* Enable/Disable mark update to mbuf */
 
@@ -464,6 +475,9 @@ struct cnxk_eth_txq_sp {
 /* Common security ops */
 extern struct rte_security_ops cnxk_eth_sec_ops;
 
+/* Common tm ops */
+extern struct rte_tm_ops cnxk_tm_ops;
+
 /* Ops */
 int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
 		   struct rte_pci_device *pci_dev);
@@ -540,6 +554,15 @@ int cnxk_nix_timesync_write_time(struct rte_eth_dev *eth_dev,
 int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
 int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
 				     uint16_t queue_idx, uint16_t tx_rate);
+int cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+			      int mark_yellow, int mark_red,
+			      struct rte_tm_error *error);
+int cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+			    int mark_yellow, int mark_red,
+			    struct rte_tm_error *error);
+int cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+			     int mark_yellow, int mark_red,
+			     struct rte_tm_error *error);
 
 /* MTR */
 int cnxk_nix_mtr_ops_get(struct rte_eth_dev *dev, void *ops);
diff --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c
index 9015a45..d45e70a 100644
--- a/drivers/net/cnxk/cnxk_tm.c
+++ b/drivers/net/cnxk/cnxk_tm.c
@@ -88,10 +88,16 @@
 			  RTE_TM_STATS_N_PKTS_RED_DROPPED |
 			  RTE_TM_STATS_N_BYTES_RED_DROPPED;
 
-	for (i = 0; i < RTE_COLORS; i++) {
-		cap->mark_vlan_dei_supported[i] = false;
-		cap->mark_ip_ecn_tcp_supported[i] = false;
-		cap->mark_ip_dscp_supported[i] = false;
+	cap->mark_vlan_dei_supported[RTE_COLOR_GREEN] = false;
+	cap->mark_ip_ecn_tcp_supported[RTE_COLOR_GREEN] = false;
+	cap->mark_ip_ecn_sctp_supported[RTE_COLOR_GREEN] = false;
+	cap->mark_ip_dscp_supported[RTE_COLOR_GREEN] = false;
+
+	for (i = RTE_COLOR_YELLOW; i < RTE_COLORS; i++) {
+		cap->mark_vlan_dei_supported[i] = true;
+		cap->mark_ip_ecn_tcp_supported[i] = true;
+		cap->mark_ip_ecn_sctp_supported[i] = true;
+		cap->mark_ip_dscp_supported[i] = true;
 	}
 
 	return 0;
@@ -599,7 +605,112 @@
 	return rc;
 }
 
-const struct rte_tm_ops cnxk_tm_ops = {
+int
+cnxk_nix_tm_mark_vlan_dei(struct rte_eth_dev *eth_dev, int mark_green,
+			  int mark_yellow, int mark_red,
+			  struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	int rc;
+
+	if (mark_green) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "Green VLAN marking not supported";
+		return -EINVAL;
+	}
+
+	if (eth_dev->data->dev_started) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "VLAN DEI mark for running ports not "
+				 "supported";
+		return -EBUSY;
+	}
+
+	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_VLAN_DEI,
+				    mark_yellow, mark_red);
+	if (rc) {
+		error->type = roc_nix_tm_err_to_rte_err(rc);
+		error->message = roc_error_msg_get(rc);
+	}
+	return rc;
+}
+
+int
+cnxk_nix_tm_mark_ip_ecn(struct rte_eth_dev *eth_dev, int mark_green,
+			int mark_yellow, int mark_red,
+			struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	int rc;
+
+	if (mark_green) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "Green IP ECN marking not supported";
+		return -EINVAL;
+	}
+
+	if (eth_dev->data->dev_started) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "IP ECN mark for running ports not "
+				 "supported";
+		return -EBUSY;
+	}
+
+	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV4_ECN,
+				    mark_yellow, mark_red);
+	if (rc < 0)
+		goto exit;
+
+	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV6_ECN,
+				    mark_yellow, mark_red);
+exit:
+	if (rc < 0) {
+		error->type = roc_nix_tm_err_to_rte_err(rc);
+		error->message = roc_error_msg_get(rc);
+	}
+	return rc;
+}
+
+int
+cnxk_nix_tm_mark_ip_dscp(struct rte_eth_dev *eth_dev, int mark_green,
+			 int mark_yellow, int mark_red,
+			 struct rte_tm_error *error)
+{
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *roc_nix = &dev->nix;
+	int rc;
+
+	if (mark_green) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "Green IP DSCP marking not supported";
+		return -EINVAL;
+	}
+
+	if (eth_dev->data->dev_started) {
+		error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+		error->message = "IP DSCP mark for running ports not "
+				 "supported";
+		return -EBUSY;
+	}
+
+	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV4_DSCP,
+				    mark_yellow, mark_red);
+	if (rc < 0)
+		goto exit;
+
+	rc = roc_nix_tm_mark_config(roc_nix, ROC_NIX_TM_MARK_IPV6_DSCP,
+				    mark_yellow, mark_red);
+exit:
+	if (rc < 0) {
+		error->type = roc_nix_tm_err_to_rte_err(rc);
+		error->message = roc_error_msg_get(rc);
+	}
+	return rc;
+}
+
+struct rte_tm_ops cnxk_tm_ops = {
 	.node_type_get = cnxk_nix_tm_node_type_get,
 	.capabilities_get = cnxk_nix_tm_capa_get,
 	.level_capabilities_get = cnxk_nix_tm_level_capa_get,
@@ -617,6 +728,10 @@
 	.node_shaper_update = cnxk_nix_tm_node_shaper_update,
 	.node_parent_update = cnxk_nix_tm_node_parent_update,
 	.node_stats_read = cnxk_nix_tm_node_stats_read,
+
+	.mark_vlan_dei = cnxk_nix_tm_mark_vlan_dei,
+	.mark_ip_ecn = cnxk_nix_tm_mark_ip_ecn,
+	.mark_ip_dscp = cnxk_nix_tm_mark_ip_dscp,
 };
 
 int
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH v2 3/3] common/cnxk: check SQ node before setting bp config
  2022-02-25  4:59 ` [PATCH v2 " skoteshwar
  2022-02-25  4:59   ` [PATCH v2 2/3] net/cnxk: event/cnxk: enable packet marking callbacks skoteshwar
@ 2022-02-25  4:59   ` skoteshwar
  2022-02-25  7:48   ` [PATCH v2 1/3] common/cnxk: enable packet marking Jerin Jacob
  2 siblings, 0 replies; 8+ messages in thread
From: skoteshwar @ 2022-02-25  4:59 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev

From: Satha Rao <skoteshwar@marvell.com>

Validate sq_node and parent before accessing their fields.
An SQ may have been created without any associated TM node; this is a
valid negative case, so return success when stopping TM without an SQ
node.

Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
 drivers/common/cnxk/roc_nix_tm.c     | 8 ++++++--
 drivers/common/cnxk/roc_nix_tm_ops.c | 2 +-
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 5b23ecd..7a17780 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -325,14 +325,17 @@
 	struct mbox *mbox = (&nix->dev)->mbox;
 	struct nix_txschq_config *req = NULL;
 	struct nix_tm_node_list *list;
+	uint16_t link = nix->tx_link;
 	struct nix_tm_node *sq_node;
 	struct nix_tm_node *parent;
 	struct nix_tm_node *node;
 	uint8_t k = 0;
-	uint16_t link;
 	int rc = 0;
 
 	sq_node = nix_tm_node_search(nix, sq, nix->tm_tree);
+	if (!sq_node)
+		return -ENOENT;
+
 	parent = sq_node->parent;
 	while (parent) {
 		if (parent->lvl == ROC_TM_LVL_SCH2)
@@ -340,9 +343,10 @@
 
 		parent = parent->parent;
 	}
+	if (!parent)
+		return -ENOENT;
 
 	list = nix_tm_node_list(nix, tree);
-	link = nix->tx_link;
 
 	if (parent->rel_chan != NIX_TM_CHAN_INVALID && parent->rel_chan != tc) {
 		rc = -EINVAL;
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 5a25b3e..1d9a02b 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -474,7 +474,7 @@
 			continue;
 
 		rc = nix_tm_bp_config_set(roc_nix, sq->qid, 0, false);
-		if (rc) {
+		if (rc && rc != -ENOENT) {
 			plt_err("Failed to disable backpressure, rc=%d", rc);
 			goto cleanup;
 		}
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v2 1/3] common/cnxk: enable packet marking
  2022-02-25  4:59 ` [PATCH v2 " skoteshwar
  2022-02-25  4:59   ` [PATCH v2 2/3] net/cnxk: event/cnxk: enable packet marking callbacks skoteshwar
  2022-02-25  4:59   ` [PATCH v2 3/3] common/cnxk: check SQ node before setting bp config skoteshwar
@ 2022-02-25  7:48   ` Jerin Jacob
  2 siblings, 0 replies; 8+ messages in thread
From: Jerin Jacob @ 2022-02-25  7:48 UTC (permalink / raw)
  To: Satha Koteswara Rao Kottidi
  Cc: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Ray Kinsella,
	dpdk-dev

On Fri, Feb 25, 2022 at 10:30 AM <skoteshwar@marvell.com> wrote:
>
> From: Satha Rao <skoteshwar@marvell.com>
>
> cnxk platforms supports packet marking when TM enabled with
> valid shaper rates. VLAN DEI, IP ECN, or IP DSCP inside
> packet will be updated based on mark flags selected.
>
> Signed-off-by: Satha Rao <skoteshwar@marvell.com>
> ---
>
> v2:
> - rebased to master, fixed conflicts

# Updated release notes for the cnxk driver section with "Added support
for packet marking in traffic manager."

Series Acked-by: Jerin Jacob <jerinj@marvell.com>
Series applied to dpdk-next-net-mrvl/for-next-net. Thanks.


>
>  drivers/common/cnxk/meson.build       |   1 +
>  drivers/common/cnxk/roc_nix.h         |  21 +++
>  drivers/common/cnxk/roc_nix_priv.h    |  23 ++-
>  drivers/common/cnxk/roc_nix_tm.c      |   4 +
>  drivers/common/cnxk/roc_nix_tm_mark.c | 295 ++++++++++++++++++++++++++++++++++
>  drivers/common/cnxk/version.map       |   2 +
>  6 files changed, 343 insertions(+), 3 deletions(-)
>  create mode 100644 drivers/common/cnxk/roc_nix_tm_mark.c
>
> diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
> index 2834846..6f80827 100644
> --- a/drivers/common/cnxk/meson.build
> +++ b/drivers/common/cnxk/meson.build
> @@ -44,6 +44,7 @@ sources = files(
>          'roc_nix_rss.c',
>          'roc_nix_stats.c',
>          'roc_nix_tm.c',
> +        'roc_nix_tm_mark.c',
>          'roc_nix_tm_ops.c',
>          'roc_nix_tm_utils.c',
>          'roc_nix_vlan.c',
> diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
> index 10e8375..5e6eb58 100644
> --- a/drivers/common/cnxk/roc_nix.h
> +++ b/drivers/common/cnxk/roc_nix.h
> @@ -570,6 +570,22 @@ struct roc_nix_tm_node_stats {
>         uint64_t stats[ROC_NIX_TM_NODE_STATS_MAX];
>  };
>
> +enum roc_nix_tm_mark {
> +       ROC_NIX_TM_MARK_VLAN_DEI,
> +       ROC_NIX_TM_MARK_IPV4_DSCP,
> +       ROC_NIX_TM_MARK_IPV4_ECN,
> +       ROC_NIX_TM_MARK_IPV6_DSCP,
> +       ROC_NIX_TM_MARK_IPV6_ECN,
> +       ROC_NIX_TM_MARK_MAX
> +};
> +
> +enum roc_nix_tm_mark_color {
> +       ROC_NIX_TM_MARK_COLOR_Y,
> +       ROC_NIX_TM_MARK_COLOR_R,
> +       ROC_NIX_TM_MARK_COLOR_Y_R,
> +       ROC_NIX_TM_MARK_COLOR_MAX
> +};
> +
>  int __roc_api roc_nix_tm_node_add(struct roc_nix *roc_nix,
>                                   struct roc_nix_tm_node *roc_node);
>  int __roc_api roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id,
> @@ -646,6 +662,11 @@ int __roc_api roc_nix_tm_node_name_get(struct roc_nix *roc_nix,
>  int __roc_api roc_nix_tm_pfc_prepare_tree(struct roc_nix *roc_nix);
>  bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
>  int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
> +int __roc_api roc_nix_tm_mark_config(struct roc_nix *roc_nix,
> +                                    enum roc_nix_tm_mark type, int mark_yellow,
> +                                    int mark_red);
> +uint64_t __roc_api roc_nix_tm_mark_format_get(struct roc_nix *roc_nix,
> +                                             uint64_t *flags);
>
>  /* Ingress Policer API */
>  int __roc_api roc_nix_bpf_timeunit_get(struct roc_nix *roc_nix,
> diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
> index 5d45f75..9b9ffae 100644
> --- a/drivers/common/cnxk/roc_nix_priv.h
> +++ b/drivers/common/cnxk/roc_nix_priv.h
> @@ -36,9 +36,22 @@ struct nix_qint {
>  #define NIX_TM_CHAN_INVALID UINT16_MAX
>
>  /* TM flags */
> -#define NIX_TM_HIERARCHY_ENA BIT_ULL(0)
> -#define NIX_TM_TL1_NO_SP     BIT_ULL(1)
> -#define NIX_TM_TL1_ACCESS    BIT_ULL(2)
> +#define NIX_TM_HIERARCHY_ENA   BIT_ULL(0)
> +#define NIX_TM_TL1_NO_SP       BIT_ULL(1)
> +#define NIX_TM_TL1_ACCESS      BIT_ULL(2)
> +#define NIX_TM_MARK_VLAN_DEI_EN BIT_ULL(3)
> +#define NIX_TM_MARK_IP_DSCP_EN BIT_ULL(4)
> +#define NIX_TM_MARK_IP_ECN_EN  BIT_ULL(5)
> +
> +#define NIX_TM_MARK_EN_MASK                                                    \
> +       (NIX_TM_MARK_IP_DSCP_EN | NIX_TM_MARK_IP_ECN_EN |                      \
> +        NIX_TM_MARK_VLAN_DEI_EN)
> +
> +#define NIX_TM_MARK_VLAN_DEI_SHIFT  0 /* Leave 16b for VLAN for FP logic */
> +#define NIX_TM_MARK_IPV4_DSCP_SHIFT 16
> +#define NIX_TM_MARK_IPV6_DSCP_SHIFT 24
> +#define NIX_TM_MARK_IPV4_ECN_SHIFT  32
> +#define NIX_TM_MARK_IPV6_ECN_SHIFT  40
>
>  struct nix_tm_tb {
>         /** Token bucket rate (bytes per second) */
> @@ -170,6 +183,9 @@ struct nix {
>         uint16_t tm_link_cfg_lvl;
>         uint16_t contig_rsvd[NIX_TXSCH_LVL_CNT];
>         uint16_t discontig_rsvd[NIX_TXSCH_LVL_CNT];
> +       uint64_t tm_markfmt_en;
> +       uint8_t tm_markfmt_null;
> +       uint8_t tm_markfmt[ROC_NIX_TM_MARK_MAX][ROC_NIX_TM_MARK_COLOR_MAX];
>
>         /* Ipsec info */
>         uint16_t cpt_msixoff[MAX_RVU_BLKLF_CNT];
> @@ -386,6 +402,7 @@ int nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
>  int nix_tm_bp_config_set(struct roc_nix *roc_nix, uint16_t sq, uint16_t tc,
>                          bool enable);
>  void nix_rq_vwqe_flush(struct roc_nix_rq *rq, uint16_t vwqe_interval);
> +int nix_tm_mark_init(struct nix *nix);
>
>  /*
>   * TM priv utils.
> diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
> index ecf3edf..5b23ecd 100644
> --- a/drivers/common/cnxk/roc_nix_tm.c
> +++ b/drivers/common/cnxk/roc_nix_tm.c
> @@ -1692,6 +1692,10 @@
>                 bmp_mem = PLT_PTR_ADD(bmp_mem, bmp_sz);
>         }
>
> +       rc = nix_tm_mark_init(nix);
> +       if (rc)
> +               goto exit;
> +
>         /* Disable TL1 Static Priority when VF's are enabled
>          * as otherwise VF's TL2 reallocation will be needed
>          * runtime to support a specific topology of PF.
> diff --git a/drivers/common/cnxk/roc_nix_tm_mark.c b/drivers/common/cnxk/roc_nix_tm_mark.c
> new file mode 100644
> index 0000000..64cf679
> --- /dev/null
> +++ b/drivers/common/cnxk/roc_nix_tm_mark.c
> @@ -0,0 +1,295 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2022 Marvell.
> + */
> +
> +#include "roc_api.h"
> +#include "roc_priv.h"
> +
> +static const uint8_t y_mask_val[ROC_NIX_TM_MARK_MAX][2] = {
> +       [ROC_NIX_TM_MARK_VLAN_DEI] = {0x0, 0x8},
> +       [ROC_NIX_TM_MARK_IPV4_DSCP] = {0x1, 0x2},
> +       [ROC_NIX_TM_MARK_IPV4_ECN] = {0x0, 0xc},
> +       [ROC_NIX_TM_MARK_IPV6_DSCP] = {0x1, 0x2},
> +       [ROC_NIX_TM_MARK_IPV6_ECN] = {0x0, 0x3},
> +};
> +
> +static const uint8_t r_mask_val[ROC_NIX_TM_MARK_MAX][2] = {
> +       [ROC_NIX_TM_MARK_VLAN_DEI] = {0x0, 0x8},
> +       [ROC_NIX_TM_MARK_IPV4_DSCP] = {0x0, 0x3},
> +       [ROC_NIX_TM_MARK_IPV4_ECN] = {0x0, 0xc},
> +       [ROC_NIX_TM_MARK_IPV6_DSCP] = {0x0, 0x3},
> +       [ROC_NIX_TM_MARK_IPV6_ECN] = {0x0, 0x3},
> +};
> +
> +static const uint8_t mark_off[ROC_NIX_TM_MARK_MAX] = {
> +       [ROC_NIX_TM_MARK_VLAN_DEI] = 0x3,  /* Byte 14 Bit[4:1] */
> +       [ROC_NIX_TM_MARK_IPV4_DSCP] = 0x1, /* Byte 1 Bit[6:3] */
> +       [ROC_NIX_TM_MARK_IPV4_ECN] = 0x6, /* Byte 1 Bit[1:0], Byte 2 Bit[7:6] */
> +       [ROC_NIX_TM_MARK_IPV6_DSCP] = 0x5, /* Byte 0 Bit[2:0], Byte 1 Bit[7] */
> +       [ROC_NIX_TM_MARK_IPV6_ECN] = 0x0,  /* Byte 1 Bit[7:4] */
> +};
> +
> +static const uint64_t mark_flag[ROC_NIX_TM_MARK_MAX] = {
> +       [ROC_NIX_TM_MARK_VLAN_DEI] = NIX_TM_MARK_VLAN_DEI_EN,
> +       [ROC_NIX_TM_MARK_IPV4_DSCP] = NIX_TM_MARK_IP_DSCP_EN,
> +       [ROC_NIX_TM_MARK_IPV4_ECN] = NIX_TM_MARK_IP_ECN_EN,
> +       [ROC_NIX_TM_MARK_IPV6_DSCP] = NIX_TM_MARK_IP_DSCP_EN,
> +       [ROC_NIX_TM_MARK_IPV6_ECN] = NIX_TM_MARK_IP_ECN_EN,
> +};
> +
> +static uint8_t
> +prepare_tm_shaper_red_algo(struct nix_tm_node *tm_node, volatile uint64_t *reg,
> +                          volatile uint64_t *regval,
> +                          volatile uint64_t *regval_mask)
> +{
> +       uint32_t schq = tm_node->hw_id;
> +       uint8_t k = 0;
> +
> +       plt_tm_dbg("Shaper read alg node %s(%u) lvl %u id %u, red_alg %x (%p)",
> +                  nix_tm_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
> +                  tm_node->id, tm_node->red_algo, tm_node);
> +
> +       /* Configure just RED algo */
> +       regval[k] = ((uint64_t)tm_node->red_algo << 9);
> +       regval_mask[k] = ~(BIT_ULL(10) | BIT_ULL(9));
> +
> +       switch (tm_node->hw_lvl) {
> +       case NIX_TXSCH_LVL_SMQ:
> +               reg[k] = NIX_AF_MDQX_SHAPE(schq);
> +               k++;
> +               break;
> +       case NIX_TXSCH_LVL_TL4:
> +               reg[k] = NIX_AF_TL4X_SHAPE(schq);
> +               k++;
> +               break;
> +       case NIX_TXSCH_LVL_TL3:
> +               reg[k] = NIX_AF_TL3X_SHAPE(schq);
> +               k++;
> +               break;
> +       case NIX_TXSCH_LVL_TL2:
> +               reg[k] = NIX_AF_TL2X_SHAPE(schq);
> +               k++;
> +               break;
> +       default:
> +               break;
> +       }
> +
> +       return k;
> +}
> +
> +/* Only called while device is stopped */
> +static int
> +nix_tm_update_red_algo(struct nix *nix, bool red_send)
> +{
> +       struct mbox *mbox = (&nix->dev)->mbox;
> +       struct nix_txschq_config *req;
> +       struct nix_tm_node_list *list;
> +       struct nix_tm_node *tm_node;
> +       uint8_t k;
> +       int rc;
> +
> +       list = nix_tm_node_list(nix, nix->tm_tree);
> +       TAILQ_FOREACH(tm_node, list, node) {
> +               /* Skip leaf nodes */
> +               if (nix_tm_is_leaf(nix, tm_node->lvl))
> +                       continue;
> +
> +               if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1)
> +                       continue;
> +
> +               /* Skip if no update of red_algo is needed */
> +               if ((red_send && (tm_node->red_algo == NIX_REDALG_SEND)) ||
> +                   (!red_send && (tm_node->red_algo != NIX_REDALG_SEND)))
> +                       continue;
> +
> +               /* Update Red algo */
> +               if (red_send)
> +                       tm_node->red_algo = NIX_REDALG_SEND;
> +               else
> +                       tm_node->red_algo = NIX_REDALG_STD;
> +
> +               /* Update txschq config  */
> +               req = mbox_alloc_msg_nix_txschq_cfg(mbox);
> +               req->lvl = tm_node->hw_lvl;
> +               k = prepare_tm_shaper_red_algo(tm_node, req->reg, req->regval,
> +                                              req->regval_mask);
> +               req->num_regs = k;
> +
> +               rc = mbox_process(mbox);
> +               if (rc)
> +                       return rc;
> +       }
> +       return 0;
> +}
> +
> +/* Return's true if queue reconfig is needed */
> +static bool
> +nix_tm_update_markfmt(struct nix *nix, enum roc_nix_tm_mark type,
> +                     int mark_yellow, int mark_red)
> +{
> +       uint64_t new_markfmt, old_markfmt;
> +       uint8_t *tm_markfmt;
> +       uint8_t en_shift;
> +       uint64_t mask;
> +
> +       if (type >= ROC_NIX_TM_MARK_MAX)
> +               return false;
> +
> +       /* Pre-allocated mark formats for type:color combinations */
> +       tm_markfmt = nix->tm_markfmt[type];
> +
> +       if (!mark_yellow && !mark_red) {
> +               /* Null format to disable */
> +               new_markfmt = nix->tm_markfmt_null;
> +       } else {
> +               /* Marking enabled with combination of yellow and red */
> +               if (mark_yellow && mark_red)
> +                       new_markfmt = tm_markfmt[ROC_NIX_TM_MARK_COLOR_Y_R];
> +               else if (mark_yellow)
> +                       new_markfmt = tm_markfmt[ROC_NIX_TM_MARK_COLOR_Y];
> +               else
> +                       new_markfmt = tm_markfmt[ROC_NIX_TM_MARK_COLOR_R];
> +       }
> +
> +       mask = 0xFFull;
> +       /* Format of fast path markfmt
> +        * ipv6_ecn[8]:ipv4_ecn[8]:ipv6_dscp[8]:ipv4_dscp[8]:vlan_dei[16]
> +        * fmt[7] = ptr offset for IPv4/IPv6 on l2_len.
> +        * fmt[6:0] = markfmt idx.
> +        */
> +       switch (type) {
> +       case ROC_NIX_TM_MARK_VLAN_DEI:
> +               en_shift = NIX_TM_MARK_VLAN_DEI_SHIFT;
> +               mask = 0xFFFFull;
> +               new_markfmt |= new_markfmt << 8;
> +               break;
> +       case ROC_NIX_TM_MARK_IPV4_DSCP:
> +               new_markfmt |= BIT_ULL(7);
> +               en_shift = NIX_TM_MARK_IPV4_DSCP_SHIFT;
> +               break;
> +       case ROC_NIX_TM_MARK_IPV4_ECN:
> +               new_markfmt |= BIT_ULL(7);
> +               en_shift = NIX_TM_MARK_IPV4_ECN_SHIFT;
> +               break;
> +       case ROC_NIX_TM_MARK_IPV6_DSCP:
> +               en_shift = NIX_TM_MARK_IPV6_DSCP_SHIFT;
> +               break;
> +       case ROC_NIX_TM_MARK_IPV6_ECN:
> +               new_markfmt |= BIT_ULL(7);
> +               en_shift = NIX_TM_MARK_IPV6_ECN_SHIFT;
> +               break;
> +       default:
> +               return false;
> +       }
> +
> +       /* Skip if same as old config */
> +       old_markfmt = (nix->tm_markfmt_en >> en_shift) & mask;
> +       if (old_markfmt == new_markfmt)
> +               return false;
> +
> +       /* Need queue reconfig */
> +       nix->tm_markfmt_en &= ~(mask << en_shift);
> +       nix->tm_markfmt_en |= (new_markfmt << en_shift);
> +
> +       return true;
> +}
> +
> +int
> +nix_tm_mark_init(struct nix *nix)
> +{
> +       struct mbox *mbox = (&nix->dev)->mbox;
> +       struct nix_mark_format_cfg_rsp *rsp;
> +       struct nix_mark_format_cfg *req;
> +       int rc, i, j;
> +
> +       /* Check for supported revisions */
> +       if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
> +               return 0;
> +
> +       /* Null mark format */
> +       req = mbox_alloc_msg_nix_mark_format_cfg(mbox);
> +       rc = mbox_process_msg(mbox, (void *)&rsp);
> +       if (rc) {
> +               plt_err("TM failed to alloc null mark format, rc=%d", rc);
> +               goto exit;
> +       }
> +
> +       nix->tm_markfmt_null = rsp->mark_format_idx;
> +
> +       /* Alloc vlan, dscp, ecn mark formats */
> +       for (i = 0; i < ROC_NIX_TM_MARK_MAX; i++) {
> +               for (j = 0; j < ROC_NIX_TM_MARK_COLOR_MAX; j++) {
> +                       req = mbox_alloc_msg_nix_mark_format_cfg(mbox);
> +                       req->offset = mark_off[i];
> +
> +                       switch (j) {
> +                       case ROC_NIX_TM_MARK_COLOR_Y:
> +                               req->y_mask = y_mask_val[i][0];
> +                               req->y_val = y_mask_val[i][1];
> +                               break;
> +                       case ROC_NIX_TM_MARK_COLOR_R:
> +                               req->r_mask = r_mask_val[i][0];
> +                               req->r_val = r_mask_val[i][1];
> +                               break;
> +                       case ROC_NIX_TM_MARK_COLOR_Y_R:
> +                               req->y_mask = y_mask_val[i][0];
> +                               req->y_val = y_mask_val[i][1];
> +                               req->r_mask = r_mask_val[i][0];
> +                               req->r_val = r_mask_val[i][1];
> +                               break;
> +                       }
> +
> +                       rc = mbox_process_msg(mbox, (void *)&rsp);
> +                       if (rc) {
> +                               plt_err("TM failed to alloc mark fmt "
> +                                       "type %u color %u, rc=%d",
> +                                       i, j, rc);
> +                               goto exit;
> +                       }
> +
> +                       nix->tm_markfmt[i][j] = rsp->mark_format_idx;
> +                       plt_tm_dbg("Mark type: %u, Mark Color:%u, id:%u\n", i,
> +                                  j, nix->tm_markfmt[i][j]);
> +               }
> +       }
> +       /* Update null mark format as default */
> +       nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_VLAN_DEI, 0, 0);
> +       nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV4_DSCP, 0, 0);
> +       nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV4_ECN, 0, 0);
> +       nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV6_DSCP, 0, 0);
> +       nix_tm_update_markfmt(nix, ROC_NIX_TM_MARK_IPV6_ECN, 0, 0);
> +exit:
> +       return rc;
> +}
> +
> +int
> +roc_nix_tm_mark_config(struct roc_nix *roc_nix, enum roc_nix_tm_mark type,
> +                      int mark_yellow, int mark_red)
> +{
> +       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> +       int rc;
> +
> +       if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
> +               return -EINVAL;
> +
> +       rc = nix_tm_update_markfmt(nix, type, mark_yellow, mark_red);
> +       if (!rc)
> +               return 0;
> +
> +       if (!mark_yellow && !mark_red)
> +               nix->tm_flags &= ~mark_flag[type];
> +       else
> +               nix->tm_flags |= mark_flag[type];
> +
> +       /* Update red algo for change in mark_red */
> +       return nix_tm_update_red_algo(nix, !!mark_red);
> +}
> +
> +uint64_t
> +roc_nix_tm_mark_format_get(struct roc_nix *roc_nix, uint64_t *flags)
> +{
> +       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> +
> +       *flags = ((nix->tm_flags & NIX_TM_MARK_EN_MASK) >> 3);
> +       return nix->tm_markfmt_en;
> +}
> diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
> index 44adf91..d346e6f 100644
> --- a/drivers/common/cnxk/version.map
> +++ b/drivers/common/cnxk/version.map
> @@ -256,6 +256,8 @@ INTERNAL {
>         roc_nix_tm_leaf_cnt;
>         roc_nix_tm_lvl_have_link_access;
>         roc_nix_tm_lvl_is_leaf;
> +       roc_nix_tm_mark_config;
> +       roc_nix_tm_mark_format_get;
>         roc_nix_tm_max_prio;
>         roc_nix_tm_node_add;
>         roc_nix_tm_node_delete;
> --
> 1.8.3.1
>

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2022-02-25  7:48 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-02-24  9:57 [PATCH 1/3] common/cnxk: enable packet marking skoteshwar
2022-02-24  9:57 ` [PATCH 2/3] net/cnxk: event/cnxk: enable packet marking callbacks skoteshwar
2022-02-24  9:57 ` [PATCH 3/3] common/cnxk: check SQ node before setting bp config skoteshwar
2022-02-24 19:36 ` [PATCH 1/3] common/cnxk: enable packet marking Jerin Jacob
2022-02-25  4:59 ` [PATCH v2 " skoteshwar
2022-02-25  4:59   ` [PATCH v2 2/3] net/cnxk: event/cnxk: enable packet marking callbacks skoteshwar
2022-02-25  4:59   ` [PATCH v2 3/3] common/cnxk: check SQ node before setting bp config skoteshwar
2022-02-25  7:48   ` [PATCH v2 1/3] common/cnxk: enable packet marking Jerin Jacob

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).