DPDK patches and discussions
 help / color / mirror / Atom feed
From: Serhii Iliushyk <sil-plv@napatech.com>
To: dev@dpdk.org
Cc: mko-plv@napatech.com, sil-plv@napatech.com, ckm@napatech.com,
	andrew.rybchenko@oktetlabs.ru, ferruh.yigit@amd.com,
	Danylo Vodopianov <dvo-plv@napatech.com>
Subject: [PATCH v1 32/73] net/ntnic: add TPE module
Date: Mon, 21 Oct 2024 23:04:34 +0200	[thread overview]
Message-ID: <20241021210527.2075431-33-sil-plv@napatech.com> (raw)
In-Reply-To: <20241021210527.2075431-1-sil-plv@napatech.com>

From: Danylo Vodopianov <dvo-plv@napatech.com>

The TX Packet Editor is a software abstraction module
that keeps track of the handful of FPGA modules
used to edit packets in the TX pipeline.

Signed-off-by: Danylo Vodopianov <dvo-plv@napatech.com>
---
 drivers/net/ntnic/include/hw_mod_backend.h    |  16 +
 .../ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c   | 757 ++++++++++++++++++
 .../profile_inline/flow_api_hw_db_inline.c    | 373 +++++++++
 .../profile_inline/flow_api_hw_db_inline.h    |  70 ++
 .../profile_inline/flow_api_profile_inline.c  | 127 ++-
 5 files changed, 1342 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ntnic/include/hw_mod_backend.h b/drivers/net/ntnic/include/hw_mod_backend.h
index cee148807a..e16dcd478f 100644
--- a/drivers/net/ntnic/include/hw_mod_backend.h
+++ b/drivers/net/ntnic/include/hw_mod_backend.h
@@ -889,24 +889,40 @@ void hw_mod_tpe_free(struct flow_api_backend_s *be);
 int hw_mod_tpe_reset(struct flow_api_backend_s *be);
 
 int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value);
 
 int hw_mod_tpe_rpp_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
 
 int hw_mod_tpe_ifr_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
 
 int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value);
 
 int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value);
 
 int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value);
 
 int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t *value);
 
 int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value);
 
 int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value);
 
 int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int count);
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value);
 
 enum debug_mode_e {
 	FLOW_BACKEND_DEBUG_MODE_NONE = 0x0000,
diff --git a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
index 0d73b795d5..ba8f2d0dbb 100644
--- a/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
+++ b/drivers/net/ntnic/nthw/flow_api/hw_mod/hw_mod_tpe.c
@@ -169,6 +169,82 @@ int hw_mod_tpe_rpp_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
 	return be->iface->tpe_rpp_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
 }
 
+static int hw_mod_tpe_rpp_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+	uint32_t index, uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_rcp_categories) {
+		INDEX_TOO_LARGE_LOG;
+		return INDEX_TOO_LARGE;
+	}
+
+	switch (_VER_) {
+	case 3:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			memset(&be->tpe.v3.rpp_rcp[index], (uint8_t)*value,
+				sizeof(struct tpe_v1_rpp_v0_rcp_s));
+			break;
+
+		case HW_TPE_FIND:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rcp_categories) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			FIND_EQUAL_INDEX(be->tpe.v3.rpp_rcp, struct tpe_v1_rpp_v0_rcp_s, index,
+				*value, be->tpe.nb_rcp_categories);
+			break;
+
+		case HW_TPE_COMPARE:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rcp_categories) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			DO_COMPARE_INDEXS(be->tpe.v3.rpp_rcp, struct tpe_v1_rpp_v0_rcp_s, index,
+				*value);
+			break;
+
+		case HW_TPE_RPP_RCP_EXP:
+			GET_SET(be->tpe.v3.rpp_rcp[index].exp, value);
+			break;
+
+		default:
+			UNSUP_FIELD_LOG;
+			return UNSUP_FIELD;
+		}
+
+		break;
+
+	default:
+		UNSUP_VER_LOG;
+		return UNSUP_VER;
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpp_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value)
+{
+	return hw_mod_tpe_rpp_rcp_mod(be, field, index, &value, 0);
+}
+
 /*
  * IFR_RCP
  */
@@ -203,6 +279,90 @@ int hw_mod_tpe_ins_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
 	return be->iface->tpe_ins_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
 }
 
+static int hw_mod_tpe_ins_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+	uint32_t index, uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_rcp_categories) {
+		INDEX_TOO_LARGE_LOG;
+		return INDEX_TOO_LARGE;
+	}
+
+	switch (_VER_) {
+	case 3:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			memset(&be->tpe.v3.ins_rcp[index], (uint8_t)*value,
+				sizeof(struct tpe_v1_ins_v1_rcp_s));
+			break;
+
+		case HW_TPE_FIND:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rcp_categories) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			FIND_EQUAL_INDEX(be->tpe.v3.ins_rcp, struct tpe_v1_ins_v1_rcp_s, index,
+				*value, be->tpe.nb_rcp_categories);
+			break;
+
+		case HW_TPE_COMPARE:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rcp_categories) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			DO_COMPARE_INDEXS(be->tpe.v3.ins_rcp, struct tpe_v1_ins_v1_rcp_s, index,
+				*value);
+			break;
+
+		case HW_TPE_INS_RCP_DYN:
+			GET_SET(be->tpe.v3.ins_rcp[index].dyn, value);
+			break;
+
+		case HW_TPE_INS_RCP_OFS:
+			GET_SET(be->tpe.v3.ins_rcp[index].ofs, value);
+			break;
+
+		case HW_TPE_INS_RCP_LEN:
+			GET_SET(be->tpe.v3.ins_rcp[index].len, value);
+			break;
+
+		default:
+			UNSUP_FIELD_LOG;
+			return UNSUP_FIELD;
+		}
+
+		break;
+
+	default:
+		UNSUP_VER_LOG;
+		return UNSUP_VER;
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_ins_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value)
+{
+	return hw_mod_tpe_ins_rcp_mod(be, field, index, &value, 0);
+}
+
 /*
  * RPL_RCP
  */
@@ -220,6 +380,102 @@ int hw_mod_tpe_rpl_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
 	return be->iface->tpe_rpl_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
 }
 
+static int hw_mod_tpe_rpl_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+	uint32_t index, uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_rcp_categories) {
+		INDEX_TOO_LARGE_LOG;
+		return INDEX_TOO_LARGE;
+	}
+
+	switch (_VER_) {
+	case 3:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			memset(&be->tpe.v3.rpl_rcp[index], (uint8_t)*value,
+				sizeof(struct tpe_v3_rpl_v4_rcp_s));
+			break;
+
+		case HW_TPE_FIND:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rcp_categories) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			FIND_EQUAL_INDEX(be->tpe.v3.rpl_rcp, struct tpe_v3_rpl_v4_rcp_s, index,
+				*value, be->tpe.nb_rcp_categories);
+			break;
+
+		case HW_TPE_COMPARE:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rcp_categories) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			DO_COMPARE_INDEXS(be->tpe.v3.rpl_rcp, struct tpe_v3_rpl_v4_rcp_s, index,
+				*value);
+			break;
+
+		case HW_TPE_RPL_RCP_DYN:
+			GET_SET(be->tpe.v3.rpl_rcp[index].dyn, value);
+			break;
+
+		case HW_TPE_RPL_RCP_OFS:
+			GET_SET(be->tpe.v3.rpl_rcp[index].ofs, value);
+			break;
+
+		case HW_TPE_RPL_RCP_LEN:
+			GET_SET(be->tpe.v3.rpl_rcp[index].len, value);
+			break;
+
+		case HW_TPE_RPL_RCP_RPL_PTR:
+			GET_SET(be->tpe.v3.rpl_rcp[index].rpl_ptr, value);
+			break;
+
+		case HW_TPE_RPL_RCP_EXT_PRIO:
+			GET_SET(be->tpe.v3.rpl_rcp[index].ext_prio, value);
+			break;
+
+		case HW_TPE_RPL_RCP_ETH_TYPE_WR:
+			GET_SET(be->tpe.v3.rpl_rcp[index].eth_type_wr, value);
+			break;
+
+		default:
+			UNSUP_FIELD_LOG;
+			return UNSUP_FIELD;
+		}
+
+		break;
+
+	default:
+		UNSUP_VER_LOG;
+		return UNSUP_VER;
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpl_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value)
+{
+	return hw_mod_tpe_rpl_rcp_mod(be, field, index, &value, 0);
+}
+
 /*
  * RPL_EXT
  */
@@ -237,6 +493,86 @@ int hw_mod_tpe_rpl_ext_flush(struct flow_api_backend_s *be, int start_idx, int c
 	return be->iface->tpe_rpl_ext_flush(be->be_dev, &be->tpe, start_idx, count);
 }
 
+static int hw_mod_tpe_rpl_ext_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+	uint32_t index, uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_rpl_ext_categories) {
+		INDEX_TOO_LARGE_LOG;
+		return INDEX_TOO_LARGE;
+	}
+
+	switch (_VER_) {
+	case 3:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			memset(&be->tpe.v3.rpl_ext[index], (uint8_t)*value,
+				sizeof(struct tpe_v1_rpl_v2_ext_s));
+			break;
+
+		case HW_TPE_FIND:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rpl_ext_categories) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			FIND_EQUAL_INDEX(be->tpe.v3.rpl_ext, struct tpe_v1_rpl_v2_ext_s, index,
+				*value, be->tpe.nb_rpl_ext_categories);
+			break;
+
+		case HW_TPE_COMPARE:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rpl_ext_categories) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			DO_COMPARE_INDEXS(be->tpe.v3.rpl_ext, struct tpe_v1_rpl_v2_ext_s, index,
+				*value);
+			break;
+
+		case HW_TPE_RPL_EXT_RPL_PTR:
+			GET_SET(be->tpe.v3.rpl_ext[index].rpl_ptr, value);
+			break;
+
+		case HW_TPE_RPL_EXT_META_RPL_LEN:
+			GET_SET(be->tpe.v3.rpl_ext[index].meta_rpl_len, value);
+			break;
+
+		default:
+			UNSUP_FIELD_LOG;
+			return UNSUP_FIELD;
+		}
+
+		break;
+
+	default:
+		UNSUP_VER_LOG;
+		return UNSUP_VER;
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpl_ext_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value)
+{
+	return hw_mod_tpe_rpl_ext_mod(be, field, index, &value, 0);
+}
+
 /*
  * RPL_RPL
  */
@@ -254,6 +590,89 @@ int hw_mod_tpe_rpl_rpl_flush(struct flow_api_backend_s *be, int start_idx, int c
 	return be->iface->tpe_rpl_rpl_flush(be->be_dev, &be->tpe, start_idx, count);
 }
 
+static int hw_mod_tpe_rpl_rpl_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+	uint32_t index, uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_rpl_depth) {
+		INDEX_TOO_LARGE_LOG;
+		return INDEX_TOO_LARGE;
+	}
+
+	switch (_VER_) {
+	case 3:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			memset(&be->tpe.v3.rpl_rpl[index], (uint8_t)*value,
+				sizeof(struct tpe_v1_rpl_v2_rpl_s));
+			break;
+
+		case HW_TPE_FIND:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rpl_depth) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			FIND_EQUAL_INDEX(be->tpe.v3.rpl_rpl, struct tpe_v1_rpl_v2_rpl_s, index,
+				*value, be->tpe.nb_rpl_depth);
+			break;
+
+		case HW_TPE_COMPARE:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rpl_depth) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			DO_COMPARE_INDEXS(be->tpe.v3.rpl_rpl, struct tpe_v1_rpl_v2_rpl_s, index,
+				*value);
+			break;
+
+		case HW_TPE_RPL_RPL_VALUE:
+			if (get)
+				memcpy(value, be->tpe.v3.rpl_rpl[index].value,
+					sizeof(uint32_t) * 4);
+
+			else
+				memcpy(be->tpe.v3.rpl_rpl[index].value, value,
+					sizeof(uint32_t) * 4);
+
+			break;
+
+		default:
+			UNSUP_FIELD_LOG;
+			return UNSUP_FIELD;
+		}
+
+		break;
+
+	default:
+		UNSUP_VER_LOG;
+		return UNSUP_VER;
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_rpl_rpl_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t *value)
+{
+	return hw_mod_tpe_rpl_rpl_mod(be, field, index, value, 0);
+}
+
 /*
  * CPY_RCP
  */
@@ -273,6 +692,96 @@ int hw_mod_tpe_cpy_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
 	return be->iface->tpe_cpy_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
 }
 
+static int hw_mod_tpe_cpy_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+	uint32_t index, uint32_t *value, int get)
+{
+	const uint32_t cpy_size = be->tpe.nb_cpy_writers * be->tpe.nb_rcp_categories;
+
+	if (index >= cpy_size) {
+		INDEX_TOO_LARGE_LOG;
+		return INDEX_TOO_LARGE;
+	}
+
+	switch (_VER_) {
+	case 3:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			memset(&be->tpe.v3.cpy_rcp[index], (uint8_t)*value,
+				sizeof(struct tpe_v1_cpy_v1_rcp_s));
+			break;
+
+		case HW_TPE_FIND:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= cpy_size) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			FIND_EQUAL_INDEX(be->tpe.v3.cpy_rcp, struct tpe_v1_cpy_v1_rcp_s, index,
+				*value, cpy_size);
+			break;
+
+		case HW_TPE_COMPARE:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= cpy_size) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			DO_COMPARE_INDEXS(be->tpe.v3.cpy_rcp, struct tpe_v1_cpy_v1_rcp_s, index,
+				*value);
+			break;
+
+		case HW_TPE_CPY_RCP_READER_SELECT:
+			GET_SET(be->tpe.v3.cpy_rcp[index].reader_select, value);
+			break;
+
+		case HW_TPE_CPY_RCP_DYN:
+			GET_SET(be->tpe.v3.cpy_rcp[index].dyn, value);
+			break;
+
+		case HW_TPE_CPY_RCP_OFS:
+			GET_SET(be->tpe.v3.cpy_rcp[index].ofs, value);
+			break;
+
+		case HW_TPE_CPY_RCP_LEN:
+			GET_SET(be->tpe.v3.cpy_rcp[index].len, value);
+			break;
+
+		default:
+			UNSUP_FIELD_LOG;
+			return UNSUP_FIELD;
+		}
+
+		break;
+
+	default:
+		UNSUP_VER_LOG;
+		return UNSUP_VER;
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_cpy_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value)
+{
+	return hw_mod_tpe_cpy_rcp_mod(be, field, index, &value, 0);
+}
+
 /*
  * HFU_RCP
  */
@@ -290,6 +799,166 @@ int hw_mod_tpe_hfu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
 	return be->iface->tpe_hfu_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
 }
 
+static int hw_mod_tpe_hfu_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+	uint32_t index, uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_rcp_categories) {
+		INDEX_TOO_LARGE_LOG;
+		return INDEX_TOO_LARGE;
+	}
+
+	switch (_VER_) {
+	case 3:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			memset(&be->tpe.v3.hfu_rcp[index], (uint8_t)*value,
+				sizeof(struct tpe_v1_hfu_v1_rcp_s));
+			break;
+
+		case HW_TPE_FIND:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rcp_categories) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			FIND_EQUAL_INDEX(be->tpe.v3.hfu_rcp, struct tpe_v1_hfu_v1_rcp_s, index,
+				*value, be->tpe.nb_rcp_categories);
+			break;
+
+		case HW_TPE_COMPARE:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rcp_categories) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			DO_COMPARE_INDEXS(be->tpe.v3.hfu_rcp, struct tpe_v1_hfu_v1_rcp_s, index,
+				*value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_A_WR:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_a_wr, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_a_outer_l4_len, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_A_POS_DYN:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_a_pos_dyn, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_A_POS_OFS:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_a_pos_ofs, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_A_ADD_DYN:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_a_add_dyn, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_A_ADD_OFS:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_a_add_ofs, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_A_SUB_DYN:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_a_sub_dyn, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_B_WR:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_b_wr, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_B_POS_DYN:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_b_pos_dyn, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_B_POS_OFS:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_b_pos_ofs, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_B_ADD_DYN:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_b_add_dyn, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_B_ADD_OFS:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_b_add_ofs, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_B_SUB_DYN:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_b_sub_dyn, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_C_WR:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_c_wr, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_C_POS_DYN:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_c_pos_dyn, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_C_POS_OFS:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_c_pos_ofs, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_C_ADD_DYN:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_c_add_dyn, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_C_ADD_OFS:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_c_add_ofs, value);
+			break;
+
+		case HW_TPE_HFU_RCP_LEN_C_SUB_DYN:
+			GET_SET(be->tpe.v3.hfu_rcp[index].len_c_sub_dyn, value);
+			break;
+
+		case HW_TPE_HFU_RCP_TTL_WR:
+			GET_SET(be->tpe.v3.hfu_rcp[index].ttl_wr, value);
+			break;
+
+		case HW_TPE_HFU_RCP_TTL_POS_DYN:
+			GET_SET(be->tpe.v3.hfu_rcp[index].ttl_pos_dyn, value);
+			break;
+
+		case HW_TPE_HFU_RCP_TTL_POS_OFS:
+			GET_SET(be->tpe.v3.hfu_rcp[index].ttl_pos_ofs, value);
+			break;
+
+		default:
+			UNSUP_FIELD_LOG;
+			return UNSUP_FIELD;
+		}
+
+		break;
+
+	default:
+		UNSUP_VER_LOG;
+		return UNSUP_VER;
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_hfu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value)
+{
+	return hw_mod_tpe_hfu_rcp_mod(be, field, index, &value, 0);
+}
+
 /*
  * CSU_RCP
  */
@@ -306,3 +975,91 @@ int hw_mod_tpe_csu_rcp_flush(struct flow_api_backend_s *be, int start_idx, int c
 
 	return be->iface->tpe_csu_rcp_flush(be->be_dev, &be->tpe, start_idx, count);
 }
+
+static int hw_mod_tpe_csu_rcp_mod(struct flow_api_backend_s *be, enum hw_tpe_e field,
+	uint32_t index, uint32_t *value, int get)
+{
+	if (index >= be->tpe.nb_rcp_categories) {
+		INDEX_TOO_LARGE_LOG;
+		return INDEX_TOO_LARGE;
+	}
+
+	switch (_VER_) {
+	case 3:
+		switch (field) {
+		case HW_TPE_PRESET_ALL:
+			if (get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			memset(&be->tpe.v3.csu_rcp[index], (uint8_t)*value,
+				sizeof(struct tpe_v1_csu_v0_rcp_s));
+			break;
+
+		case HW_TPE_FIND:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rcp_categories) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			FIND_EQUAL_INDEX(be->tpe.v3.csu_rcp, struct tpe_v1_csu_v0_rcp_s, index,
+				*value, be->tpe.nb_rcp_categories);
+			break;
+
+		case HW_TPE_COMPARE:
+			if (!get) {
+				UNSUP_FIELD_LOG;
+				return UNSUP_FIELD;
+			}
+
+			if (*value >= be->tpe.nb_rcp_categories) {
+				INDEX_TOO_LARGE_LOG;
+				return INDEX_TOO_LARGE;
+			}
+
+			DO_COMPARE_INDEXS(be->tpe.v3.csu_rcp, struct tpe_v1_csu_v0_rcp_s, index,
+				*value);
+			break;
+
+		case HW_TPE_CSU_RCP_OUTER_L3_CMD:
+			GET_SET(be->tpe.v3.csu_rcp[index].ol3_cmd, value);
+			break;
+
+		case HW_TPE_CSU_RCP_OUTER_L4_CMD:
+			GET_SET(be->tpe.v3.csu_rcp[index].ol4_cmd, value);
+			break;
+
+		case HW_TPE_CSU_RCP_INNER_L3_CMD:
+			GET_SET(be->tpe.v3.csu_rcp[index].il3_cmd, value);
+			break;
+
+		case HW_TPE_CSU_RCP_INNER_L4_CMD:
+			GET_SET(be->tpe.v3.csu_rcp[index].il4_cmd, value);
+			break;
+
+		default:
+			UNSUP_FIELD_LOG;
+			return UNSUP_FIELD;
+		}
+
+		break;
+
+	default:
+		UNSUP_VER_LOG;
+		return UNSUP_VER;
+	}
+
+	return 0;
+}
+
+int hw_mod_tpe_csu_rcp_set(struct flow_api_backend_s *be, enum hw_tpe_e field, int index,
+	uint32_t value)
+{
+	return hw_mod_tpe_csu_rcp_mod(be, field, index, &value, 0);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.c
index 8b62ce11dd..ea7cc82d54 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.c
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.c
@@ -29,6 +29,17 @@ struct hw_db_inline_resource_db {
 		int ref;
 	} *slc_lr;
 
+	struct hw_db_inline_resource_db_tpe {
+		struct hw_db_inline_tpe_data data;
+		int ref;
+	} *tpe;
+
+	struct hw_db_inline_resource_db_tpe_ext {
+		struct hw_db_inline_tpe_ext_data data;
+		int replace_ram_idx;
+		int ref;
+	} *tpe_ext;
+
 	struct hw_db_inline_resource_db_hsh {
 		struct hw_db_inline_hsh_data data;
 		int ref;
@@ -37,6 +48,8 @@ struct hw_db_inline_resource_db {
 	uint32_t nb_cot;
 	uint32_t nb_qsl;
 	uint32_t nb_slc_lr;
+	uint32_t nb_tpe;
+	uint32_t nb_tpe_ext;
 	uint32_t nb_hsh;
 
 	/* Items */
@@ -100,6 +113,22 @@ int hw_db_inline_create(struct flow_nic_dev *ndev, void **db_handle)
 		return -1;
 	}
 
+	db->nb_tpe = ndev->be.tpe.nb_rcp_categories;
+	db->tpe = calloc(db->nb_tpe, sizeof(struct hw_db_inline_resource_db_tpe));
+
+	if (db->tpe == NULL) {
+		hw_db_inline_destroy(db);
+		return -1;
+	}
+
+	db->nb_tpe_ext = ndev->be.tpe.nb_rpl_ext_categories;
+	db->tpe_ext = calloc(db->nb_tpe_ext, sizeof(struct hw_db_inline_resource_db_tpe_ext));
+
+	if (db->tpe_ext == NULL) {
+		hw_db_inline_destroy(db);
+		return -1;
+	}
+
 	db->nb_cat = ndev->be.cat.nb_cat_funcs;
 	db->cat = calloc(db->nb_cat, sizeof(struct hw_db_inline_resource_db_cat));
 
@@ -153,6 +182,8 @@ void hw_db_inline_destroy(void *db_handle)
 	free(db->cot);
 	free(db->qsl);
 	free(db->slc_lr);
+	free(db->tpe);
+	free(db->tpe_ext);
 	free(db->hsh);
 
 	free(db->cat);
@@ -194,6 +225,15 @@ void hw_db_inline_deref_idxs(struct flow_nic_dev *ndev, void *db_handle, struct
 				*(struct hw_db_slc_lr_idx *)&idxs[i]);
 			break;
 
+		case HW_DB_IDX_TYPE_TPE:
+			hw_db_inline_tpe_deref(ndev, db_handle, *(struct hw_db_tpe_idx *)&idxs[i]);
+			break;
+
+		case HW_DB_IDX_TYPE_TPE_EXT:
+			hw_db_inline_tpe_ext_deref(ndev, db_handle,
+				*(struct hw_db_tpe_ext_idx *)&idxs[i]);
+			break;
+
 		case HW_DB_IDX_TYPE_KM_RCP:
 			hw_db_inline_km_deref(ndev, db_handle, *(struct hw_db_km_idx *)&idxs[i]);
 			break;
@@ -239,6 +279,12 @@ const void *hw_db_inline_find_data(struct flow_nic_dev *ndev, void *db_handle,
 		case HW_DB_IDX_TYPE_SLC_LR:
 			return &db->slc_lr[idxs[i].ids].data;
 
+		case HW_DB_IDX_TYPE_TPE:
+			return &db->tpe[idxs[i].ids].data;
+
+		case HW_DB_IDX_TYPE_TPE_EXT:
+			return &db->tpe_ext[idxs[i].ids].data;
+
 		case HW_DB_IDX_TYPE_KM_RCP:
 			return &db->km[idxs[i].id1].data;
 
@@ -651,6 +697,333 @@ void hw_db_inline_slc_lr_deref(struct flow_nic_dev *ndev, void *db_handle,
 	}
 }
 
+/******************************************************************************/
+/* TPE                                                                        */
+/******************************************************************************/
+
+static int hw_db_inline_tpe_compare(const struct hw_db_inline_tpe_data *data1,
+	const struct hw_db_inline_tpe_data *data2)
+{
+	for (int i = 0; i < 6; ++i)
+		if (data1->writer[i].en != data2->writer[i].en ||
+			data1->writer[i].reader_select != data2->writer[i].reader_select ||
+			data1->writer[i].dyn != data2->writer[i].dyn ||
+			data1->writer[i].ofs != data2->writer[i].ofs ||
+			data1->writer[i].len != data2->writer[i].len)
+			return 0;
+
+	return data1->insert_len == data2->insert_len && data1->new_outer == data2->new_outer &&
+		data1->calc_eth_type_from_inner_ip == data2->calc_eth_type_from_inner_ip &&
+		data1->ttl_en == data2->ttl_en && data1->ttl_dyn == data2->ttl_dyn &&
+		data1->ttl_ofs == data2->ttl_ofs && data1->len_a_en == data2->len_a_en &&
+		data1->len_a_pos_dyn == data2->len_a_pos_dyn &&
+		data1->len_a_pos_ofs == data2->len_a_pos_ofs &&
+		data1->len_a_add_dyn == data2->len_a_add_dyn &&
+		data1->len_a_add_ofs == data2->len_a_add_ofs &&
+		data1->len_a_sub_dyn == data2->len_a_sub_dyn &&
+		data1->len_b_en == data2->len_b_en &&
+		data1->len_b_pos_dyn == data2->len_b_pos_dyn &&
+		data1->len_b_pos_ofs == data2->len_b_pos_ofs &&
+		data1->len_b_add_dyn == data2->len_b_add_dyn &&
+		data1->len_b_add_ofs == data2->len_b_add_ofs &&
+		data1->len_b_sub_dyn == data2->len_b_sub_dyn &&
+		data1->len_c_en == data2->len_c_en &&
+		data1->len_c_pos_dyn == data2->len_c_pos_dyn &&
+		data1->len_c_pos_ofs == data2->len_c_pos_ofs &&
+		data1->len_c_add_dyn == data2->len_c_add_dyn &&
+		data1->len_c_add_ofs == data2->len_c_add_ofs &&
+		data1->len_c_sub_dyn == data2->len_c_sub_dyn;
+}
+
+struct hw_db_tpe_idx hw_db_inline_tpe_add(struct flow_nic_dev *ndev, void *db_handle,
+	const struct hw_db_inline_tpe_data *data)
+{
+	struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+	struct hw_db_tpe_idx idx = { .raw = 0 };
+	int found = 0;
+
+	idx.type = HW_DB_IDX_TYPE_TPE;
+
+	for (uint32_t i = 1; i < db->nb_tpe; ++i) {
+		int ref = db->tpe[i].ref;
+
+		if (ref > 0 && hw_db_inline_tpe_compare(data, &db->tpe[i].data)) {
+			idx.ids = i;
+			hw_db_inline_tpe_ref(ndev, db, idx);
+			return idx;
+		}
+
+		if (!found && ref <= 0) {
+			found = 1;
+			idx.ids = i;
+		}
+	}
+
+	if (!found) {
+		idx.error = 1;
+		return idx;
+	}
+
+	db->tpe[idx.ids].ref = 1;
+	memcpy(&db->tpe[idx.ids].data, data, sizeof(struct hw_db_inline_tpe_data));
+
+	if (data->insert_len > 0) {
+		hw_mod_tpe_rpp_rcp_set(&ndev->be, HW_TPE_RPP_RCP_EXP, idx.ids, data->insert_len);
+		hw_mod_tpe_rpp_rcp_flush(&ndev->be, idx.ids, 1);
+
+		hw_mod_tpe_ins_rcp_set(&ndev->be, HW_TPE_INS_RCP_DYN, idx.ids, 1);
+		hw_mod_tpe_ins_rcp_set(&ndev->be, HW_TPE_INS_RCP_OFS, idx.ids, 0);
+		hw_mod_tpe_ins_rcp_set(&ndev->be, HW_TPE_INS_RCP_LEN, idx.ids, data->insert_len);
+		hw_mod_tpe_ins_rcp_flush(&ndev->be, idx.ids, 1);
+
+		hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_DYN, idx.ids, 1);
+		hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_OFS, idx.ids, 0);
+		hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_LEN, idx.ids, data->insert_len);
+		hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_RPL_PTR, idx.ids, 0);
+		hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_EXT_PRIO, idx.ids, 1);
+		hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_RPL_RCP_ETH_TYPE_WR, idx.ids,
+			data->calc_eth_type_from_inner_ip);
+		hw_mod_tpe_rpl_rcp_flush(&ndev->be, idx.ids, 1);
+	}
+
+	for (uint32_t i = 0; i < 6; ++i) {
+		if (data->writer[i].en) {
+			hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_READER_SELECT,
+				idx.ids + db->nb_tpe * i,
+				data->writer[i].reader_select);
+			hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_DYN,
+				idx.ids + db->nb_tpe * i, data->writer[i].dyn);
+			hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_OFS,
+				idx.ids + db->nb_tpe * i, data->writer[i].ofs);
+			hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_LEN,
+				idx.ids + db->nb_tpe * i, data->writer[i].len);
+
+		} else {
+			hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_READER_SELECT,
+				idx.ids + db->nb_tpe * i, 0);
+			hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_DYN,
+				idx.ids + db->nb_tpe * i, 0);
+			hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_OFS,
+				idx.ids + db->nb_tpe * i, 0);
+			hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_CPY_RCP_LEN,
+				idx.ids + db->nb_tpe * i, 0);
+		}
+
+		hw_mod_tpe_cpy_rcp_flush(&ndev->be, idx.ids + db->nb_tpe * i, 1);
+	}
+
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_WR, idx.ids, data->len_a_en);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_OUTER_L4_LEN, idx.ids,
+		data->new_outer);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_POS_DYN, idx.ids,
+		data->len_a_pos_dyn);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_POS_OFS, idx.ids,
+		data->len_a_pos_ofs);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_ADD_DYN, idx.ids,
+		data->len_a_add_dyn);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_ADD_OFS, idx.ids,
+		data->len_a_add_ofs);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_A_SUB_DYN, idx.ids,
+		data->len_a_sub_dyn);
+
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_WR, idx.ids, data->len_b_en);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_POS_DYN, idx.ids,
+		data->len_b_pos_dyn);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_POS_OFS, idx.ids,
+		data->len_b_pos_ofs);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_ADD_DYN, idx.ids,
+		data->len_b_add_dyn);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_ADD_OFS, idx.ids,
+		data->len_b_add_ofs);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_B_SUB_DYN, idx.ids,
+		data->len_b_sub_dyn);
+
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_WR, idx.ids, data->len_c_en);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_POS_DYN, idx.ids,
+		data->len_c_pos_dyn);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_POS_OFS, idx.ids,
+		data->len_c_pos_ofs);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_ADD_DYN, idx.ids,
+		data->len_c_add_dyn);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_ADD_OFS, idx.ids,
+		data->len_c_add_ofs);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_LEN_C_SUB_DYN, idx.ids,
+		data->len_c_sub_dyn);
+
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_TTL_WR, idx.ids, data->ttl_en);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_TTL_POS_DYN, idx.ids, data->ttl_dyn);
+	hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_HFU_RCP_TTL_POS_OFS, idx.ids, data->ttl_ofs);
+	hw_mod_tpe_hfu_rcp_flush(&ndev->be, idx.ids, 1);
+
+	hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_CSU_RCP_OUTER_L3_CMD, idx.ids, 3);
+	hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_CSU_RCP_OUTER_L4_CMD, idx.ids, 3);
+	hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_CSU_RCP_INNER_L3_CMD, idx.ids, 3);
+	hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_CSU_RCP_INNER_L4_CMD, idx.ids, 3);
+	hw_mod_tpe_csu_rcp_flush(&ndev->be, idx.ids, 1);
+
+	return idx;
+}
+
+void hw_db_inline_tpe_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_tpe_idx idx)
+{
+	(void)ndev;
+	struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+
+	if (!idx.error)
+		db->tpe[idx.ids].ref += 1;
+}
+
+/*
+ * Drop one reference on a TPE recipe entry.  When the reference count falls
+ * to zero, all associated hardware recipes (CPY, RPP, INS, RPL, HFU, CSU) are
+ * reset to their preset state and flushed, and the cached entry is cleared so
+ * the slot can be reused by a later hw_db_inline_tpe_add().
+ */
+void hw_db_inline_tpe_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_tpe_idx idx)
+{
+	struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+
+	if (idx.error)
+		return;
+
+	db->tpe[idx.ids].ref -= 1;
+
+	if (db->tpe[idx.ids].ref <= 0) {
+		/*
+		 * Clear one CPY recipe per copy writer; writer i for this entry
+		 * lives at offset idx.ids + db->nb_tpe * i.  The count of 6
+		 * matches writer[6] in struct hw_db_inline_tpe_data — NOTE(review):
+		 * confirm against ndev->be.tpe.nb_cpy_writers.
+		 */
+		for (uint32_t i = 0; i < 6; ++i) {
+			hw_mod_tpe_cpy_rcp_set(&ndev->be, HW_TPE_PRESET_ALL,
+				idx.ids + db->nb_tpe * i, 0);
+			hw_mod_tpe_cpy_rcp_flush(&ndev->be, idx.ids + db->nb_tpe * i, 1);
+		}
+
+		hw_mod_tpe_rpp_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0);
+		hw_mod_tpe_rpp_rcp_flush(&ndev->be, idx.ids, 1);
+
+		hw_mod_tpe_ins_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0);
+		hw_mod_tpe_ins_rcp_flush(&ndev->be, idx.ids, 1);
+
+		hw_mod_tpe_rpl_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0);
+		hw_mod_tpe_rpl_rcp_flush(&ndev->be, idx.ids, 1);
+
+		hw_mod_tpe_hfu_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0);
+		hw_mod_tpe_hfu_rcp_flush(&ndev->be, idx.ids, 1);
+
+		hw_mod_tpe_csu_rcp_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0);
+		hw_mod_tpe_csu_rcp_flush(&ndev->be, idx.ids, 1);
+
+		/* Reset the cached copy so the slot reads as free (ref == 0) */
+		memset(&db->tpe[idx.ids].data, 0x0, sizeof(struct hw_db_inline_tpe_data));
+		db->tpe[idx.ids].ref = 0;
+	}
+}
+
+/******************************************************************************/
+/* TPE_EXT                                                                    */
+/******************************************************************************/
+
+/*
+ * Compare two TPE EXT entries for deduplication.  Returns non-zero on match.
+ * Note: the full HW_DB_INLINE_MAX_ENCAP_SIZE buffer is compared, not just
+ * data->size bytes — this relies on callers zero-padding hdr8 beyond the
+ * valid size (the visible caller memsets the buffer first); verify that all
+ * callers do so.
+ */
+static int hw_db_inline_tpe_ext_compare(const struct hw_db_inline_tpe_ext_data *data1,
+	const struct hw_db_inline_tpe_ext_data *data2)
+{
+	return data1->size == data2->size &&
+		memcmp(data1->hdr8, data2->hdr8, HW_DB_INLINE_MAX_ENCAP_SIZE) == 0;
+}
+
+/*
+ * Add (or reuse) a TPE EXT entry holding replacement header data for encap.
+ * Deduplicates against existing entries; on a fresh entry it allocates
+ * contiguous RPL replace-RAM lines and programs the RPL EXT pointer/length
+ * plus the RPL RAM contents.  Returns an index with .error set on invalid
+ * size, table exhaustion, or RPL RAM exhaustion.
+ */
+struct hw_db_tpe_ext_idx hw_db_inline_tpe_ext_add(struct flow_nic_dev *ndev, void *db_handle,
+	const struct hw_db_inline_tpe_ext_data *data)
+{
+	struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+	struct hw_db_tpe_ext_idx idx = { .raw = 0 };
+	/* Replace RAM is written in 16-byte lines; round the header size up */
+	int rpl_rpl_length = ((int)data->size + 15) / 16;
+	int found = 0, rpl_rpl_index = 0;
+
+	idx.type = HW_DB_IDX_TYPE_TPE_EXT;
+
+	if (data->size > HW_DB_INLINE_MAX_ENCAP_SIZE) {
+		idx.error = 1;
+		return idx;
+	}
+
+	/* Index 0 is reserved; look for an identical entry or the first free slot */
+	for (uint32_t i = 1; i < db->nb_tpe_ext; ++i) {
+		int ref = db->tpe_ext[i].ref;
+
+		if (ref > 0 && hw_db_inline_tpe_ext_compare(data, &db->tpe_ext[i].data)) {
+			idx.ids = i;
+			hw_db_inline_tpe_ext_ref(ndev, db, idx);
+			return idx;
+		}
+
+		if (!found && ref <= 0) {
+			found = 1;
+			idx.ids = i;
+		}
+	}
+
+	if (!found) {
+		idx.error = 1;
+		return idx;
+	}
+
+	/* Reserve rpl_rpl_length consecutive replace-RAM lines */
+	rpl_rpl_index = flow_nic_alloc_resource_config(ndev, RES_TPE_RPL, rpl_rpl_length, 1);
+
+	if (rpl_rpl_index < 0) {
+		idx.error = 1;
+		return idx;
+	}
+
+	db->tpe_ext[idx.ids].ref = 1;
+	db->tpe_ext[idx.ids].replace_ram_idx = rpl_rpl_index;
+	memcpy(&db->tpe_ext[idx.ids].data, data, sizeof(struct hw_db_inline_tpe_ext_data));
+
+	hw_mod_tpe_rpl_ext_set(&ndev->be, HW_TPE_RPL_EXT_RPL_PTR, idx.ids, rpl_rpl_index);
+	hw_mod_tpe_rpl_ext_set(&ndev->be, HW_TPE_RPL_EXT_META_RPL_LEN, idx.ids, data->size);
+	hw_mod_tpe_rpl_ext_flush(&ndev->be, idx.ids, 1);
+
+	/*
+	 * Copy the header into replace RAM, 4 x 32-bit words (16 bytes) per line.
+	 * NOTE(review): the last line reads hdr32 up to the rounded-up length;
+	 * this assumes HW_DB_INLINE_MAX_ENCAP_SIZE is a multiple of 16 so hdr32
+	 * covers the final partial line — confirm.
+	 */
+	for (int i = 0; i < rpl_rpl_length; ++i) {
+		uint32_t rpl_data[4];
+		memcpy(rpl_data, data->hdr32 + i * 4, sizeof(rpl_data));
+		hw_mod_tpe_rpl_rpl_set(&ndev->be, HW_TPE_RPL_RPL_VALUE, rpl_rpl_index + i,
+			rpl_data);
+	}
+
+	hw_mod_tpe_rpl_rpl_flush(&ndev->be, rpl_rpl_index, rpl_rpl_length);
+
+	return idx;
+}
+
+/*
+ * Take an extra reference on a TPE EXT entry previously returned by
+ * hw_db_inline_tpe_ext_add().  No-op when idx carries the error flag.
+ */
+void hw_db_inline_tpe_ext_ref(struct flow_nic_dev *ndev, void *db_handle,
+	struct hw_db_tpe_ext_idx idx)
+{
+	(void)ndev;
+	struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+
+	if (!idx.error)
+		db->tpe_ext[idx.ids].ref += 1;
+}
+
+/*
+ * Drop one reference on a TPE EXT entry.  When the reference count falls to
+ * zero, the RPL EXT recipe is reset, every replace-RAM line the entry used is
+ * zeroed and returned to the RES_TPE_RPL pool, and the cached data is cleared
+ * so the slot can be reused.
+ */
+void hw_db_inline_tpe_ext_deref(struct flow_nic_dev *ndev, void *db_handle,
+	struct hw_db_tpe_ext_idx idx)
+{
+	struct hw_db_inline_resource_db *db = (struct hw_db_inline_resource_db *)db_handle;
+
+	if (idx.error)
+		return;
+
+	db->tpe_ext[idx.ids].ref -= 1;
+
+	if (db->tpe_ext[idx.ids].ref <= 0) {
+		/* Recompute the line count/base from the cached entry (16 bytes per line) */
+		const int rpl_rpl_length = ((int)db->tpe_ext[idx.ids].data.size + 15) / 16;
+		const int rpl_rpl_index = db->tpe_ext[idx.ids].replace_ram_idx;
+
+		hw_mod_tpe_rpl_ext_set(&ndev->be, HW_TPE_PRESET_ALL, idx.ids, 0);
+		hw_mod_tpe_rpl_ext_flush(&ndev->be, idx.ids, 1);
+
+		/* Zero each replace-RAM line and release it back to the resource pool */
+		for (int i = 0; i < rpl_rpl_length; ++i) {
+			uint32_t rpl_zero[] = { 0, 0, 0, 0 };
+			hw_mod_tpe_rpl_rpl_set(&ndev->be, HW_TPE_RPL_RPL_VALUE, rpl_rpl_index + i,
+				rpl_zero);
+			flow_nic_free_resource(ndev, RES_TPE_RPL, rpl_rpl_index + i);
+		}
+
+		hw_mod_tpe_rpl_rpl_flush(&ndev->be, rpl_rpl_index, rpl_rpl_length);
+
+		memset(&db->tpe_ext[idx.ids].data, 0x0, sizeof(struct hw_db_inline_tpe_ext_data));
+		db->tpe_ext[idx.ids].ref = 0;
+	}
+}
+
+
 /******************************************************************************/
 /* CAT                                                                        */
 /******************************************************************************/
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.h
index c97bdef1b7..18d959307e 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.h
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_hw_db_inline.h
@@ -52,6 +52,60 @@ struct hw_db_slc_lr_idx {
 	HW_DB_IDX;
 };
 
+/*
+ * Cached TPE (TX Packet Editor) recipe parameters.  One instance describes
+ * the INS/CPY/HFU/CSU programming performed by hw_db_inline_tpe_add() and is
+ * used as the deduplication key for TPE entries.
+ */
+struct hw_db_inline_tpe_data {
+	uint32_t insert_len : 16;	/* Bytes to insert (tunnel header length) */
+	uint32_t new_outer : 1;		/* Set when a new outer header is added (encap) */
+	uint32_t calc_eth_type_from_inner_ip : 1;
+	uint32_t ttl_en : 1;		/* TTL edit enable (from ttl_sub_enable) */
+	uint32_t ttl_dyn : 5;		/* Dynamic offset selector for the TTL field */
+	uint32_t ttl_ofs : 8;		/* Byte offset of TTL within the selected header */
+
+	/* Per-writer CPY (modify-field) configuration; up to 6 hardware writers */
+	struct {
+		uint32_t en : 1;
+		uint32_t reader_select : 3;
+		uint32_t dyn : 5;
+		uint32_t ofs : 14;
+		uint32_t len : 5;
+		uint32_t padding : 4;
+	} writer[6];
+
+	/* HFU length update A — used for the outer L4 length field on encap */
+	uint32_t len_a_en : 1;
+	uint32_t len_a_pos_dyn : 5;
+	uint32_t len_a_pos_ofs : 8;
+	uint32_t len_a_add_dyn : 5;
+	uint32_t len_a_add_ofs : 8;
+	uint32_t len_a_sub_dyn : 5;
+
+	/* HFU length update B — used for the outer L3 length field on encap */
+	uint32_t len_b_en : 1;
+	uint32_t len_b_pos_dyn : 5;
+	uint32_t len_b_pos_ofs : 8;
+	uint32_t len_b_add_dyn : 5;
+	uint32_t len_b_add_ofs : 8;
+	uint32_t len_b_sub_dyn : 5;
+
+	/* HFU length update C — used for the GTP length field on encap */
+	uint32_t len_c_en : 1;
+	uint32_t len_c_pos_dyn : 5;
+	uint32_t len_c_pos_ofs : 8;
+	uint32_t len_c_add_dyn : 5;
+	uint32_t len_c_add_ofs : 8;
+	uint32_t len_c_sub_dyn : 5;
+};
+
+/*
+ * Replacement header data for TPE EXT (encap) entries.  hdr8/hdr32 alias the
+ * same buffer: bytes for comparison/copy, 32-bit words for RPL RAM writes.
+ */
+struct hw_db_inline_tpe_ext_data {
+	uint32_t size;	/* Valid bytes in hdr8; must not exceed HW_DB_INLINE_MAX_ENCAP_SIZE */
+	union {
+		uint8_t hdr8[HW_DB_INLINE_MAX_ENCAP_SIZE];
+		uint32_t hdr32[(HW_DB_INLINE_MAX_ENCAP_SIZE + 3) / 4];
+	};
+};
+
+/* Handle referencing a TPE recipe entry in the inline resource database */
+struct hw_db_tpe_idx {
+	HW_DB_IDX;
+};
+/* Handle referencing a TPE EXT (replacement header) entry */
+struct hw_db_tpe_ext_idx {
+	HW_DB_IDX;
+};
+
 struct hw_db_km_idx {
 	HW_DB_IDX;
 };
@@ -70,6 +124,9 @@ enum hw_db_idx_type {
 	HW_DB_IDX_TYPE_CAT,
 	HW_DB_IDX_TYPE_QSL,
 	HW_DB_IDX_TYPE_SLC_LR,
+	HW_DB_IDX_TYPE_TPE,
+	HW_DB_IDX_TYPE_TPE_EXT,
+
 	HW_DB_IDX_TYPE_KM_RCP,
 	HW_DB_IDX_TYPE_KM_FT,
 	HW_DB_IDX_TYPE_HSH,
@@ -138,6 +195,7 @@ struct hw_db_inline_action_set_data {
 		struct {
 			struct hw_db_cot_idx cot;
 			struct hw_db_qsl_idx qsl;
+			struct hw_db_tpe_idx tpe;
 			struct hw_db_hsh_idx hsh;
 		};
 	};
@@ -181,6 +239,18 @@ void hw_db_inline_slc_lr_ref(struct flow_nic_dev *ndev, void *db_handle,
 void hw_db_inline_slc_lr_deref(struct flow_nic_dev *ndev, void *db_handle,
 	struct hw_db_slc_lr_idx idx);
 
+struct hw_db_tpe_idx hw_db_inline_tpe_add(struct flow_nic_dev *ndev, void *db_handle,
+	const struct hw_db_inline_tpe_data *data);
+void hw_db_inline_tpe_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_tpe_idx idx);
+void hw_db_inline_tpe_deref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_tpe_idx idx);
+
+struct hw_db_tpe_ext_idx hw_db_inline_tpe_ext_add(struct flow_nic_dev *ndev, void *db_handle,
+	const struct hw_db_inline_tpe_ext_data *data);
+void hw_db_inline_tpe_ext_ref(struct flow_nic_dev *ndev, void *db_handle,
+	struct hw_db_tpe_ext_idx idx);
+void hw_db_inline_tpe_ext_deref(struct flow_nic_dev *ndev, void *db_handle,
+	struct hw_db_tpe_ext_idx idx);
+
 struct hw_db_hsh_idx hw_db_inline_hsh_add(struct flow_nic_dev *ndev, void *db_handle,
 	const struct hw_db_inline_hsh_data *data);
 void hw_db_inline_hsh_ref(struct flow_nic_dev *ndev, void *db_handle, struct hw_db_hsh_idx idx);
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
index 2d795e2c7f..2fce706ce1 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
@@ -17,6 +17,8 @@
 #include "flow_api_profile_inline.h"
 #include "ntnic_mod_reg.h"
 
+#define NT_FLM_MISS_FLOW_TYPE 0
+#define NT_FLM_UNHANDLED_FLOW_TYPE 1
 #define NT_FLM_OP_UNLEARN 0
 #define NT_FLM_OP_LEARN 1
 
@@ -2426,6 +2428,92 @@ static int setup_flow_flm_actions(struct flow_eth_dev *dev,
 		}
 	}
 
+	/* Setup TPE EXT */
+	if (fd->tun_hdr.len > 0) {
+		assert(fd->tun_hdr.len <= HW_DB_INLINE_MAX_ENCAP_SIZE);
+
+		struct hw_db_inline_tpe_ext_data tpe_ext_data = {
+			.size = fd->tun_hdr.len,
+		};
+
+		memset(tpe_ext_data.hdr8, 0x0, HW_DB_INLINE_MAX_ENCAP_SIZE);
+		memcpy(tpe_ext_data.hdr8, fd->tun_hdr.d.hdr8, (fd->tun_hdr.len + 15) & ~15);
+
+		struct hw_db_tpe_ext_idx tpe_ext_idx =
+			hw_db_inline_tpe_ext_add(dev->ndev, dev->ndev->hw_db_handle,
+			&tpe_ext_data);
+		local_idxs[(*local_idx_counter)++] = tpe_ext_idx.raw;
+
+		if (tpe_ext_idx.error) {
+			NT_LOG(ERR, FILTER, "Could not reference TPE EXT resource");
+			flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+			return -1;
+		}
+
+		if (flm_rpl_ext_ptr)
+			*flm_rpl_ext_ptr = tpe_ext_idx.ids;
+	}
+
+	/* Setup TPE */
+	assert(fd->modify_field_count <= 6);
+
+	struct hw_db_inline_tpe_data tpe_data = {
+		.insert_len = fd->tun_hdr.len,
+		.new_outer = fd->tun_hdr.new_outer,
+		.calc_eth_type_from_inner_ip =
+			!fd->tun_hdr.new_outer && fd->header_strip_end_dyn == DYN_TUN_L3,
+		.ttl_en = fd->ttl_sub_enable,
+		.ttl_dyn = fd->ttl_sub_outer ? DYN_L3 : DYN_TUN_L3,
+		.ttl_ofs = fd->ttl_sub_ipv4 ? 8 : 7,
+	};
+
+	for (unsigned int i = 0; i < fd->modify_field_count; ++i) {
+		tpe_data.writer[i].en = 1;
+		tpe_data.writer[i].reader_select = fd->modify_field[i].select;
+		tpe_data.writer[i].dyn = fd->modify_field[i].dyn;
+		tpe_data.writer[i].ofs = fd->modify_field[i].ofs;
+		tpe_data.writer[i].len = fd->modify_field[i].len;
+	}
+
+	if (fd->tun_hdr.new_outer) {
+		const int fcs_length = 4;
+
+		/* L4 length */
+		tpe_data.len_a_en = 1;
+		tpe_data.len_a_pos_dyn = DYN_L4;
+		tpe_data.len_a_pos_ofs = 4;
+		tpe_data.len_a_add_dyn = 18;
+		tpe_data.len_a_add_ofs = (uint32_t)(-fcs_length) & 0xff;
+		tpe_data.len_a_sub_dyn = DYN_L4;
+
+		/* L3 length */
+		tpe_data.len_b_en = 1;
+		tpe_data.len_b_pos_dyn = DYN_L3;
+		tpe_data.len_b_pos_ofs = fd->tun_hdr.ip_version == 4 ? 2 : 4;
+		tpe_data.len_b_add_dyn = 18;
+		tpe_data.len_b_add_ofs = (uint32_t)(-fcs_length) & 0xff;
+		tpe_data.len_b_sub_dyn = DYN_L3;
+
+		/* GTP length */
+		tpe_data.len_c_en = 1;
+		tpe_data.len_c_pos_dyn = DYN_L4_PAYLOAD;
+		tpe_data.len_c_pos_ofs = 2;
+		tpe_data.len_c_add_dyn = 18;
+		tpe_data.len_c_add_ofs = (uint32_t)(-8 - fcs_length) & 0xff;
+		tpe_data.len_c_sub_dyn = DYN_L4_PAYLOAD;
+	}
+
+	struct hw_db_tpe_idx tpe_idx =
+		hw_db_inline_tpe_add(dev->ndev, dev->ndev->hw_db_handle, &tpe_data);
+
+	local_idxs[(*local_idx_counter)++] = tpe_idx.raw;
+
+	if (tpe_idx.error) {
+		NT_LOG(ERR, FILTER, "Could not reference TPE resource");
+		flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+		return -1;
+	}
+
 	return 0;
 }
 
@@ -2552,6 +2640,30 @@ static struct flow_handle *create_flow_filter(struct flow_eth_dev *dev, struct n
 				flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
 				goto error_out;
 			}
+
+			/* Setup TPE */
+			if (fd->ttl_sub_enable) {
+				struct hw_db_inline_tpe_data tpe_data = {
+					.insert_len = fd->tun_hdr.len,
+					.new_outer = fd->tun_hdr.new_outer,
+					.calc_eth_type_from_inner_ip = !fd->tun_hdr.new_outer &&
+						fd->header_strip_end_dyn == DYN_TUN_L3,
+					.ttl_en = fd->ttl_sub_enable,
+					.ttl_dyn = fd->ttl_sub_outer ? DYN_L3 : DYN_TUN_L3,
+					.ttl_ofs = fd->ttl_sub_ipv4 ? 8 : 7,
+				};
+				struct hw_db_tpe_idx tpe_idx =
+					hw_db_inline_tpe_add(dev->ndev, dev->ndev->hw_db_handle,
+					&tpe_data);
+				fh->db_idxs[fh->db_idx_counter++] = tpe_idx.raw;
+				action_set_data.tpe = tpe_idx;
+
+				if (tpe_idx.error) {
+					NT_LOG(ERR, FILTER, "Could not reference TPE resource");
+					flow_nic_set_error(ERR_MATCH_RESOURCE_EXHAUSTION, error);
+					goto error_out;
+				}
+			}
 		}
 
 		/* Setup CAT */
@@ -2860,6 +2972,16 @@ int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
 	if (!ndev->flow_mgnt_prepared) {
 		/* Check static arrays are big enough */
 		assert(ndev->be.tpe.nb_cpy_writers <= MAX_CPY_WRITERS_SUPPORTED);
+		/* KM Flow Type 0 is reserved */
+		flow_nic_mark_resource_used(ndev, RES_KM_FLOW_TYPE, 0);
+		flow_nic_mark_resource_used(ndev, RES_KM_CATEGORY, 0);
+
+		/* Reserved FLM Flow Types */
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, NT_FLM_MISS_FLOW_TYPE);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE, NT_FLM_UNHANDLED_FLOW_TYPE);
+		flow_nic_mark_resource_used(ndev, RES_FLM_FLOW_TYPE,
+			NT_FLM_VIOLATING_MBR_FLOW_TYPE);
+		flow_nic_mark_resource_used(ndev, RES_FLM_RCP, 0);
 
 		/* COT is locked to CFN. Don't set color for CFN 0 */
 		hw_mod_cat_cot_set(&ndev->be, HW_CAT_COT_PRESET_ALL, 0, 0);
@@ -2885,8 +3007,11 @@ int initialize_flow_management_of_ndev_profile_inline(struct flow_nic_dev *ndev)
 
 		flow_nic_mark_resource_used(ndev, RES_QSL_QST, 0);
 
-		/* SLC LR index 0 is reserved */
+		/* SLC LR and TPE indexes 0 are reserved */
 		flow_nic_mark_resource_used(ndev, RES_SLC_LR_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RCP, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_EXT, 0);
+		flow_nic_mark_resource_used(ndev, RES_TPE_RPL, 0);
 
 		/* PDB setup Direct Virtio Scatter-Gather descriptor of 12 bytes for its recipe 0
 		 */
-- 
2.45.0


  parent reply	other threads:[~2024-10-21 21:09 UTC|newest]

Thread overview: 229+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-10-21 21:04 [PATCH v1 00/73] Provide flow filter API and statistics Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 01/73] net/ntnic: add API for configuration NT flow dev Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 02/73] net/ntnic: add flow filter API Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 03/73] net/ntnic: add minimal create/destroy flow operations Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 04/73] net/ntnic: add internal flow create/destroy API Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 05/73] net/ntnic: add minimal NT flow inline profile Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 06/73] net/ntnic: add management API for NT flow profile Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 07/73] net/ntnic: add NT flow profile management implementation Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 08/73] net/ntnic: add create/destroy implementation for NT flows Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 09/73] net/ntnic: add infrastructure for for flow actions and items Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 10/73] net/ntnic: add action queue Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 11/73] net/ntnic: add action mark Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 12/73] net/ntnic: add ation jump Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 13/73] net/ntnic: add action drop Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 14/73] net/ntnic: add item eth Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 15/73] net/ntnic: add item IPv4 Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 16/73] net/ntnic: add item ICMP Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 17/73] net/ntnic: add item port ID Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 18/73] net/ntnic: add item void Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 19/73] net/ntnic: add item UDP Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 20/73] net/ntnic: add action TCP Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 21/73] net/ntnic: add action VLAN Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 22/73] net/ntnic: add item SCTP Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 23/73] net/ntnic: add items IPv6 and ICMPv6 Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 24/73] net/ntnic: add action modify filed Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 25/73] net/ntnic: add items gtp and actions raw encap/decap Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 26/73] net/ntnic: add cat module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 27/73] net/ntnic: add SLC LR module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 28/73] net/ntnic: add PDB module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 29/73] net/ntnic: add QSL module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 30/73] net/ntnic: add KM module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 31/73] net/ntnic: add hash API Serhii Iliushyk
2024-10-21 21:04 ` Serhii Iliushyk [this message]
2024-10-21 21:04 ` [PATCH v1 33/73] net/ntnic: add FLM module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 34/73] net/ntnic: add flm rcp module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 35/73] net/ntnic: add learn flow queue handling Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 36/73] net/ntnic: match and action db attributes were added Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 37/73] net/ntnic: add flow dump feature Serhii Iliushyk
2024-10-21 23:10   ` Stephen Hemminger
2024-10-21 21:04 ` [PATCH v1 38/73] net/ntnic: add flow flush Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 39/73] net/ntnic: add GMF (Generic MAC Feeder) module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 40/73] net/ntnic: sort FPGA registers alphanumerically Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 41/73] net/ntnic: add MOD CSU Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 42/73] net/ntnic: add MOD FLM Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 43/73] net/ntnic: add HFU module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 44/73] net/ntnic: add IFR module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 45/73] net/ntnic: add MAC Rx module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 46/73] net/ntnic: add MAC Tx module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 47/73] net/ntnic: add RPP LR module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 48/73] net/ntnic: add MOD SLC LR Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 49/73] net/ntnic: add Tx CPY module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 50/73] net/ntnic: add Tx INS module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 51/73] net/ntnic: add Tx RPL module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 52/73] net/ntnic: update alignment for virt queue structs Serhii Iliushyk
2024-10-21 23:12   ` Stephen Hemminger
2024-10-21 21:04 ` [PATCH v1 53/73] net/ntnic: enable RSS feature Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 54/73] net/ntnic: add statistics API Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 55/73] net/ntnic: add rpf module Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 56/73] net/ntnic: add statistics poll Serhii Iliushyk
2024-10-21 21:04 ` [PATCH v1 57/73] net/ntnic: added flm stat interface Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 58/73] net/ntnic: add tsm module Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 59/73] net/ntnic: add STA module Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 60/73] net/ntnic: add TSM module Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 61/73] net/ntnic: add xstats Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 62/73] net/ntnic: added flow statistics Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 63/73] net/ntnic: add scrub registers Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 64/73] net/ntnic: update documentation Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 65/73] net/ntnic: added flow aged APIs Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 66/73] net/ntnic: add aged API to the inline profile Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 67/73] net/ntnic: add info and configure flow API Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 68/73] net/ntnic: add aged flow event Serhii Iliushyk
2024-10-21 23:22   ` Stephen Hemminger
2024-10-21 21:05 ` [PATCH v1 69/73] net/ntnic: add thread termination Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 70/73] net/ntnic: add age documentation Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 71/73] net/ntnic: add meter API Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 72/73] net/ntnic: add meter module Serhii Iliushyk
2024-10-21 21:05 ` [PATCH v1 73/73] net/ntnic: add meter documentation Serhii Iliushyk
2024-10-22 16:54 ` [PATCH v2 00/73] Provide flow filter API and statistics Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 01/73] net/ntnic: add API for configuration NT flow dev Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 02/73] net/ntnic: add flow filter API Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 03/73] net/ntnic: add minimal create/destroy flow operations Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 04/73] net/ntnic: add internal flow create/destroy API Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 05/73] net/ntnic: add minimal NT flow inline profile Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 06/73] net/ntnic: add management API for NT flow profile Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 07/73] net/ntnic: add NT flow profile management implementation Serhii Iliushyk
2024-10-22 17:17     ` Stephen Hemminger
2024-10-22 16:54   ` [PATCH v2 08/73] net/ntnic: add create/destroy implementation for NT flows Serhii Iliushyk
2024-10-22 17:20     ` Stephen Hemminger
2024-10-23 16:09       ` Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 09/73] net/ntnic: add infrastructure for for flow actions and items Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 10/73] net/ntnic: add action queue Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 11/73] net/ntnic: add action mark Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 12/73] net/ntnic: add ation jump Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 13/73] net/ntnic: add action drop Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 14/73] net/ntnic: add item eth Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 15/73] net/ntnic: add item IPv4 Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 16/73] net/ntnic: add item ICMP Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 17/73] net/ntnic: add item port ID Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 18/73] net/ntnic: add item void Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 19/73] net/ntnic: add item UDP Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 20/73] net/ntnic: add action TCP Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 21/73] net/ntnic: add action VLAN Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 22/73] net/ntnic: add item SCTP Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 23/73] net/ntnic: add items IPv6 and ICMPv6 Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 24/73] net/ntnic: add action modify filed Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 25/73] net/ntnic: add items gtp and actions raw encap/decap Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 26/73] net/ntnic: add cat module Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 27/73] net/ntnic: add SLC LR module Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 28/73] net/ntnic: add PDB module Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 29/73] net/ntnic: add QSL module Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 30/73] net/ntnic: add KM module Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 31/73] net/ntnic: add hash API Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 32/73] net/ntnic: add TPE module Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 33/73] net/ntnic: add FLM module Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 34/73] net/ntnic: add flm rcp module Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 35/73] net/ntnic: add learn flow queue handling Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 36/73] net/ntnic: match and action db attributes were added Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 37/73] net/ntnic: add flow dump feature Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 38/73] net/ntnic: add flow flush Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 39/73] net/ntnic: add GMF (Generic MAC Feeder) module Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 40/73] net/ntnic: sort FPGA registers alphanumerically Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 41/73] net/ntnic: add MOD CSU Serhii Iliushyk
2024-10-22 16:54   ` [PATCH v2 42/73] net/ntnic: add MOD FLM Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 43/73] net/ntnic: add HFU module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 44/73] net/ntnic: add IFR module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 45/73] net/ntnic: add MAC Rx module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 46/73] net/ntnic: add MAC Tx module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 47/73] net/ntnic: add RPP LR module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 48/73] net/ntnic: add MOD SLC LR Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 49/73] net/ntnic: add Tx CPY module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 50/73] net/ntnic: add Tx INS module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 51/73] net/ntnic: add Tx RPL module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 52/73] net/ntnic: update alignment for virt queue structs Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 53/73] net/ntnic: enable RSS feature Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 54/73] net/ntnic: add statistics API Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 55/73] net/ntnic: add rpf module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 56/73] net/ntnic: add statistics poll Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 57/73] net/ntnic: added flm stat interface Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 58/73] net/ntnic: add tsm module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 59/73] net/ntnic: add STA module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 60/73] net/ntnic: add TSM module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 61/73] net/ntnic: add xstats Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 62/73] net/ntnic: added flow statistics Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 63/73] net/ntnic: add scrub registers Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 64/73] net/ntnic: update documentation Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 65/73] net/ntnic: added flow aged APIs Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 66/73] net/ntnic: add aged API to the inline profile Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 67/73] net/ntnic: add info and configure flow API Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 68/73] net/ntnic: add aged flow event Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 69/73] net/ntnic: add thread termination Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 70/73] net/ntnic: add age documentation Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 71/73] net/ntnic: add meter API Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 72/73] net/ntnic: add meter module Serhii Iliushyk
2024-10-22 16:55   ` [PATCH v2 73/73] net/ntnic: add meter documentation Serhii Iliushyk
2024-10-22 17:11   ` [PATCH v2 00/73] Provide flow filter API and statistics Stephen Hemminger
2024-10-23 16:59 ` [PATCH v3 " Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 01/73] net/ntnic: add API for configuration NT flow dev Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 02/73] net/ntnic: add flow filter API Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 03/73] net/ntnic: add minimal create/destroy flow operations Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 04/73] net/ntnic: add internal flow create/destroy API Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 05/73] net/ntnic: add minimal NT flow inline profile Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 06/73] net/ntnic: add management API for NT flow profile Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 07/73] net/ntnic: add NT flow profile management implementation Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 08/73] net/ntnic: add create/destroy implementation for NT flows Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 09/73] net/ntnic: add infrastructure for for flow actions and items Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 10/73] net/ntnic: add action queue Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 11/73] net/ntnic: add action mark Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 12/73] net/ntnic: add ation jump Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 13/73] net/ntnic: add action drop Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 14/73] net/ntnic: add item eth Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 15/73] net/ntnic: add item IPv4 Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 16/73] net/ntnic: add item ICMP Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 17/73] net/ntnic: add item port ID Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 18/73] net/ntnic: add item void Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 19/73] net/ntnic: add item UDP Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 20/73] net/ntnic: add action TCP Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 21/73] net/ntnic: add action VLAN Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 22/73] net/ntnic: add item SCTP Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 23/73] net/ntnic: add items IPv6 and ICMPv6 Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 24/73] net/ntnic: add action modify filed Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 25/73] net/ntnic: add items gtp and actions raw encap/decap Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 26/73] net/ntnic: add cat module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 27/73] net/ntnic: add SLC LR module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 28/73] net/ntnic: add PDB module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 29/73] net/ntnic: add QSL module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 30/73] net/ntnic: add KM module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 31/73] net/ntnic: add hash API Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 32/73] net/ntnic: add TPE module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 33/73] net/ntnic: add FLM module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 34/73] net/ntnic: add flm rcp module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 35/73] net/ntnic: add learn flow queue handling Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 36/73] net/ntnic: match and action db attributes were added Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 37/73] net/ntnic: add flow dump feature Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 38/73] net/ntnic: add flow flush Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 39/73] net/ntnic: add GMF (Generic MAC Feeder) module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 40/73] net/ntnic: sort FPGA registers alphanumerically Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 41/73] net/ntnic: add MOD CSU Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 42/73] net/ntnic: add MOD FLM Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 43/73] net/ntnic: add HFU module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 44/73] net/ntnic: add IFR module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 45/73] net/ntnic: add MAC Rx module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 46/73] net/ntnic: add MAC Tx module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 47/73] net/ntnic: add RPP LR module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 48/73] net/ntnic: add MOD SLC LR Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 49/73] net/ntnic: add Tx CPY module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 50/73] net/ntnic: add Tx INS module Serhii Iliushyk
2024-10-23 16:59   ` [PATCH v3 51/73] net/ntnic: add Tx RPL module Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 52/73] net/ntnic: update alignment for virt queue structs Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 53/73] net/ntnic: enable RSS feature Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 54/73] net/ntnic: add statistics API Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 55/73] net/ntnic: add rpf module Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 56/73] net/ntnic: add statistics poll Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 57/73] net/ntnic: added flm stat interface Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 58/73] net/ntnic: add tsm module Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 59/73] net/ntnic: add STA module Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 60/73] net/ntnic: add TSM module Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 61/73] net/ntnic: add xstats Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 62/73] net/ntnic: added flow statistics Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 63/73] net/ntnic: add scrub registers Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 64/73] net/ntnic: update documentation Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 65/73] net/ntnic: add flow aging API Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 66/73] net/ntnic: add aging API to the inline profile Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 67/73] net/ntnic: add flow info and flow configure APIs Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 68/73] net/ntnic: add flow aging event Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 69/73] net/ntnic: add termination thread Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 70/73] net/ntnic: add aging documentation Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 71/73] net/ntnic: add meter API Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 72/73] net/ntnic: add meter module Serhii Iliushyk
2024-10-23 17:00   ` [PATCH v3 73/73] net/ntnic: update meter documentation Serhii Iliushyk

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241021210527.2075431-33-sil-plv@napatech.com \
    --to=sil-plv@napatech.com \
    --cc=andrew.rybchenko@oktetlabs.ru \
    --cc=ckm@napatech.com \
    --cc=dev@dpdk.org \
    --cc=dvo-plv@napatech.com \
    --cc=ferruh.yigit@amd.com \
    --cc=mko-plv@napatech.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).