DPDK patches and discussions
 help / color / Atom feed
From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
To: dev@dpdk.org
Cc: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Subject: [dpdk-dev] [PATCH 14/14] net/bnxt: add VXLAN decap offload support
Date: Sat, 17 Oct 2020 11:58:09 +0530
Message-ID: <1602916089-18576-15-git-send-email-venkatkumar.duvvuru@broadcom.com> (raw)
In-Reply-To: <1602916089-18576-1-git-send-email-venkatkumar.duvvuru@broadcom.com>

VXLAN decap offload can happen in stages. The offload request may
not come as a single flow offload request; rather, it may come as two
flow offload requests, F1 & F2. This patch adds support for this two-stage
offload design. The match criteria for F1 is O_DMAC, O_SMAC,
O_DST_IP, O_UDP_DPORT and actions are COUNT, MARK, JUMP. The match
criteria for F2 is O_SRC_IP, O_DST_IP, VNI and inner header fields.
F1 and F2 flow offload requests can come in any order. If the F2 flow
offload request comes first, then F2 cannot be offloaded as there is
no O_DMAC information in F2. In this case, F2 will be deferred until
F1 flow offload request arrives. When F1 flow offload request is
received, it will have the O_DMAC information. Using F1's O_DMAC, the driver
creates an L2 context entry in the hardware as part of offloading F1.
F2 will now use F1's O_DMAC to get the L2 context id associated with
this O_DMAC and other flow fields that are cached already at the time
of deferring F2 for offloading. F2s that arrive after F1 is offloaded
will be directly programmed and not cached.

Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
 drivers/net/bnxt/meson.build                   |   1 +
 drivers/net/bnxt/tf_ulp/bnxt_tf_common.h       |   4 +-
 drivers/net/bnxt/tf_ulp/bnxt_ulp.c             |  10 +
 drivers/net/bnxt/tf_ulp/bnxt_ulp.h             |  12 +
 drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c        |  84 +++----
 drivers/net/bnxt/tf_ulp/ulp_flow_db.c          | 149 +++++++++---
 drivers/net/bnxt/tf_ulp/ulp_flow_db.h          |   2 +
 drivers/net/bnxt/tf_ulp/ulp_mapper.c           |   1 +
 drivers/net/bnxt/tf_ulp/ulp_mapper.h           |   2 +
 drivers/net/bnxt/tf_ulp/ulp_rte_parser.c       |  75 +++++-
 drivers/net/bnxt/tf_ulp/ulp_rte_parser.h       |   4 +-
 drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h |   4 +-
 drivers/net/bnxt/tf_ulp/ulp_template_struct.h  |   7 +
 drivers/net/bnxt/tf_ulp/ulp_tun.c              | 310 +++++++++++++++++++++++++
 drivers/net/bnxt/tf_ulp/ulp_tun.h              |  92 ++++++++
 15 files changed, 675 insertions(+), 82 deletions(-)
 create mode 100644 drivers/net/bnxt/tf_ulp/ulp_tun.c
 create mode 100644 drivers/net/bnxt/tf_ulp/ulp_tun.h

diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build
index 3952108..bc74f88 100644
--- a/drivers/net/bnxt/meson.build
+++ b/drivers/net/bnxt/meson.build
@@ -64,6 +64,7 @@ sources = files('bnxt_cpr.c',
 	'tf_ulp/ulp_port_db.c',
 	'tf_ulp/ulp_def_rules.c',
 	'tf_ulp/ulp_fc_mgr.c',
+	'tf_ulp/ulp_tun.c',
 	'tf_ulp/ulp_template_db_wh_plus_act.c',
 	'tf_ulp/ulp_template_db_wh_plus_class.c',
 	'tf_ulp/ulp_template_db_stingray_act.c',
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h b/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h
index f0633f0..b2629e4 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h
@@ -33,7 +33,9 @@
 enum bnxt_tf_rc {
 	BNXT_TF_RC_PARSE_ERR	= -2,
 	BNXT_TF_RC_ERROR	= -1,
-	BNXT_TF_RC_SUCCESS	= 0
+	BNXT_TF_RC_SUCCESS	= 0,
+	BNXT_TF_RC_NORMAL	= 1,
+	BNXT_TF_RC_FID		= 2,
 };
 
 /* eth IPv4 Type */
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index d753b5a..26fd300 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -1321,6 +1321,16 @@ bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context	*ulp_ctx)
 	return ulp_ctx->cfg_data->flow_db;
 }
 
+/* Function to get the tunnel cache table info from the ulp context. */
+struct bnxt_tun_cache_entry *
+bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx)
+{
+	if (!ulp_ctx || !ulp_ctx->cfg_data)
+		return NULL;
+
+	return ulp_ctx->cfg_data->tun_tbl;
+}
+
 /* Function to get the ulp context from eth device. */
 struct bnxt_ulp_context	*
 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev	*dev)
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
index c2c5bcb..db1ee50 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
@@ -13,6 +13,8 @@
 #include "rte_ethdev.h"
 
 #include "ulp_template_db_enum.h"
+#include "ulp_tun.h"
+#include "bnxt_tf_common.h"
 
 /* NAT defines to reuse existing inner L2 SMAC and DMAC */
 #define BNXT_ULP_NAT_INNER_L2_HEADER_SMAC	0x2000
@@ -55,6 +57,9 @@ struct bnxt_ulp_data {
 	struct bnxt_ulp_df_rule_info	df_rule_info[RTE_MAX_ETHPORTS];
 	struct bnxt_ulp_vfr_rule_info	vfr_rule_info[RTE_MAX_ETHPORTS];
 	enum bnxt_ulp_flow_mem_type	mem_type;
+#define	BNXT_ULP_TUN_ENTRY_INVALID	-1
+#define	BNXT_ULP_MAX_TUN_CACHE_ENTRIES	16
+	struct bnxt_tun_cache_entry	tun_tbl[BNXT_ULP_MAX_TUN_CACHE_ENTRIES];
 };
 
 struct bnxt_ulp_context {
@@ -151,6 +156,10 @@ bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context	*ulp_ctx,
 struct bnxt_ulp_flow_db	*
 bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context	*ulp_ctx);
 
+/* Function to get the tunnel cache table info from the ulp context. */
+struct bnxt_tun_cache_entry *
+bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context	*ulp_ctx);
+
 /* Function to get the ulp context from eth device. */
 struct bnxt_ulp_context	*
 bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev);
@@ -214,4 +223,7 @@ bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context	*ulp_ctx);
 void
 bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context	*ulp_ctx);
 
+int32_t
+ulp_post_process_tun_flow(struct ulp_rte_parser_params *params);
+
 #endif /* _BNXT_ULP_H_ */
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
index 47fbaba..75a7dbe 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c
@@ -77,24 +77,22 @@ bnxt_ulp_set_dir_attributes(struct ulp_rte_parser_params *params,
 void
 bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_create_parms *mapper_cparms,
 			    struct ulp_rte_parser_params *params,
-			    uint32_t priority, uint32_t class_id,
-			    uint32_t act_tmpl, uint16_t func_id,
-			    uint32_t fid,
 			    enum bnxt_ulp_fdb_type flow_type)
 {
-	mapper_cparms->app_priority = priority;
-	mapper_cparms->dir_attr = params->dir_attr;
-
-	mapper_cparms->class_tid = class_id;
-	mapper_cparms->act_tid = act_tmpl;
-	mapper_cparms->func_id = func_id;
-	mapper_cparms->hdr_bitmap = &params->hdr_bitmap;
-	mapper_cparms->hdr_field = params->hdr_field;
-	mapper_cparms->comp_fld = params->comp_fld;
-	mapper_cparms->act = &params->act_bitmap;
-	mapper_cparms->act_prop = &params->act_prop;
-	mapper_cparms->flow_type = flow_type;
-	mapper_cparms->flow_id = fid;
+	mapper_cparms->flow_type	= flow_type;
+	mapper_cparms->app_priority	= params->priority;
+	mapper_cparms->dir_attr		= params->dir_attr;
+	mapper_cparms->class_tid	= params->class_id;
+	mapper_cparms->act_tid		= params->act_tmpl;
+	mapper_cparms->func_id		= params->func_id;
+	mapper_cparms->hdr_bitmap	= &params->hdr_bitmap;
+	mapper_cparms->hdr_field	= params->hdr_field;
+	mapper_cparms->comp_fld		= params->comp_fld;
+	mapper_cparms->act		= &params->act_bitmap;
+	mapper_cparms->act_prop		= &params->act_prop;
+	mapper_cparms->flow_id		= params->fid;
+	mapper_cparms->parent_flow	= params->parent_flow;
+	mapper_cparms->parent_fid	= params->parent_fid;
 }
 
 /* Function to create the rte flow. */
@@ -109,7 +107,6 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev,
 	struct ulp_rte_parser_params params;
 	struct bnxt_ulp_context *ulp_ctx;
 	int rc, ret = BNXT_TF_RC_ERROR;
-	uint32_t class_id, act_tmpl;
 	struct rte_flow *flow_id;
 	uint16_t func_id;
 	uint32_t fid;
@@ -118,13 +115,13 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev,
 					pattern, actions,
 					error) == BNXT_TF_RC_ERROR) {
 		BNXT_TF_DBG(ERR, "Invalid arguments being passed\n");
-		goto parse_err1;
+		goto flow_error;
 	}
 
 	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
 	if (!ulp_ctx) {
 		BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
-		goto parse_err1;
+		goto flow_error;
 	}
 
 	/* Initialize the parser params */
@@ -145,13 +142,13 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev,
 					 dev->data->port_id,
 					 &func_id)) {
 		BNXT_TF_DBG(ERR, "conversion of port to func id failed\n");
-		goto parse_err1;
+		goto flow_error;
 	}
 
 	/* Protect flow creation */
 	if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx)) {
 		BNXT_TF_DBG(ERR, "Flow db lock acquire failed\n");
-		goto parse_err1;
+		goto flow_error;
 	}
 
 	/* Allocate a Flow ID for attaching all resources for the flow to.
@@ -162,50 +159,55 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev,
 				   func_id, &fid);
 	if (rc) {
 		BNXT_TF_DBG(ERR, "Unable to allocate flow table entry\n");
-		goto parse_err2;
+		goto release_lock;
 	}
 
 	/* Parse the rte flow pattern */
 	ret = bnxt_ulp_rte_parser_hdr_parse(pattern, &params);
 	if (ret != BNXT_TF_RC_SUCCESS)
-		goto parse_err3;
+		goto free_fid;
 
 	/* Parse the rte flow action */
 	ret = bnxt_ulp_rte_parser_act_parse(actions, &params);
 	if (ret != BNXT_TF_RC_SUCCESS)
-		goto parse_err3;
+		goto free_fid;
 
+	params.fid = fid;
+	params.func_id = func_id;
+	params.priority = attr->priority;
 	/* Perform the rte flow post process */
 	ret = bnxt_ulp_rte_parser_post_process(&params);
-	if (ret != BNXT_TF_RC_SUCCESS)
-		goto parse_err3;
+	if (ret == BNXT_TF_RC_ERROR)
+		goto free_fid;
+	else if (ret == BNXT_TF_RC_FID)
+		goto return_fid;
 
-	ret = ulp_matcher_pattern_match(&params, &class_id);
+	ret = ulp_matcher_pattern_match(&params, &params.class_id);
 	if (ret != BNXT_TF_RC_SUCCESS)
-		goto parse_err3;
+		goto free_fid;
 
-	ret = ulp_matcher_action_match(&params, &act_tmpl);
+	ret = ulp_matcher_action_match(&params, &params.act_tmpl);
 	if (ret != BNXT_TF_RC_SUCCESS)
-		goto parse_err3;
+		goto free_fid;
 
-	bnxt_ulp_init_mapper_params(&mapper_cparms, &params, attr->priority,
-				    class_id, act_tmpl, func_id, fid,
+	bnxt_ulp_init_mapper_params(&mapper_cparms, &params,
 				    BNXT_ULP_FDB_TYPE_REGULAR);
 	/* Call the ulp mapper to create the flow in the hardware. */
 	ret = ulp_mapper_flow_create(ulp_ctx, &mapper_cparms);
 	if (ret)
-		goto parse_err3;
+		goto free_fid;
 
+return_fid:
 	bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
 
 	flow_id = (struct rte_flow *)((uintptr_t)fid);
 	return flow_id;
 
-parse_err3:
+free_fid:
 	ulp_flow_db_fid_free(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, fid);
-parse_err2:
+release_lock:
 	bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
-parse_err1:
+flow_error:
 	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 			   "Failed to create flow.");
 	return NULL;
@@ -219,10 +221,10 @@ bnxt_ulp_flow_validate(struct rte_eth_dev *dev,
 		       const struct rte_flow_action actions[],
 		       struct rte_flow_error *error)
 {
-	struct ulp_rte_parser_params		params;
+	struct ulp_rte_parser_params params;
+	struct bnxt_ulp_context *ulp_ctx;
 	uint32_t class_id, act_tmpl;
 	int ret = BNXT_TF_RC_ERROR;
-	struct bnxt_ulp_context *ulp_ctx;
 
 	if (bnxt_ulp_flow_validate_args(attr,
 					pattern, actions,
@@ -256,8 +258,10 @@ bnxt_ulp_flow_validate(struct rte_eth_dev *dev,
 
 	/* Perform the rte flow post process */
 	ret = bnxt_ulp_rte_parser_post_process(&params);
-	if (ret != BNXT_TF_RC_SUCCESS)
+	if (ret == BNXT_TF_RC_ERROR)
 		goto parse_error;
+	else if (ret == BNXT_TF_RC_FID)
+		return 0;
 
 	ret = ulp_matcher_pattern_match(&params, &class_id);
 
@@ -283,10 +287,10 @@ bnxt_ulp_flow_destroy(struct rte_eth_dev *dev,
 		      struct rte_flow *flow,
 		      struct rte_flow_error *error)
 {
-	int ret = 0;
 	struct bnxt_ulp_context *ulp_ctx;
 	uint32_t flow_id;
 	uint16_t func_id;
+	int ret;
 
 	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
 	if (!ulp_ctx) {
diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
index 8780c01..5e7c8ab 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
@@ -11,6 +11,7 @@
 #include "ulp_mapper.h"
 #include "ulp_flow_db.h"
 #include "ulp_fc_mgr.h"
+#include "ulp_tun.h"
 
 #define ULP_FLOW_DB_RES_DIR_BIT		31
 #define ULP_FLOW_DB_RES_DIR_MASK	0x80000000
@@ -375,6 +376,101 @@ ulp_flow_db_parent_tbl_deinit(struct bnxt_ulp_flow_db *flow_db)
 	}
 }
 
+/* internal validation function for parent flow tbl */
+static struct bnxt_ulp_flow_db *
+ulp_flow_db_parent_arg_validation(struct bnxt_ulp_context *ulp_ctxt,
+				  uint32_t fid)
+{
+	struct bnxt_ulp_flow_db *flow_db;
+
+	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
+	if (!flow_db) {
+		BNXT_TF_DBG(ERR, "Invalid Arguments\n");
+		return NULL;
+	}
+
+	/* check for max flows */
+	if (fid >= flow_db->flow_tbl.num_flows || !fid) {
+		BNXT_TF_DBG(ERR, "Invalid flow index\n");
+		return NULL;
+	}
+
+	/* No support for parent child db then just exit */
+	if (!flow_db->parent_child_db.entries_count) {
+		BNXT_TF_DBG(ERR, "parent child db not supported\n");
+		return NULL;
+	}
+
+	return flow_db;
+}
+
+/*
+ * Set the tunnel index in the parent flow
+ *
+ * ulp_ctxt [in] Ptr to ulp_context
+ * parent_idx [in] The parent index of the parent flow entry
+ *
+ * returns 0 on success and negative on failure.
+ */
+static int32_t
+ulp_flow_db_parent_tun_idx_set(struct bnxt_ulp_context *ulp_ctxt,
+			       uint32_t parent_idx, uint8_t tun_idx)
+{
+	struct bnxt_ulp_flow_db *flow_db;
+	struct ulp_fdb_parent_child_db *p_pdb;
+
+	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
+	if (!flow_db) {
+		BNXT_TF_DBG(ERR, "Invalid Arguments\n");
+		return -EINVAL;
+	}
+
+	/* check for parent idx validity */
+	p_pdb = &flow_db->parent_child_db;
+	if (parent_idx >= p_pdb->entries_count ||
+	    !p_pdb->parent_flow_tbl[parent_idx].parent_fid) {
+		BNXT_TF_DBG(ERR, "Invalid parent flow index %x\n", parent_idx);
+		return -EINVAL;
+	}
+
+	p_pdb->parent_flow_tbl[parent_idx].tun_idx = tun_idx;
+	return 0;
+}
+
+/*
+ * Get the tunnel index from the parent flow
+ *
+ * ulp_ctxt [in] Ptr to ulp_context
+ * parent_fid [in] The flow id of the parent flow entry
+ *
+ * returns 0 on success and negative on failure.
+ */
+static int32_t
+ulp_flow_db_parent_tun_idx_get(struct bnxt_ulp_context *ulp_ctxt,
+			       uint32_t parent_fid, uint8_t *tun_idx)
+{
+	struct bnxt_ulp_flow_db *flow_db;
+	struct ulp_fdb_parent_child_db *p_pdb;
+	uint32_t idx;
+
+	/* validate the arguments */
+	flow_db = ulp_flow_db_parent_arg_validation(ulp_ctxt, parent_fid);
+	if (!flow_db) {
+		BNXT_TF_DBG(ERR, "parent child db validation failed\n");
+		return -EINVAL;
+	}
+
+	p_pdb = &flow_db->parent_child_db;
+	for (idx = 0; idx < p_pdb->entries_count; idx++) {
+		if (p_pdb->parent_flow_tbl[idx].parent_fid == parent_fid) {
+			*tun_idx = p_pdb->parent_flow_tbl[idx].tun_idx;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
 /*
  * Initialize the flow database. Memory is allocated in this
  * call and assigned to the flow database.
@@ -663,6 +759,9 @@ ulp_flow_db_resource_del(struct bnxt_ulp_context *ulp_ctxt,
 	struct bnxt_ulp_flow_tbl *flow_tbl;
 	struct ulp_fdb_resource_info *nxt_resource, *fid_resource;
 	uint32_t nxt_idx = 0;
+	struct bnxt_tun_cache_entry *tun_tbl;
+	uint8_t tun_idx = 0;
+	int rc;
 
 	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
 	if (!flow_db) {
@@ -739,6 +838,18 @@ ulp_flow_db_resource_del(struct bnxt_ulp_context *ulp_ctxt,
 				      params->resource_hndl);
 	}
 
+	if (params->resource_func == BNXT_ULP_RESOURCE_FUNC_PARENT_FLOW) {
+		tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(ulp_ctxt);
+		if (!tun_tbl)
+			return -EINVAL;
+
+		rc = ulp_flow_db_parent_tun_idx_get(ulp_ctxt, fid, &tun_idx);
+		if (rc)
+			return rc;
+
+		ulp_clear_tun_entry(tun_tbl, tun_idx);
+	}
+
 	/* all good, return success */
 	return 0;
 }
@@ -1159,34 +1270,6 @@ ulp_default_flow_db_cfa_action_get(struct bnxt_ulp_context *ulp_ctx,
 	return 0;
 }
 
-/* internal validation function for parent flow tbl */
-static struct bnxt_ulp_flow_db *
-ulp_flow_db_parent_arg_validation(struct bnxt_ulp_context *ulp_ctxt,
-				  uint32_t fid)
-{
-	struct bnxt_ulp_flow_db *flow_db;
-
-	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
-	if (!flow_db) {
-		BNXT_TF_DBG(ERR, "Invalid Arguments\n");
-		return NULL;
-	}
-
-	/* check for max flows */
-	if (fid >= flow_db->flow_tbl.num_flows || !fid) {
-		BNXT_TF_DBG(ERR, "Invalid flow index\n");
-		return NULL;
-	}
-
-	/* No support for parent child db then just exit */
-	if (!flow_db->parent_child_db.entries_count) {
-		BNXT_TF_DBG(ERR, "parent child db not supported\n");
-		return NULL;
-	}
-
-	return flow_db;
-}
-
 /*
  * Allocate the entry in the parent-child database
  *
@@ -1559,7 +1642,7 @@ ulp_flow_db_parent_flow_create(struct bnxt_ulp_mapper_parms *parms)
 	struct ulp_flow_db_res_params fid_parms;
 	uint32_t sub_type = BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_INT_COUNT_ACC;
 	struct ulp_flow_db_res_params res_params;
-	int32_t fid_idx;
+	int32_t fid_idx, rc;
 
 	/* create the child flow entry in parent flow table */
 	fid_idx = ulp_flow_db_parent_flow_alloc(parms->ulp_ctx, parms->fid);
@@ -1596,6 +1679,14 @@ ulp_flow_db_parent_flow_create(struct bnxt_ulp_mapper_parms *parms)
 			return -1;
 		}
 	}
+
+	rc  = ulp_flow_db_parent_tun_idx_set(parms->ulp_ctx, fid_idx,
+					     parms->tun_idx);
+	if (rc) {
+		BNXT_TF_DBG(ERR, "Error setting tun_idx in the parent flow\n");
+		return rc;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
index 10e69ba..f7dfd67 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
@@ -60,6 +60,8 @@ struct ulp_fdb_parent_info {
 	uint64_t	pkt_count;
 	uint64_t	byte_count;
 	uint64_t	*child_fid_bitset;
+	uint32_t	f2_cnt;
+	uint8_t		tun_idx;
 };
 
 /* Structure to maintain parent-child flow relationships */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index d5c129b..2964323 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -2815,6 +2815,7 @@ ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx,
 	parms.parent_flow = cparms->parent_flow;
 	parms.parent_fid = cparms->parent_fid;
 	parms.fid = cparms->flow_id;
+	parms.tun_idx = cparms->tun_idx;
 
 	/* Get the device id from the ulp context */
 	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &parms.dev_id)) {
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.h b/drivers/net/bnxt/tf_ulp/ulp_mapper.h
index 0595d15..9bd94f5 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.h
@@ -78,6 +78,7 @@ struct bnxt_ulp_mapper_parms {
 	struct bnxt_ulp_device_params           *device_params;
 	uint32_t				parent_fid;
 	uint32_t				parent_flow;
+	uint8_t					tun_idx;
 };
 
 struct bnxt_ulp_mapper_create_parms {
@@ -98,6 +99,7 @@ struct bnxt_ulp_mapper_create_parms {
 	uint32_t			parent_fid;
 	/* if set then create a parent flow */
 	uint32_t			parent_flow;
+	uint8_t				tun_idx;
 };
 
 /* Function to initialize any dynamic mapper data. */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index 42021ae..df38b83 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -6,11 +6,16 @@
 #include "bnxt.h"
 #include "ulp_template_db_enum.h"
 #include "ulp_template_struct.h"
+#include "bnxt_ulp.h"
 #include "bnxt_tf_common.h"
 #include "ulp_rte_parser.h"
+#include "ulp_matcher.h"
 #include "ulp_utils.h"
 #include "tfp.h"
 #include "ulp_port_db.h"
+#include "ulp_flow_db.h"
+#include "ulp_mapper.h"
+#include "ulp_tun.h"
 
 /* Local defines for the parsing functions */
 #define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
@@ -243,14 +248,11 @@ bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
 	}
 }
 
-/*
- * Function to handle the post processing of the parsing details
- */
-int32_t
-bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
+static int32_t
+ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
 {
-	enum bnxt_ulp_direction_type dir;
 	enum bnxt_ulp_intf_type match_port_type, act_port_type;
+	enum bnxt_ulp_direction_type dir;
 	uint32_t act_port_set;
 
 	/* Get the computed details */
@@ -306,6 +308,16 @@ bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
 }
 
 /*
+ * Function to handle the post processing of the parsing details
+ */
+int32_t
+bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
+{
+	ulp_post_process_normal_flow(params);
+	return ulp_post_process_tun_flow(params);
+}
+
+/*
  * Function to compute the flow direction based on the match port details
  */
 static void
@@ -679,7 +691,16 @@ ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
 	params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
 
 	/* Update the protocol hdr bitmap */
-	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
+	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+			     BNXT_ULP_HDR_BIT_O_ETH) ||
+	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+			     BNXT_ULP_HDR_BIT_O_IPV4) ||
+	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+			     BNXT_ULP_HDR_BIT_O_IPV6) ||
+	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+			     BNXT_ULP_HDR_BIT_O_UDP) ||
+	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+			     BNXT_ULP_HDR_BIT_O_TCP)) {
 		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
 		inner_flag = 1;
 	} else {
@@ -875,6 +896,22 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
 		return BNXT_TF_RC_ERROR;
 	}
 
+	if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+			      BNXT_ULP_HDR_BIT_O_ETH) &&
+	    !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+			      BNXT_ULP_HDR_BIT_I_ETH)) {
+		/* Since F2 flow does not include eth item, when parser detects
+		 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
+		 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
+		 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
+		 * This will allow the parser post processor to update the
+		 * t_dmac in hdr_field[o_eth.dmac]
+		 */
+		idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
+			BNXT_ULP_PROTO_HDR_VLAN_NUM);
+		params->field_idx = idx;
+	}
+
 	/*
 	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
 	 * header fields
@@ -1004,6 +1041,22 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
 		return BNXT_TF_RC_ERROR;
 	}
 
+	if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+			      BNXT_ULP_HDR_BIT_O_ETH) &&
+	    !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+			      BNXT_ULP_HDR_BIT_I_ETH)) {
+		/* Since F2 flow does not include eth item, when parser detects
+		 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
+		 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
+		 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
+		 * This will allow the parser post processor to update the
+		 * t_dmac in hdr_field[o_eth.dmac]
+		 */
+		idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
+			BNXT_ULP_PROTO_HDR_VLAN_NUM);
+		params->field_idx = idx;
+	}
+
 	/*
 	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
 	 * header fields
@@ -1109,9 +1162,11 @@ static void
 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
 			     uint16_t dst_port)
 {
-	if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
+	if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
 		ULP_BITMAP_SET(param->hdr_fp_bit.bits,
 			       BNXT_ULP_HDR_BIT_T_VXLAN);
+		ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
+	}
 }
 
 /* Function to handle the parsing of RTE Flow item UDP Header. */
@@ -1143,6 +1198,7 @@ ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
 		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
 						&udp_spec->hdr.src_port,
 						size);
+
 		size = sizeof(udp_spec->hdr.dst_port);
 		field = ulp_rte_parser_fld_copy(field,
 						&udp_spec->hdr.dst_port,
@@ -1689,6 +1745,9 @@ ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
 	/* update the hdr_bitmap with vxlan */
 	ULP_BITMAP_SET(params->act_bitmap.bits,
 		       BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
+	/* Update computational field with tunnel decap info */
+	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
+	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
 	return BNXT_TF_RC_SUCCESS;
 }
 
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h
index a71aabe..7996317 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.h
@@ -12,6 +12,7 @@
 #include "ulp_template_db_enum.h"
 #include "ulp_template_struct.h"
 #include "ulp_mapper.h"
+#include "bnxt_tf_common.h"
 
 /* defines to be used in the tunnel header parsing */
 #define BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS	2
@@ -38,9 +39,6 @@
 void
 bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_create_parms *mapper_cparms,
 			    struct ulp_rte_parser_params *params,
-			    uint32_t priority, uint32_t class_id,
-			    uint32_t act_tmpl, uint16_t func_id,
-			    uint32_t flow_id,
 			    enum bnxt_ulp_fdb_type flow_type);
 
 /* Function to handle the parsing of the RTE port id. */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index 10838f5..6802deb 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -135,7 +135,9 @@ enum bnxt_ulp_cf_idx {
 	BNXT_ULP_CF_IDX_L4_HDR_CNT = 41,
 	BNXT_ULP_CF_IDX_VFR_MODE = 42,
 	BNXT_ULP_CF_IDX_LOOPBACK_PARIF = 43,
-	BNXT_ULP_CF_IDX_LAST = 44
+	BNXT_ULP_CF_IDX_L3_TUN = 44,
+	BNXT_ULP_CF_IDX_L3_TUN_DECAP = 45,
+	BNXT_ULP_CF_IDX_LAST = 46
 };
 
 enum bnxt_ulp_cond_opcode {
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
index 69bb61e..9d690a9 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
@@ -72,6 +72,13 @@ struct ulp_rte_parser_params {
 	struct ulp_rte_act_bitmap	act_bitmap;
 	struct ulp_rte_act_prop		act_prop;
 	uint32_t			dir_attr;
+	uint32_t			priority;
+	uint32_t			fid;
+	uint32_t			parent_flow;
+	uint32_t			parent_fid;
+	uint16_t			func_id;
+	uint32_t			class_id;
+	uint32_t			act_tmpl;
 	struct bnxt_ulp_context		*ulp_ctx;
 };
 
diff --git a/drivers/net/bnxt/tf_ulp/ulp_tun.c b/drivers/net/bnxt/tf_ulp/ulp_tun.c
new file mode 100644
index 0000000..e8d2861
--- /dev/null
+++ b/drivers/net/bnxt/tf_ulp/ulp_tun.c
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2020 Broadcom
+ * All rights reserved.
+ */
+
+#include <rte_malloc.h>
+
+#include "ulp_tun.h"
+#include "ulp_rte_parser.h"
+#include "ulp_template_db_enum.h"
+#include "ulp_template_struct.h"
+#include "ulp_matcher.h"
+#include "ulp_mapper.h"
+#include "ulp_flow_db.h"
+
+/* This function programs the outer tunnel flow in the hardware. */
+static int32_t
+ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
+			   struct bnxt_tun_cache_entry *tun_entry,
+			   uint16_t tun_idx)
+{
+	struct bnxt_ulp_mapper_create_parms mparms = { 0 };
+	int ret;
+
+	/* Reset the JUMP action bit in the action bitmap as we don't
+	 * offload this action.
+	 */
+	ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
+
+	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
+
+	ret = ulp_matcher_pattern_match(params, &params->class_id);
+	if (ret != BNXT_TF_RC_SUCCESS)
+		goto err;
+
+	ret = ulp_matcher_action_match(params, &params->act_tmpl);
+	if (ret != BNXT_TF_RC_SUCCESS)
+		goto err;
+
+	params->parent_flow = true;
+	bnxt_ulp_init_mapper_params(&mparms, params,
+				    BNXT_ULP_FDB_TYPE_REGULAR);
+	mparms.tun_idx = tun_idx;
+
+	/* Call the ulp mapper to create the flow in the hardware. */
+	ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
+	if (ret)
+		goto err;
+
+	/* Store the tunnel dmac in the tunnel cache table and use it while
+	 * programming tunnel flow F2.
+	 */
+	memcpy(tun_entry->t_dmac,
+	       &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
+	       RTE_ETHER_ADDR_LEN);
+
+	tun_entry->valid = true;
+	tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
+	tun_entry->outer_tun_flow_id = params->fid;
+
+	/* F1 and its related F2s are correlated based on
+	 * Tunnel Destination IP Address.
+	 */
+	if (tun_entry->t_dst_ip_valid)
+		goto done;
+	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
+		memcpy(&tun_entry->t_dst_ip,
+		       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
+		       sizeof(rte_be32_t));
+	else
+		memcpy(tun_entry->t_dst_ip6,
+		       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
+		       sizeof(tun_entry->t_dst_ip6));
+	tun_entry->t_dst_ip_valid = true;
+
+done:
+	return BNXT_TF_RC_FID;
+
+err:
+	memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
+	return BNXT_TF_RC_ERROR;
+}
+
+/* This function programs the inner tunnel flow in the hardware. */
+static void
+ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry)
+{
+	struct bnxt_ulp_mapper_create_parms mparms = { 0 };
+	struct ulp_rte_parser_params *params;
+	int ret;
+
+	/* F2 doesn't have tunnel dmac, use the tunnel dmac that was
+	 * stored during F1 programming.
+	 */
+	params = &tun_entry->first_inner_tun_params;
+	memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
+	       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
+	params->parent_fid = tun_entry->outer_tun_flow_id;
+	params->fid = tun_entry->first_inner_tun_flow_id;
+
+	bnxt_ulp_init_mapper_params(&mparms, params,
+				    BNXT_ULP_FDB_TYPE_REGULAR);
+
+	ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
+	if (ret)
+		PMD_DRV_LOG(ERR, "Failed to create F2 flow.");
+}
+
+/* This function either install outer tunnel flow & inner tunnel flow
+ * or just the outer tunnel flow based on the flow state.
+ */
+static int32_t
+ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
+			     struct bnxt_tun_cache_entry *tun_entry,
+			     uint16_t tun_idx)
+{
+	enum bnxt_ulp_tun_flow_state flow_state;
+	int ret;
+
+	flow_state = tun_entry->state;
+	ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
+	if (ret)
+		return ret;
+
+	/* If flow_state == BNXT_ULP_FLOW_STATE_NORMAL before installing
+	 * F1, that means F2 is not deferred. Hence, no need to install F2.
+	 */
+	if (flow_state != BNXT_ULP_FLOW_STATE_NORMAL)
+		ulp_install_inner_tun_flow(tun_entry);
+
+	return 0;
+}
+
+/* This function will be called if inner tunnel flow request comes before
+ * outer tunnel flow request.
+ */
+static int32_t
+ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,
+				      struct bnxt_tun_cache_entry *tun_entry)
+{
+	int ret;
+
+	ret = ulp_matcher_pattern_match(params, &params->class_id);
+	if (ret != BNXT_TF_RC_SUCCESS)
+		return BNXT_TF_RC_ERROR;
+
+	ret = ulp_matcher_action_match(params, &params->act_tmpl);
+	if (ret != BNXT_TF_RC_SUCCESS)
+		return BNXT_TF_RC_ERROR;
+
+	/* If Tunnel F2 flow comes first then we can't install it in the
+	 * hardware, because, F2 flow will not have L2 context information.
+	 * So, just cache the F2 information and program it in the context
+	 * of F1 flow installation.
+	 */
+	memcpy(&tun_entry->first_inner_tun_params, params,
+	       sizeof(struct ulp_rte_parser_params));
+
+	tun_entry->first_inner_tun_flow_id = params->fid;
+	tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;
+
+	/* F1 and its related F2s are correlated based on
+	 * Tunnel Destination IP Address. It could be already set, if
+	 * the inner flow got offloaded first.
+	 */
+	if (tun_entry->t_dst_ip_valid)
+		goto done;
+	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
+		memcpy(&tun_entry->t_dst_ip,
+		       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
+		       sizeof(rte_be32_t));
+	else
+		memcpy(tun_entry->t_dst_ip6,
+		       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
+		       sizeof(tun_entry->t_dst_ip6));
+	tun_entry->t_dst_ip_valid = true;
+
+done:
+	return BNXT_TF_RC_FID;
+}
+
+/* This function will be called if inner tunnel flow request comes after
+ * the outer tunnel flow request.
+ *
+ * Fills in the outer DMAC that F2 lacks from the cached F1 information
+ * and ties F2 to its parent F1 flow id, then lets normal flow
+ * installation proceed.
+ */
+static int32_t
+ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
+				struct bnxt_tun_cache_entry *tun_entry)
+{
+	/* Copy into the field's spec explicitly (matching every other
+	 * hdr_field access in this file) instead of relying on spec
+	 * being the first member of the hdr_field element.
+	 */
+	memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
+	       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
+
+	/* Record the F1 flow as this F2's parent for flow-db tracking */
+	params->parent_fid = tun_entry->outer_tun_flow_id;
+
+	return BNXT_TF_RC_NORMAL;
+}
+
+/* Looks up the tunnel cache entry matching the flow's tunnel dst IP.
+ * On a hit, returns the matching entry and its index; on a miss,
+ * returns the first free entry so the caller can populate it.
+ * Returns BNXT_TF_RC_ERROR when the table is unavailable or there is
+ * neither a match nor a free slot, 0 otherwise.
+ */
+static int32_t
+ulp_get_tun_entry(struct ulp_rte_parser_params *params,
+		  struct bnxt_tun_cache_entry **tun_entry,
+		  uint16_t *tun_idx)
+{
+	int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
+	struct bnxt_tun_cache_entry *tun_tbl;
+	bool tun_entry_found = false, free_entry_found = false;
+
+	tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
+	if (!tun_tbl)
+		return BNXT_TF_RC_ERROR;
+
+	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
+		/* Only compare entries whose dst IP has been populated;
+		 * otherwise a zero-filled free slot (or the unused upper
+		 * union bytes of an IPv4 entry) could falsely match.
+		 */
+		if (tun_tbl[i].t_dst_ip_valid &&
+		    (!memcmp(&tun_tbl[i].t_dst_ip,
+			     &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
+			     sizeof(rte_be32_t)) ||
+		     !memcmp(tun_tbl[i].t_dst_ip6,
+			     &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
+			     sizeof(tun_tbl[i].t_dst_ip6)))) {
+			tun_entry_found = true;
+			break;
+		}
+
+		if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
+			first_free_entry = i;
+			free_entry_found = true;
+		}
+	}
+
+	if (tun_entry_found) {
+		*tun_entry = &tun_tbl[i];
+		*tun_idx = i;
+	} else {
+		if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
+			return BNXT_TF_RC_ERROR;
+		*tun_entry = &tun_tbl[first_free_entry];
+		*tun_idx = first_free_entry;
+	}
+
+	return 0;
+}
+
+/* Post-process a parsed flow that may be part of a VXLAN-decap F1/F2
+ * pair.
+ *
+ * Classifies the flow as outer (F1) or inner (F2) based on the parsed
+ * header/action bitmaps and the cached tunnel state, then dispatches it:
+ * F1 is installed (together with any previously deferred F2); an F2 seen
+ * before its F1 is cached; out-of-order duplicates are rejected.
+ * Non-tunnel flows pass through with BNXT_TF_RC_NORMAL.
+ */
+int32_t
+ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
+{
+	bool outer_tun_sig, inner_tun_sig, first_inner_tun_flow;
+	bool outer_tun_reject, inner_tun_reject, outer_tun_flow, inner_tun_flow;
+	enum bnxt_ulp_tun_flow_state flow_state;
+	struct bnxt_tun_cache_entry *tun_entry;
+	uint32_t l3_tun, l3_tun_decap;
+	uint16_t tun_idx;
+	int rc;
+
+	/* Computational fields that indicate it's a TUNNEL DECAP flow */
+	l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
+	l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
+					   BNXT_ULP_CF_IDX_L3_TUN_DECAP);
+	if (!l3_tun)
+		return BNXT_TF_RC_NORMAL;
+
+	/* Find (or allocate) the cache entry keyed by the tunnel dst IP */
+	rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
+	if (rc == BNXT_TF_RC_ERROR)
+		return rc;
+
+	flow_state = tun_entry->state;
+	/* Outer tunnel flow validation */
+	outer_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);
+	outer_tun_flow = BNXT_OUTER_TUN_FLOW(outer_tun_sig);
+	outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
+						      outer_tun_sig);
+
+	/* Inner tunnel flow validation */
+	inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
+	first_inner_tun_flow = BNXT_FIRST_INNER_TUN_FLOW(flow_state,
+							 inner_tun_sig);
+	inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);
+	inner_tun_reject = BNXT_REJECT_INNER_TUN_FLOW(flow_state,
+						      inner_tun_sig);
+
+	if (outer_tun_reject) {
+		tun_entry->outer_tun_rej_cnt++;
+		BNXT_TF_DBG(ERR,
+			    "Tunnel F1 flow rejected, COUNT: %d\n",
+			    tun_entry->outer_tun_rej_cnt);
+	/* Inner tunnel flow is rejected if it comes between first inner
+	 * tunnel flow and outer flow requests.
+	 */
+	} else if (inner_tun_reject) {
+		tun_entry->inner_tun_rej_cnt++;
+		BNXT_TF_DBG(ERR,
+			    "Tunnel F2 flow rejected, COUNT: %d\n",
+			    tun_entry->inner_tun_rej_cnt);
+	}
+
+	/* Dispatch in priority order: rejects first, then the cache-F2,
+	 * install-F1 and install-F2 paths; anything else is a normal flow.
+	 */
+	if (outer_tun_reject || inner_tun_reject)
+		return BNXT_TF_RC_ERROR;
+	else if (first_inner_tun_flow)
+		return ulp_post_process_first_inner_tun_flow(params, tun_entry);
+	else if (outer_tun_flow)
+		return ulp_post_process_outer_tun_flow(params, tun_entry,
+						       tun_idx);
+	else if (inner_tun_flow)
+		return ulp_post_process_inner_tun_flow(params, tun_entry);
+	else
+		return BNXT_TF_RC_NORMAL;
+}
+
+/* Reset the tunnel cache slot at @tun_idx back to its all-zero
+ * (free, BNXT_ULP_FLOW_STATE_NORMAL) state.
+ */
+void
+ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
+{
+	memset(&tun_tbl[tun_idx], 0, sizeof(tun_tbl[tun_idx]));
+}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_tun.h b/drivers/net/bnxt/tf_ulp/ulp_tun.h
new file mode 100644
index 0000000..ad70ae6
--- /dev/null
+++ b/drivers/net/bnxt/tf_ulp/ulp_tun.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2020 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_TUN_H_
+#define _BNXT_TUN_H_
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include "rte_ethdev.h"
+
+#include "ulp_template_db_enum.h"
+#include "ulp_template_struct.h"
+
+/* An outer tunnel (F1) flow is an L3 tunnel flow whose action list
+ * carries a JUMP action.
+ */
+#define	BNXT_OUTER_TUN_SIGNATURE(l3_tun, params)		\
+	((l3_tun) &&					\
+	 ULP_BITMAP_ISSET((params)->act_bitmap.bits,	\
+			  BNXT_ULP_ACTION_BIT_JUMP))
+/* An inner tunnel (F2) flow is an L3 tunnel decap flow that does not
+ * match on the outer ethernet header.
+ */
+#define	BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params)		\
+	((l3_tun) && (l3_tun_decap) &&					\
+	 !ULP_BITMAP_ISSET((params)->hdr_bitmap.bits,			\
+			   BNXT_ULP_HDR_BIT_O_ETH))
+
+/* F2 arriving while the tunnel is still in NORMAL state must be cached,
+ * since its F1 (and therefore the L2 context) is not yet known.
+ */
+#define	BNXT_FIRST_INNER_TUN_FLOW(state, inner_tun_sig)	\
+	((state) == BNXT_ULP_FLOW_STATE_NORMAL && (inner_tun_sig))
+/* F2 arriving after F1 was offloaded can be installed immediately */
+#define	BNXT_INNER_TUN_FLOW(state, inner_tun_sig)		\
+	((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (inner_tun_sig))
+#define	BNXT_OUTER_TUN_FLOW(outer_tun_sig)		((outer_tun_sig))
+
+/* It is invalid to get another outer flow offload request
+ * for the same tunnel, while the outer flow is already offloaded.
+ */
+#define	BNXT_REJECT_OUTER_TUN_FLOW(state, outer_tun_sig)	\
+	((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (outer_tun_sig))
+/* It is invalid to get another inner flow offload request
+ * for the same tunnel, while the outer flow is not yet offloaded.
+ */
+#define	BNXT_REJECT_INNER_TUN_FLOW(state, inner_tun_sig)	\
+	((state) == BNXT_ULP_FLOW_STATE_TUN_I_CACHED && (inner_tun_sig))
+
+/* Fixed indices into ulp_rte_parser_params->hdr_field[] for the outer
+ * DMAC and tunnel destination IP.
+ * NOTE(review): these appear to mirror the parser header-field template
+ * layout — confirm against the ULP templates if that layout changes.
+ */
+#define	ULP_TUN_O_DMAC_HDR_FIELD_INDEX	1
+#define	ULP_TUN_O_IPV4_DIP_INDEX	19
+#define	ULP_TUN_O_IPV6_DIP_INDEX	17
+
+/* When a flow offload request comes the following state transitions
+ * happen based on the order in which the outer & inner flow offload
+ * requests arrive.
+ *
+ * If inner tunnel flow offload request arrives first then the flow
+ * state will change from BNXT_ULP_FLOW_STATE_NORMAL to
+ * BNXT_ULP_FLOW_STATE_TUN_I_CACHED and the following outer tunnel
+ * flow offload request will change the state of the flow to
+ * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD from BNXT_ULP_FLOW_STATE_TUN_I_CACHED.
+ *
+ * If outer tunnel flow offload request arrives first then the flow state
+ * will change from BNXT_ULP_FLOW_STATE_NORMAL to
+ * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD.
+ *
+ * Once the flow state is in BNXT_ULP_FLOW_STATE_TUN_O_OFFLD, any inner
+ * tunnel flow offload requests after that point will be treated as a
+ * normal flow and the tunnel flow state remains in
+ * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD
+ */
+enum bnxt_ulp_tun_flow_state {
+	BNXT_ULP_FLOW_STATE_NORMAL = 0,
+	BNXT_ULP_FLOW_STATE_TUN_O_OFFLD,
+	BNXT_ULP_FLOW_STATE_TUN_I_CACHED
+};
+
+/* Per-tunnel cache entry, correlated by tunnel destination IP */
+struct bnxt_tun_cache_entry {
+	enum bnxt_ulp_tun_flow_state	state;
+	bool				valid;
+	/* True once t_dst_ip/t_dst_ip6 holds a real tunnel DIP */
+	bool				t_dst_ip_valid;
+	/* Outer DMAC learned from F1; patched into deferred F2 flows */
+	uint8_t				t_dmac[RTE_ETHER_ADDR_LEN];
+	union {
+		rte_be32_t		t_dst_ip;
+		uint8_t			t_dst_ip6[16];
+	};
+	uint32_t			outer_tun_flow_id;
+	uint32_t			first_inner_tun_flow_id;
+	/* Diagnostic counters for rejected F1/F2 offload requests */
+	uint16_t			outer_tun_rej_cnt;
+	uint16_t			inner_tun_rej_cnt;
+	/* Cached parser state of the deferred first F2 flow */
+	struct ulp_rte_parser_params	first_inner_tun_params;
+};
+
+void
+ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx);
+
+#endif
-- 
2.7.4


  parent reply index

Thread overview: 64+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-10-17  6:27 [dpdk-dev] [PATCH 00/14] bnxt patches Venkat Duvvuru
2020-10-17  6:27 ` [dpdk-dev] [PATCH 01/14] net/bnxt: device cleanup of FW Venkat Duvvuru
2020-10-17  6:27 ` [dpdk-dev] [PATCH 02/14] net/bnxt: add stingray support Venkat Duvvuru
2020-10-17  6:27 ` [dpdk-dev] [PATCH 03/14] net/bnxt: changes to support 2 table scopes Venkat Duvvuru
2020-10-17  6:27 ` [dpdk-dev] [PATCH 04/14] net/bnxt: map table scope API Venkat Duvvuru
2020-10-17  6:28 ` [dpdk-dev] [PATCH 05/14] net/bnxt: table scope to PF Mapping for SR and Wh+ Venkat Duvvuru
2020-10-17  6:28 ` [dpdk-dev] [PATCH 06/14] net/bnxt: add build option for EM slot allocation Venkat Duvvuru
2020-10-17  6:28 ` [dpdk-dev] [PATCH 07/14] net/bnxt: update SR ULP resource counts Venkat Duvvuru
2020-10-17  6:28 ` [dpdk-dev] [PATCH 08/14] net/bnxt: fix infinite loop in flow query count API Venkat Duvvuru
2020-10-17  6:28 ` [dpdk-dev] [PATCH 09/14] net/bnxt: add support for parent flow accumulation counters Venkat Duvvuru
2020-10-17  6:28 ` [dpdk-dev] [PATCH 10/14] net/bnxt: use cfa pair alloc for configuring reps Venkat Duvvuru
2020-10-17  6:28 ` [dpdk-dev] [PATCH 11/14] net/bnxt: add mapper support for wildcard TCAM entry Venkat Duvvuru
2020-10-17  6:28 ` [dpdk-dev] [PATCH 12/14] net/bnxt: refactor flow id allocation Venkat Duvvuru
2020-10-17  6:28 ` [dpdk-dev] [PATCH 13/14] net/bnxt: add support for VXLAN decap templates Venkat Duvvuru
2020-10-17  6:28 ` Venkat Duvvuru [this message]
2020-10-20 21:55 ` [dpdk-dev] [PATCH v2 00/11] bnxt fixes and enhancements to TRUFLOW support Ajit Khaparde
2020-10-20 21:55   ` [dpdk-dev] [PATCH v2 01/11] net/bnxt: add stingray support to core layer Ajit Khaparde
2020-10-21 18:07     ` Ferruh Yigit
2020-10-21 18:11       ` Ajit Khaparde
2020-10-22  9:11         ` Ferruh Yigit
2020-10-23  5:10           ` Ajit Khaparde
2020-10-20 21:55   ` [dpdk-dev] [PATCH v2 02/11] net/bnxt: changes to support two table scopes Ajit Khaparde
2020-10-20 21:55   ` [dpdk-dev] [PATCH v2 03/11] net/bnxt: add table scope to PF Mapping Ajit Khaparde
2020-10-20 21:55   ` [dpdk-dev] [PATCH v2 04/11] net/bnxt: update ULP resource counts Ajit Khaparde
2020-10-20 21:55   ` [dpdk-dev] [PATCH v2 05/11] net/bnxt: fix infinite loop in flow query count Ajit Khaparde
2020-10-20 21:55   ` [dpdk-dev] [PATCH v2 06/11] net/bnxt: add support for flow counter accumulation Ajit Khaparde
2020-10-20 21:55   ` [dpdk-dev] [PATCH v2 07/11] net/bnxt: change HWRM command to create reps Ajit Khaparde
2020-10-20 21:55   ` [dpdk-dev] [PATCH v2 08/11] net/bnxt: add mapper support for wildcard TCAM Ajit Khaparde
2020-10-20 21:55   ` [dpdk-dev] [PATCH v2 09/11] net/bnxt: refactor flow id allocation Ajit Khaparde
2020-10-20 21:55   ` [dpdk-dev] [PATCH v2 10/11] net/bnxt: add support for VXLAN decap templates Ajit Khaparde
2020-10-20 21:55   ` [dpdk-dev] [PATCH v2 11/11] net/bnxt: add VXLAN decap offload support Ajit Khaparde
2020-10-21  5:31   ` [dpdk-dev] [PATCH v2 00/11] bnxt fixes and enhancements to TRUFLOW support Ajit Khaparde
2020-10-22 22:05   ` [dpdk-dev] [PATCH v3 " Ajit Khaparde
2020-10-23  5:08     ` Ajit Khaparde
2020-10-26  3:56       ` [dpdk-dev] [PATCH v4 00/15] bnxt fixes and enhancements Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 01/15] net/bnxt: add stingray support to core layer Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 02/15] net/bnxt: support two table scopes Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 03/15] net/bnxt: add table scope to PF Mapping Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 04/15] net/bnxt: update ULP resource counts Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 05/15] net/bnxt: fix flow query count Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 06/15] net/bnxt: add hierarchical flow counters Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 07/15] net/bnxt: modify HWRM command to create reps Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 08/15] net/bnxt: add mapper support for wildcard TCAM Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 09/15] net/bnxt: refactor flow id allocation Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 10/15] net/bnxt: add VXLAN decap templates Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 11/15] net/bnxt: add VXLAN decap offload support Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 12/15] net/bnxt: increase the size of Rx CQ Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 13/15] net/bnxt: fix to reset mbuf data offset Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 14/15] net/bnxt: set thread safe flow ops flag Ajit Khaparde
2020-10-26  3:56         ` [dpdk-dev] [PATCH v4 15/15] net/bnxt: fix Rx performance by removing spinlock Ajit Khaparde
2020-10-26 17:42         ` [dpdk-dev] [PATCH v4 00/15] bnxt fixes and enhancements Ajit Khaparde
2020-10-22 22:05   ` [dpdk-dev] [PATCH v3 01/11] net/bnxt: add stingray support to core layer Ajit Khaparde
2020-10-23 10:54     ` Ferruh Yigit
2020-10-23 16:32       ` Ajit Khaparde
2020-10-22 22:05   ` [dpdk-dev] [PATCH v3 02/11] net/bnxt: changes to support two table scopes Ajit Khaparde
2020-10-22 22:05   ` [dpdk-dev] [PATCH v3 03/11] net/bnxt: add table scope to PF Mapping Ajit Khaparde
2020-10-22 22:05   ` [dpdk-dev] [PATCH v3 04/11] net/bnxt: update ULP resource counts Ajit Khaparde
2020-10-22 22:05   ` [dpdk-dev] [PATCH v3 05/11] net/bnxt: fix infinite loop in flow query count Ajit Khaparde
2020-10-22 22:05   ` [dpdk-dev] [PATCH v3 06/11] net/bnxt: add support for flow counter accumulation Ajit Khaparde
2020-10-22 22:05   ` [dpdk-dev] [PATCH v3 07/11] net/bnxt: change HWRM command to create reps Ajit Khaparde
2020-10-22 22:05   ` [dpdk-dev] [PATCH v3 08/11] net/bnxt: add mapper support for wildcard TCAM Ajit Khaparde
2020-10-22 22:05   ` [dpdk-dev] [PATCH v3 09/11] net/bnxt: refactor flow id allocation Ajit Khaparde
2020-10-22 22:05   ` [dpdk-dev] [PATCH v3 10/11] net/bnxt: add support for VXLAN decap templates Ajit Khaparde
2020-10-22 22:05   ` [dpdk-dev] [PATCH v3 11/11] net/bnxt: add VXLAN decap offload support Ajit Khaparde

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1602916089-18576-15-git-send-email-venkatkumar.duvvuru@broadcom.com \
    --to=venkatkumar.duvvuru@broadcom.com \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

DPDK patches and discussions

Archives are clonable:
	git clone --mirror http://inbox.dpdk.org/dev/0 dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dev dev/ http://inbox.dpdk.org/dev \
		dev@dpdk.org
	public-inbox-index dev


Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/ public-inbox