* [dpdk-dev] [PATCH 01/20] net/bnxt: add shadow tcam capability with search
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 02/20] net/bnxt: nat global registers support Somnath Kotur
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
- Add TCAM shadow tables for searching
- Add Search API to allow reuse of TCAM entries
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Randy Schacher <stuart.schacher@broadcom.com>
---
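Usage note (not part of the patch): below is a minimal caller-side sketch of the
search/alloc/set flow described in the tf_search_tcam_entry() documentation in
this patch. The table type, the buffers and the tf_set_tcam_entry_parms field
names are illustrative assumptions; only tf_search_tcam_entry() and the
HIT/MISS/REJECT status values come from this patch.

#include <errno.h>
#include "tf_core.h"

static int example_tcam_search_and_set(struct tf *tfp,
				       uint8_t *key, uint8_t *mask,
				       uint16_t key_sz_bits,
				       uint8_t *result,
				       uint16_t result_sz_bits)
{
	struct tf_search_tcam_entry_parms sparms = { 0 };
	struct tf_set_tcam_entry_parms set_parms = { 0 };
	int rc;

	sparms.dir = TF_DIR_RX;
	sparms.tcam_tbl_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM; /* example type */
	sparms.key = key;
	sparms.key_sz_in_bits = key_sz_bits;
	sparms.mask = mask;
	sparms.alloc = 1;			/* allocate an index on miss */
	sparms.result = result;			/* filled in on a hit */
	sparms.result_sz_in_bits = result_sz_bits;

	rc = tf_search_tcam_entry(tfp, &sparms);
	if (rc)
		return rc;

	switch (sparms.search_status) {
	case HIT:
		/* Entry exists; ref_cnt was incremented and result is valid */
		return 0;
	case MISS:
		/* A new index was allocated; program it with the result */
		set_parms.dir = sparms.dir;
		set_parms.tcam_tbl_type = sparms.tcam_tbl_type;
		set_parms.idx = sparms.idx;
		set_parms.key = key;
		set_parms.mask = mask;
		set_parms.key_sz_in_bits = key_sz_bits;
		set_parms.result = result;
		set_parms.result_sz_in_bits = result_sz_bits;
		return tf_set_tcam_entry(tfp, &set_parms);
	case REJECT:
	default:
		/* Shadow table is full; the entry cannot be added */
		return -ENOMEM;
	}
}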
drivers/net/bnxt/tf_core/tf_core.c | 73 +++
drivers/net/bnxt/tf_core/tf_core.h | 101 ++++
drivers/net/bnxt/tf_core/tf_device_p4.c | 2 +-
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 885 +++++++++++++++++++++++++++++-
drivers/net/bnxt/tf_core/tf_shadow_tcam.h | 258 ++++-----
drivers/net/bnxt/tf_core/tf_tcam.c | 300 +++++++++-
drivers/net/bnxt/tf_core/tf_tcam.h | 31 +-
7 files changed, 1449 insertions(+), 201 deletions(-)
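Reference note (not part of the patch): a small standalone example of the 32-bit
hash bucket handle layout used by tf_shadow_tcam.c below. The macros are copied
from the patch; the index and element values are made up.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Macros copied from tf_shadow_tcam.c in this patch */
#define TF_SHADOW_TCAM_HB_NUM_ELEM (4)
#define TF_SHADOW_TCAM_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
						  ((be) << 29) | (idx))
#define TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
#define TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
					      (TF_SHADOW_TCAM_HB_NUM_ELEM - 1))

int main(void)
{
	uint32_t hb_idx = 0x1a3;	/* masked 16b hash index (made up) */
	uint32_t be = 2;		/* bucket element 2 of 4 */
	uint32_t hdl = TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, be);

	/* (1 << 31) | (2 << 29) | 0x1a3 = 0xc00001a3 */
	printf("handle=0x%08" PRIx32 " valid=%d be=%" PRIu32 " hash=0x%" PRIx32 "\n",
	       hdl,
	       TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hdl),
	       TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hdl),
	       hdl & 0xffff);
	return 0;
}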
diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c
index 97e7952..ca3280b 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -608,6 +608,79 @@ tf_search_identifier(struct tf *tfp,
}
int
+tf_search_tcam_entry(struct tf *tfp,
+ struct tf_search_tcam_entry_parms *parms)
+{
+ int rc;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tf_tcam_alloc_search_parms sparms;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ memset(&sparms, 0, sizeof(struct tf_tcam_alloc_search_parms));
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ if (dev->ops->tf_dev_alloc_search_tcam == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ sparms.dir = parms->dir;
+ sparms.type = parms->tcam_tbl_type;
+ sparms.key = parms->key;
+ sparms.key_size = TF_BITS2BYTES_WORD_ALIGN(parms->key_sz_in_bits);
+ sparms.mask = parms->mask;
+ sparms.priority = parms->priority;
+ sparms.alloc = parms->alloc;
+
+ /* Result is in/out, so the outputs do not need to be copied back */
+ sparms.result = parms->result;
+ sparms.result_size =
+ TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits);
+
+ rc = dev->ops->tf_dev_alloc_search_tcam(tfp, &sparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: TCAM allocation failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Copy the outputs */
+ parms->hit = sparms.hit;
+ parms->search_status = sparms.search_status;
+ parms->ref_cnt = sparms.ref_cnt;
+ parms->idx = sparms.idx;
+
+ return 0;
+}
+
+int
tf_alloc_tcam_entry(struct tf *tfp,
struct tf_alloc_tcam_entry_parms *parms)
{
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index 67415ad..349a1f1 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -291,6 +291,18 @@ enum tf_tcam_tbl_type {
};
/**
+ * TCAM SEARCH STATUS
+ */
+enum tf_tcam_search_status {
+ /** The entry was not found, but an idx was allocated if requested. */
+ MISS,
+ /** The entry was found, and the result/idx are valid */
+ HIT,
+ /** The entry was not found and the table is full */
+ REJECT
+};
+
+/**
* EM Resources
* These defines are provisioned during
* tf_open_session()
@@ -949,6 +961,8 @@ int tf_free_tbl_scope(struct tf *tfp,
/**
* @page tcam TCAM Access
*
+ * @ref tf_search_tcam_entry
+ *
* @ref tf_alloc_tcam_entry
*
* @ref tf_set_tcam_entry
@@ -958,6 +972,93 @@ int tf_free_tbl_scope(struct tf *tfp,
* @ref tf_free_tcam_entry
*/
+/**
+ * tf_search_tcam_entry parameter definition (experimental)
+ */
+struct tf_search_tcam_entry_parms {
+ /**
+ * [in] receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] TCAM table type
+ */
+ enum tf_tcam_tbl_type tcam_tbl_type;
+ /**
+ * [in] Key data to match on
+ */
+ uint8_t *key;
+ /**
+ * [in] key size in bits
+ */
+ uint16_t key_sz_in_bits;
+ /**
+ * [in] Mask data to match on
+ */
+ uint8_t *mask;
+ /**
+ * [in] Priority of entry requested (definition TBD)
+ */
+ uint32_t priority;
+ /**
+ * [in] Allocate on miss.
+ */
+ uint8_t alloc;
+ /**
+ * [out] Set if matching entry found
+ */
+ uint8_t hit;
+ /**
+ * [out] Search result status (hit, miss, reject)
+ */
+ enum tf_tcam_search_status search_status;
+ /**
+ * [out] Current refcnt after allocation
+ */
+ uint16_t ref_cnt;
+ /**
+ * [in out] The result data from the search is copied here
+ */
+ uint8_t *result;
+ /**
+ * [in out] result size in bits for the result data
+ */
+ uint16_t result_sz_in_bits;
+ /**
+ * [out] Index found
+ */
+ uint16_t idx;
+};
+
+/**
+ * search TCAM entry (experimental)
+ *
+ * Search for a TCAM entry
+ *
+ * This function searches the shadow copy of the TCAM table for a matching
+ * entry. Key and mask must match for hit to be set. Only TruFlow core data
+ * is accessed. If shadow_copy is not enabled, an error is returned.
+ *
+ * Implementation:
+ *
+ * A hash is performed on the key/mask data and mapped to a shadow copy entry
+ * where the full key/mask is populated. If the full key/mask matches the
+ * entry, hit is set, ref_cnt is incremented, and search_status indicates what
+ * action the caller can take regarding setting the entry.
+ *
+ * search_status should be used as follows:
+ * - On Miss, the caller should create a result and call tf_set_tcam_entry with
+ * the returned index.
+ *
+ * - On Reject, the hash table is full and the entry cannot be added.
+ *
+ * - On Hit, the result data is returned to the caller. Additionally, the
+ * ref_cnt is updated.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int tf_search_tcam_entry(struct tf *tfp,
+ struct tf_search_tcam_entry_parms *parms);
/**
* tf_alloc_tcam_entry parameter definition
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.c b/drivers/net/bnxt/tf_core/tf_device_p4.c
index f38c38e..afb6098 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.c
@@ -133,7 +133,7 @@ const struct tf_dev_ops tf_dev_ops_p4 = {
.tf_dev_get_bulk_tbl = tf_tbl_bulk_get,
.tf_dev_alloc_tcam = tf_tcam_alloc,
.tf_dev_free_tcam = tf_tcam_free,
- .tf_dev_alloc_search_tcam = NULL,
+ .tf_dev_alloc_search_tcam = tf_tcam_alloc_search,
.tf_dev_set_tcam = tf_tcam_set,
.tf_dev_get_tcam = NULL,
.tf_dev_insert_int_em_entry = tf_em_insert_int_entry,
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
index c61b833..51aae4f 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
@@ -3,61 +3,902 @@
* All rights reserved.
*/
-#include <rte_common.h>
-
+#include "tf_common.h"
+#include "tf_util.h"
+#include "tfp.h"
#include "tf_shadow_tcam.h"
/**
- * Shadow tcam DB element
+ * The implementation includes 3 tables per tcam table type.
+ * - hash table
+ * - sized so that a minimum of 4 slots per shadow entry are available to
+ * minimize the likelihood of collisions.
+ * - shadow key table
+ * - sized to the number of entries requested and is directly indexed
+ * - the index is zero based and is the tcam index - the base address
+ * - the key and mask are stored in the key table.
+ * - The stored key is the AND of the key/mask in order to eliminate the need
+ * to compare both the key and mask.
+ * - shadow result table
+ * - the result table is stored separately since it only needs to be accessed
+ * when the key matches.
+ * - the result has a back pointer to the hash table via the hb handle. The
+ * hb handle is a 32-bit representation of the hash with a valid bit, bucket
+ * element index, and the hash index. It is necessary to store the hb handle
+ * with the result since subsequent removes only provide the tcam index.
+ *
+ * - Max entries is limited in the current implementation since bit 15 is the
+ * valid bit in the hash table.
+ * - A 16bit hash is calculated and masked based on the number of entries
+ * - 64b wide bucket is used and broken into 4x16bit elements.
+ * This decision is based on quicker bucket scanning to determine if any
+ * elements are in use.
+ * - bit 15 of each bucket element is the valid bit; this avoids having
+ * to read the larger key/result data just to determine validity. It also aids
+ * in the more efficient scanning of the bucket for slot usage.
*/
-struct tf_shadow_tcam_element {
- /**
- * Hash table
- */
- void *hash;
- /**
- * Reference count, array of number of tcam entries
- */
- uint16_t *ref_count;
+/*
+ * The maximum number of shadow entries supported. The value also doubles as
+ * the maximum number of hash buckets. There are only 15 bits of data per
+ * bucket to point to the shadow tables.
+ */
+#define TF_SHADOW_TCAM_ENTRIES_MAX (1 << 15)
+
+/* The number of elements(BE) per hash bucket (HB) */
+#define TF_SHADOW_TCAM_HB_NUM_ELEM (4)
+#define TF_SHADOW_TCAM_BE_VALID (1 << 15)
+#define TF_SHADOW_TCAM_BE_IS_VALID(be) (((be) & TF_SHADOW_TCAM_BE_VALID) != 0)
+
+/**
+ * The hash bucket handle is 32b
+ * - bit 31, the Valid bit
+ * - bit 29-30, the element
+ * - bits 0-15, the hash idx (is masked based on the allocated size)
+ */
+#define TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
+#define TF_SHADOW_TCAM_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
+ ((be) << 29) | (idx))
+
+#define TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
+ (TF_SHADOW_TCAM_HB_NUM_ELEM - 1))
+
+#define TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hdl)((hdl) & \
+ (ctxt)->hash_ctxt.hid_mask)
+
+/**
+ * The idx provided by the caller is within a region, so currently the base is
+ * either added or subtracted from the idx to ensure it can be used as a
+ * compressed index
+ */
+
+/* Convert the tcam index to a shadow index */
+#define TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, idx) ((idx) - \
+ (ctxt)->shadow_ctxt.base_addr)
+
+/* Convert the shadow index to a tcam index */
+#define TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt, idx) ((idx) + \
+ (ctxt)->shadow_ctxt.base_addr)
+
+/* Simple helper masks for clearing an element from the bucket */
+#define TF_SHADOW_TCAM_BE0_MASK_CLEAR(hb) ((hb) & 0xffffffffffff0000ull)
+#define TF_SHADOW_TCAM_BE1_MASK_CLEAR(hb) ((hb) & 0xffffffff0000ffffull)
+#define TF_SHADOW_TCAM_BE2_MASK_CLEAR(hb) ((hb) & 0xffff0000ffffffffull)
+#define TF_SHADOW_TCAM_BE3_MASK_CLEAR(hb) ((hb) & 0x0000ffffffffffffull)
+
+/**
+ * These sizes should come from an external definition, but for now it is
+ * assumed that no key is greater than 1K bits and no result is bigger than
+ * 128 bits. This simplifies table allocation by avoiding on-the-fly sizing.
+ */
+#define TF_SHADOW_TCAM_MAX_KEY_SZ 128
+#define TF_SHADOW_TCAM_MAX_RESULT_SZ 16
+
+/*
+ * Local only defines for the internal data.
+ */
+
+/**
+ * tf_shadow_tcam_shadow_key_entry is the key/mask entry of the key table.
+ * The key stored in the table is the masked version of the key. This is done
+ * to eliminate the need of comparing both the key and mask.
+ */
+struct tf_shadow_tcam_shadow_key_entry {
+ uint8_t key[TF_SHADOW_TCAM_MAX_KEY_SZ];
+ uint8_t mask[TF_SHADOW_TCAM_MAX_KEY_SZ];
};
/**
- * Shadow tcam DB definition
+ * tf_shadow_tcam_shadow_result_entry is the result table entry.
+ * The result table writes are broken into two phases:
+ * - The search phase, which stores the hb_handle and key size and
+ * - The set phase, which writes the result, refcnt, and result size
+ */
+struct tf_shadow_tcam_shadow_result_entry {
+ uint8_t result[TF_SHADOW_TCAM_MAX_RESULT_SZ];
+ uint16_t result_size;
+ uint16_t key_size;
+ uint32_t refcnt;
+ uint32_t hb_handle;
+};
+
+/**
+ * tf_shadow_tcam_shadow_ctxt holds all information for accessing the key and
+ * result tables.
+ */
+struct tf_shadow_tcam_shadow_ctxt {
+ struct tf_shadow_tcam_shadow_key_entry *sh_key_tbl;
+ struct tf_shadow_tcam_shadow_result_entry *sh_res_tbl;
+ uint32_t base_addr;
+ uint16_t num_entries;
+ uint16_t alloc_idx;
+};
+
+/**
+ * tf_shadow_tcam_hash_ctxt holds all information related to accessing the hash
+ * table.
+ */
+struct tf_shadow_tcam_hash_ctxt {
+ uint64_t *hashtbl;
+ uint16_t hid_mask;
+ uint16_t hash_entries;
+};
+
+/**
+ * tf_shadow_tcam_ctxt holds the hash and shadow tables for the current shadow
+ * tcam db. This structure is per tcam table type as each tcam table has it's
+ * own shadow and hash table.
+ */
+struct tf_shadow_tcam_ctxt {
+ struct tf_shadow_tcam_shadow_ctxt shadow_ctxt;
+ struct tf_shadow_tcam_hash_ctxt hash_ctxt;
+};
+
+/**
+ * tf_shadow_tcam_db is the allocated db structure returned as an opaque
+ * void * pointer to the caller during create db. It holds the pointers for
+ * each tcam associated with the db.
*/
struct tf_shadow_tcam_db {
- /**
- * The DB consists of an array of elements
- */
- struct tf_shadow_tcam_element *db;
+ /* Each context holds the shadow and hash table information */
+ struct tf_shadow_tcam_ctxt *ctxt[TF_TCAM_TBL_TYPE_MAX];
+};
+
+/* CRC polynomial 0xedb88320 */
+static const uint32_t tf_shadow_tcam_crc32tbl[] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};
+/**
+ * Returns the number of entries in the contexts shadow table.
+ */
+static inline uint16_t
+tf_shadow_tcam_sh_num_entries_get(struct tf_shadow_tcam_ctxt *ctxt)
+{
+ return ctxt->shadow_ctxt.num_entries;
+}
+
+/**
+ * Compare the given key with the key in the shadow table.
+ *
+ * Returns 0 if the keys match
+ */
+static int
+tf_shadow_tcam_key_cmp(struct tf_shadow_tcam_ctxt *ctxt,
+ uint8_t *key,
+ uint8_t *mask,
+ uint16_t sh_idx,
+ uint16_t size)
+{
+ if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
+ sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !key || !mask)
+ return -1;
+
+ return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
+}
+
+/**
+ * Copies the shadow result to the caller's result buffer.
+ *
+ * Returns NULL on failure, otherwise the destination pointer
+ */
+static void *
+tf_shadow_tcam_res_cpy(struct tf_shadow_tcam_ctxt *ctxt,
+ uint8_t *result,
+ uint16_t sh_idx,
+ uint16_t size)
+{
+ if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !result)
+ return 0;
+
+ if (ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result_size != size)
+ return 0;
+
+ return memcpy(result,
+ ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result,
+ size);
+}
+
+/**
+ * Using a software based CRC function for now, but will look into using hw
+ * assisted in the future.
+ */
+static uint32_t
+tf_shadow_tcam_crc32_calc(uint8_t *key, uint32_t len)
+{
+ uint32_t crc = ~0U;
+
+ while (len--)
+ crc = tf_shadow_tcam_crc32tbl[(crc ^ key[len]) & 0xff] ^
+ (crc >> 8);
+
+ return ~crc;
+}
+
+/**
+ * Free the memory associated with the context.
+ */
+static void
+tf_shadow_tcam_ctxt_delete(struct tf_shadow_tcam_ctxt *ctxt)
+{
+ if (!ctxt)
+ return;
+
+ tfp_free(ctxt->hash_ctxt.hashtbl);
+ tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
+ tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
+}
+
+/**
+ * The TF Shadow TCAM context is per TCAM and holds all information relating to
+ * managing the shadow and search capability. This routine allocates data that
+ * must be deallocated by tf_shadow_tcam_ctxt_delete when the shadow db is
+ * deleted.
+ */
+static int
+tf_shadow_tcam_ctxt_create(struct tf_shadow_tcam_ctxt *ctxt,
+ uint16_t num_entries,
+ uint16_t base_addr)
+{
+ struct tfp_calloc_parms cparms;
+ uint16_t hash_size = 1;
+ uint16_t hash_mask;
+ int rc;
+
+ /* Hash table is a power of two that holds the number of entries */
+ if (num_entries > TF_SHADOW_TCAM_ENTRIES_MAX) {
+ TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
+ num_entries,
+ TF_SHADOW_TCAM_ENTRIES_MAX);
+ return -ENOMEM;
+ }
+
+ while (hash_size < num_entries)
+ hash_size = hash_size << 1;
+
+ hash_mask = hash_size - 1;
+
+ /* Allocate the hash table */
+ cparms.nitems = hash_size;
+ cparms.size = sizeof(uint64_t);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->hash_ctxt.hashtbl = cparms.mem_va;
+ ctxt->hash_ctxt.hid_mask = hash_mask;
+ ctxt->hash_ctxt.hash_entries = hash_size;
+
+ /* allocate the shadow tables */
+ /* allocate the shadow key table */
+ cparms.nitems = num_entries;
+ cparms.size = sizeof(struct tf_shadow_tcam_shadow_key_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;
+
+ /* allocate the shadow result table */
+ cparms.nitems = num_entries;
+ cparms.size = sizeof(struct tf_shadow_tcam_shadow_result_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;
+
+ ctxt->shadow_ctxt.num_entries = num_entries;
+ ctxt->shadow_ctxt.base_addr = base_addr;
+
+ return 0;
+error:
+ tf_shadow_tcam_ctxt_delete(ctxt);
+
+ return -ENOMEM;
+}
+
+/**
+ * Get a shadow TCAM context given the db and the TCAM type
+ */
+static struct tf_shadow_tcam_ctxt *
+tf_shadow_tcam_ctxt_get(struct tf_shadow_tcam_db *shadow_db,
+ enum tf_tcam_tbl_type type)
+{
+ if (type >= TF_TCAM_TBL_TYPE_MAX ||
+ !shadow_db ||
+ !shadow_db->ctxt[type])
+ return NULL;
+
+ return shadow_db->ctxt[type];
+}
+
+/**
+ * Sets the hash entry into the table given the TCAM context, hash bucket
+ * handle, and shadow index.
+ */
+static inline int
+tf_shadow_tcam_set_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
+ uint32_t hb_handle,
+ uint16_t sh_idx)
+{
+ uint16_t hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ uint16_t be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
+ uint64_t entry = sh_idx | TF_SHADOW_TCAM_BE_VALID;
+
+ if (hid >= ctxt->hash_ctxt.hash_entries)
+ return -EINVAL;
+
+ ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
+ return 0;
+}
+
+/**
+ * Clears the hash entry given the TCAM context and hash bucket handle.
+ */
+static inline void
+tf_shadow_tcam_clear_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
+ uint32_t hb_handle)
+{
+ uint16_t hid, be;
+ uint64_t *bucket;
+
+ if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hb_handle))
+ return;
+
+ hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
+ bucket = &ctxt->hash_ctxt.hashtbl[hid];
+
+ switch (be) {
+ case 0:
+ *bucket = TF_SHADOW_TCAM_BE0_MASK_CLEAR(*bucket);
+ break;
+ case 1:
+ *bucket = TF_SHADOW_TCAM_BE1_MASK_CLEAR(*bucket);
+ break;
+ case 2:
+ *bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
+ break;
+ case 3:
+ *bucket = TF_SHADOW_TCAM_BE3_MASK_CLEAR(*bucket);
+ break;
+ }
+}
+
+/**
+ * Clears the shadow key and result entries given the TCAM context and
+ * shadow index.
+ */
+static void
+tf_shadow_tcam_clear_sh_entry(struct tf_shadow_tcam_ctxt *ctxt,
+ uint16_t sh_idx)
+{
+ struct tf_shadow_tcam_shadow_key_entry *sk_entry;
+ struct tf_shadow_tcam_shadow_result_entry *sr_entry;
+
+ if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt))
+ return;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];
+
+ /*
+ * memset key/result to zero for now, possibly leave the data alone
+ * in the future and rely on the valid bit in the hash table.
+ */
+ memset(sk_entry, 0, sizeof(struct tf_shadow_tcam_shadow_key_entry));
+ memset(sr_entry, 0, sizeof(struct tf_shadow_tcam_shadow_result_entry));
+}
+
+/**
+ * Binds the allocated tcam index with the hash and shadow tables.
+ * The entry will be incomplete until the set has happened with the result
+ * data.
+ */
int
-tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms __rte_unused)
+tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms)
{
+ int rc;
+ int i;
+ uint16_t idx, klen;
+ struct tf_shadow_tcam_ctxt *ctxt;
+ struct tf_shadow_tcam_db *shadow_db;
+ struct tf_shadow_tcam_shadow_key_entry *sk_entry;
+ struct tf_shadow_tcam_shadow_result_entry *sr_entry;
+ uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];
+
+ if (!parms || !TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(parms->hb_handle) ||
+ !parms->key || !parms->mask) {
+ TFP_DRV_LOG(ERR, "Invalid parms\n");
+ return -EINVAL;
+ }
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ ctxt = tf_shadow_tcam_ctxt_get(shadow_db, parms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
+ tf_tcam_tbl_2_str(parms->type));
+ return -EINVAL;
+ }
+
+ memset(tkey, 0, sizeof(tkey));
+ idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, parms->idx);
+ klen = parms->key_size;
+ if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) ||
+ klen > TF_SHADOW_TCAM_MAX_KEY_SZ) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid len (%d) > %d || oob idx %d\n",
+ tf_dir_2_str(parms->dir),
+ tf_tcam_tbl_2_str(parms->type),
+ klen,
+ TF_SHADOW_TCAM_MAX_KEY_SZ, idx);
+
+ return -EINVAL;
+ }
+
+ rc = tf_shadow_tcam_set_hash_entry(ctxt, parms->hb_handle, idx);
+ if (rc)
+ return -EINVAL;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+
+ /*
+ * Write the masked key to the table for more efficient comparisons
+ * later.
+ */
+ for (i = 0; i < klen; i++)
+ tkey[i] = parms->key[i] & parms->mask[i];
+
+ memcpy(sk_entry->key, tkey, klen);
+ memcpy(sk_entry->mask, parms->mask, klen);
+
+ /* Write the result table */
+ sr_entry->key_size = parms->key_size;
+ sr_entry->hb_handle = parms->hb_handle;
+
return 0;
}
+/**
+ * Deletes the hash/shadow information once there are no more references.
+ *
+ * Returns 0 unless the parms are invalid. The remaining reference count is
+ * returned in fparms->ref_cnt; at zero the caller should delete the hw entry.
+ */
int
-tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms __rte_unused)
+tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms)
{
+ uint16_t idx;
+ uint32_t hb_handle;
+ struct tf_shadow_tcam_ctxt *ctxt;
+ struct tf_shadow_tcam_db *shadow_db;
+ struct tf_tcam_free_parms *fparms;
+ struct tf_shadow_tcam_shadow_result_entry *sr_entry;
+
+ if (!parms || !parms->fparms) {
+ TFP_DRV_LOG(ERR, "Invalid parms\n");
+ return -EINVAL;
+ }
+
+ fparms = parms->fparms;
+
+ /*
+ * Initialize the reference count to zero. It is only updated when the
+ * entry still has outstanding references.
+ */
+ fparms->ref_cnt = 0;
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ ctxt = tf_shadow_tcam_ctxt_get(shadow_db, fparms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
+ tf_tcam_tbl_2_str(fparms->type));
+ return 0;
+ }
+
+ idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, fparms->idx);
+ if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
+ TFP_DRV_LOG(DEBUG, "%s %d >= %d\n",
+ tf_tcam_tbl_2_str(fparms->type),
+ fparms->idx,
+ tf_shadow_tcam_sh_num_entries_get(ctxt));
+ return 0;
+ }
+
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+ if (sr_entry->refcnt <= 1) {
+ hb_handle = sr_entry->hb_handle;
+ tf_shadow_tcam_clear_hash_entry(ctxt, hb_handle);
+ tf_shadow_tcam_clear_sh_entry(ctxt, idx);
+ } else {
+ sr_entry->refcnt--;
+ fparms->ref_cnt = sr_entry->refcnt;
+ }
+
return 0;
}
int
-tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms __rte_unused)
+tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms)
{
+ uint16_t len;
+ uint8_t rcopy;
+ uint64_t bucket;
+ uint32_t i, hid32;
+ struct tf_shadow_tcam_ctxt *ctxt;
+ struct tf_shadow_tcam_db *shadow_db;
+ uint16_t hid16, hb_idx, hid_mask, shtbl_idx, shtbl_key, be_valid;
+ struct tf_tcam_alloc_search_parms *sparms;
+ uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];
+ uint32_t be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;
+
+ if (!parms || !parms->sparms) {
+ TFP_DRV_LOG(ERR, "tcam search with invalid parms\n");
+ return -EINVAL;
+ }
+
+ memset(tkey, 0, sizeof(tkey));
+ sparms = parms->sparms;
+
+ /* Initialize return values to invalid */
+ sparms->hit = 0;
+ sparms->search_status = REJECT;
+ parms->hb_handle = 0;
+ sparms->ref_cnt = 0;
+ /* see if caller wanted the result */
+ rcopy = sparms->result && sparms->result_size;
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(ERR, "%s Unable to get tcam mgr context\n",
+ tf_tcam_tbl_2_str(sparms->type));
+ return -EINVAL;
+ }
+
+ hid_mask = ctxt->hash_ctxt.hid_mask;
+
+ len = sparms->key_size;
+
+ if (len > TF_SHADOW_TCAM_MAX_KEY_SZ ||
+ !sparms->key || !sparms->mask || !len) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid parms %d : %p : %p\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tcam_tbl_2_str(sparms->type),
+ len,
+ sparms->key,
+ sparms->mask);
+ return -EINVAL;
+ }
+
+ /* Combine the key and mask */
+ for (i = 0; i < len; i++)
+ tkey[i] = sparms->key[i] & sparms->mask[i];
+
+ /*
+ * Calculate the crc32
+ * Fold it to create a 16b value
+ * Reduce it to fit the table
+ */
+ hid32 = tf_shadow_tcam_crc32_calc(tkey, len);
+ hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
+ hb_idx = hid16 & hid_mask;
+
+ bucket = ctxt->hash_ctxt.hashtbl[hb_idx];
+
+ if (!bucket) {
+ /* empty bucket means a miss and available entry */
+ sparms->search_status = MISS;
+ parms->hb_handle = TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, 0);
+ sparms->idx = 0;
+ return 0;
+ }
+
+ /* Set the avail to max so we can detect when there is an avail entry */
+ be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;
+ for (i = 0; i < TF_SHADOW_TCAM_HB_NUM_ELEM; i++) {
+ shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff);
+ be_valid = TF_SHADOW_TCAM_BE_IS_VALID(shtbl_idx);
+ if (!be_valid) {
+ /* The element is avail, keep going */
+ be_avail = i;
+ continue;
+ }
+ /* There is a valid entry, compare it */
+ shtbl_key = shtbl_idx & ~TF_SHADOW_TCAM_BE_VALID;
+ if (!tf_shadow_tcam_key_cmp(ctxt,
+ sparms->key,
+ sparms->mask,
+ shtbl_key,
+ sparms->key_size)) {
+ /*
+ * It matches, increment the ref count if the caller
+ * requested allocation and return the info
+ */
+ if (sparms->alloc)
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt =
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt + 1;
+
+ sparms->hit = 1;
+ sparms->search_status = HIT;
+ parms->hb_handle =
+ TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, i);
+ sparms->idx = TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt,
+ shtbl_key);
+ sparms->ref_cnt =
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt;
+
+ /* copy the result, if caller wanted it. */
+ if (rcopy &&
+ !tf_shadow_tcam_res_cpy(ctxt,
+ sparms->result,
+ shtbl_key,
+ sparms->result_size)) {
+ /*
+ * Should never get here, possible memory
+ * corruption or something unexpected.
+ */
+ TFP_DRV_LOG(ERR, "Error copying result\n");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+ }
+
+ /* No hits, return avail entry if exists */
+ if (be_avail < TF_SHADOW_TCAM_HB_NUM_ELEM) {
+ parms->hb_handle =
+ TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, be_avail);
+ sparms->search_status = MISS;
+ sparms->hit = 0;
+ sparms->idx = 0;
+ } else {
+ sparms->search_status = REJECT;
+ }
+
return 0;
}
int
-tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms __rte_unused)
+tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms)
{
+ uint16_t idx;
+ struct tf_shadow_tcam_ctxt *ctxt;
+ struct tf_tcam_set_parms *sparms;
+ struct tf_shadow_tcam_db *shadow_db;
+ struct tf_shadow_tcam_shadow_result_entry *sr_entry;
+
+ if (!parms || !parms->sparms) {
+ TFP_DRV_LOG(ERR, "Null parms\n");
+ return -EINVAL;
+ }
+
+ sparms = parms->sparms;
+ if (!sparms->result || !sparms->result_size) {
+ TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tcam_tbl_2_str(sparms->type));
+ return -EINVAL;
+ }
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
+ if (!ctxt) {
+ /* We aren't tracking this table, so return success */
+ TFP_DRV_LOG(DEBUG, "%s Unable to get tcam mgr context\n",
+ tf_tcam_tbl_2_str(sparms->type));
+ return 0;
+ }
+
+ idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, sparms->idx);
+ if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tcam_tbl_2_str(sparms->type),
+ sparms->idx);
+ return -EINVAL;
+ }
+
+ /* Write the result table, the key/hash has been written already */
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+
+ /*
+ * If the handle is not valid, the bind was never called. We aren't
+ * tracking this entry.
+ */
+ if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
+ return 0;
+
+ if (sparms->result_size > TF_SHADOW_TCAM_MAX_RESULT_SZ) {
+ TFP_DRV_LOG(ERR, "%s:%s Result length %d > %d\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tcam_tbl_2_str(sparms->type),
+ sparms->result_size,
+ TF_SHADOW_TCAM_MAX_RESULT_SZ);
+ return -EINVAL;
+ }
+
+ memcpy(sr_entry->result, sparms->result, sparms->result_size);
+ sr_entry->result_size = sparms->result_size;
+ sr_entry->refcnt = 1;
+
return 0;
}
int
-tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms __rte_unused)
+tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms)
+{
+ struct tf_shadow_tcam_db *shadow_db;
+ int i;
+
+ TF_CHECK_PARMS1(parms);
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ if (!shadow_db) {
+ TFP_DRV_LOG(DEBUG, "Shadow db is NULL cannot be freed\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
+ if (shadow_db->ctxt[i]) {
+ tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
+ tfp_free(shadow_db->ctxt[i]);
+ }
+ }
+
+ tfp_free(shadow_db);
+
+ return 0;
+}
+
+/**
+ * Create the shadow db and allocate the hash/shadow tables used by the
+ * TCAM search-and-allocate capability.
+ */
+int tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms)
{
+ int rc;
+ int i;
+ uint16_t base;
+ struct tfp_calloc_parms cparms;
+ struct tf_shadow_tcam_db *shadow_db = NULL;
+
+ TF_CHECK_PARMS1(parms);
+
+ /* Build the shadow DB per the request */
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_shadow_tcam_db);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ return rc;
+ shadow_db = (void *)cparms.mem_va;
+
+ for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
+ /* If the element didn't request an allocation, there is no need
+ * to create a pool or verify a reservation.
+ */
+ if (!parms->cfg->alloc_cnt[i]) {
+ shadow_db->ctxt[i] = NULL;
+ continue;
+ }
+
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_shadow_tcam_ctxt);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+
+ shadow_db->ctxt[i] = cparms.mem_va;
+ base = parms->cfg->base_addr[i];
+ rc = tf_shadow_tcam_ctxt_create(shadow_db->ctxt[i],
+ parms->cfg->alloc_cnt[i],
+ base);
+ if (rc)
+ goto error;
+ }
+
+ *parms->shadow_db = (void *)shadow_db;
+
+ TFP_DRV_LOG(INFO,
+ "TF SHADOW TCAM - initialized\n");
+
return 0;
+error:
+ for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
+ if (shadow_db->ctxt[i]) {
+ tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
+ tfp_free(shadow_db->ctxt[i]);
+ }
+ }
+
+ tfp_free(shadow_db);
+
+ return -ENOMEM;
}
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.h b/drivers/net/bnxt/tf_core/tf_shadow_tcam.h
index e2c4e06..75c146a 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.h
@@ -8,232 +8,188 @@
#include "tf_core.h"
-struct tf;
-
-/**
- * The Shadow tcam module provides shadow DB handling for tcam based
- * TF types. A shadow DB provides the capability that allows for reuse
- * of TF resources.
- *
- * A Shadow tcam DB is intended to be used by the Tcam module only.
- */
-
/**
- * Shadow DB configuration information for a single tcam type.
- *
- * During Device initialization the HCAPI device specifics are learned
- * and as well as the RM DB creation. From that those initial steps
- * this structure can be populated.
+ * Shadow DB configuration information
*
- * NOTE:
- * If used in an array of tcam types then such array must be ordered
- * by the TF type is represents.
+ * The shadow configuration is for all tcam table types for a direction
*/
struct tf_shadow_tcam_cfg_parms {
/**
- * TF tcam type
+ * [in] The number of elements in the alloc_cnt and base_addr
+ * For now, it should always be equal to TF_TCAM_TBL_TYPE_MAX
*/
- enum tf_tcam_tbl_type type;
-
+ int num_entries;
/**
- * Number of entries the Shadow DB needs to hold
+ * [in] Resource allocation count array
+ * This array content originates from the tf_session_resources
+ * that is passed in on session open
+ * Array size is TF_TCAM_TBL_TYPE_MAX
*/
- int num_entries;
-
+ uint16_t *alloc_cnt;
/**
- * Element width for this table type
+ * [in] The base index for each tcam table
*/
- int element_width;
+ uint16_t base_addr[TF_TCAM_TBL_TYPE_MAX];
};
/**
- * Shadow tcam DB creation parameters
+ * Shadow TCAM DB creation parameters. The shadow db for this direction
+ * is returned
*/
struct tf_shadow_tcam_create_db_parms {
/**
- * [in] Configuration information for the shadow db
+ * [in] Receive or transmit direction
*/
- struct tf_shadow_tcam_cfg_parms *cfg;
+ enum tf_dir dir;
/**
- * [in] Number of elements in the parms structure
+ * [in] Configuration information for the shadow db
*/
- uint16_t num_elements;
+ struct tf_shadow_tcam_cfg_parms *cfg;
/**
* [out] Shadow tcam DB handle
*/
- void *tf_shadow_tcam_db;
+ void **shadow_db;
};
/**
- * Shadow tcam DB free parameters
+ * Create the shadow db for a single direction
+ *
+ * The returned shadow db must be freed using the free db API when no longer
+ * needed
*/
-struct tf_shadow_tcam_free_db_parms {
- /**
- * Shadow tcam DB handle
- */
- void *tf_shadow_tcam_db;
-};
+int
+tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms);
/**
- * Shadow tcam search parameters
+ * Shadow TCAM free parameters
*/
-struct tf_shadow_tcam_search_parms {
+struct tf_shadow_tcam_free_db_parms {
/**
* [in] Shadow tcam DB handle
*/
- void *tf_shadow_tcam_db;
- /**
- * [in] TCAM tbl type
- */
- enum tf_tcam_tbl_type type;
- /**
- * [in] Pointer to entry blob value in remap table to match
- */
- uint8_t *entry;
- /**
- * [in] Size of the entry blob passed in bytes
- */
- uint16_t entry_sz;
- /**
- * [out] Index of the found element returned if hit
- */
- uint16_t *index;
- /**
- * [out] Reference count incremented if hit
- */
- uint16_t *ref_cnt;
+ void *shadow_db;
};
/**
- * Shadow tcam insert parameters
+ * Free all resources associated with the shadow db
+ */
+int
+tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms);
+
+/**
+ * Shadow TCAM bind index parameters
*/
-struct tf_shadow_tcam_insert_parms {
+struct tf_shadow_tcam_bind_index_parms {
/**
* [in] Shadow tcam DB handle
*/
- void *tf_shadow_tcam_db;
+ void *shadow_db;
/**
- * [in] TCAM tbl type
+ * [in] receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] TCAM table type
*/
enum tf_tcam_tbl_type type;
/**
- * [in] Pointer to entry blob value in remap table to match
+ * [in] index of the entry to program
*/
- uint8_t *entry;
+ uint16_t idx;
/**
- * [in] Size of the entry blob passed in bytes
+ * [in] Pointer to the key data
*/
- uint16_t entry_sz;
+ uint8_t *key;
/**
- * [in] Entry to update
+ * [in] Pointer to the mask data
*/
- uint16_t index;
+ uint8_t *mask;
/**
- * [out] Reference count after insert
+ * [in] key size in bytes (word aligned)
*/
- uint16_t *ref_cnt;
+ uint16_t key_size;
+ /**
+ * [in] The hash bucket handle returned from the search
+ */
+ uint32_t hb_handle;
};
/**
- * Shadow tcam remove parameters
+ * Binds the allocated tcam index with the hash and shadow tables
*/
-struct tf_shadow_tcam_remove_parms {
+int
+tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms);
+
+/**
+ * Shadow TCAM insert parameters
+ */
+struct tf_shadow_tcam_insert_parms {
/**
* [in] Shadow tcam DB handle
*/
- void *tf_shadow_tcam_db;
- /**
- * [in] TCAM tbl type
- */
- enum tf_tcam_tbl_type type;
- /**
- * [in] Entry to update
- */
- uint16_t index;
+ void *shadow_db;
/**
- * [out] Reference count after removal
+ * [in] The set parms from tf core
*/
- uint16_t *ref_cnt;
+ struct tf_tcam_set_parms *sparms;
};
/**
- * @page shadow_tcam Shadow tcam DB
- *
- * @ref tf_shadow_tcam_create_db
- *
- * @ref tf_shadow_tcam_free_db
- *
- * @reg tf_shadow_tcam_search
- *
- * @reg tf_shadow_tcam_insert
- *
- * @reg tf_shadow_tcam_remove
- */
-
-/**
- * Creates and fills a Shadow tcam DB. The DB is indexed per the
- * parms structure.
- *
- * [in] parms
- * Pointer to create db parameters
+ * Set the entry into the tcam manager hash and shadow tables
*
- * Returns
- * - (0) if successful.
- * - (-EINVAL) on failure.
+ * The search must have been used prior to setting the entry so that the
+ * hash has been calculated and duplicate entries will not be added
*/
-int tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms);
+int
+tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms);
/**
- * Closes the Shadow tcam DB and frees all allocated
- * resources per the associated database.
- *
- * [in] parms
- * Pointer to the free DB parameters
- *
- * Returns
- * - (0) if successful.
- * - (-EINVAL) on failure.
+ * Shadow TCAM remove parameters
*/
-int tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms);
+struct tf_shadow_tcam_remove_parms {
+ /**
+ * [in] Shadow tcam DB handle
+ */
+ void *shadow_db;
+ /**
+ * [inout] The free parms from tf core
+ */
+ struct tf_tcam_free_parms *fparms;
+};
/**
- * Search Shadow tcam db for matching result
- *
- * [in] parms
- * Pointer to the search parameters
+ * Remove the entry from the tcam hash and shadow tables
*
- * Returns
- * - (0) if successful, element was found.
- * - (-EINVAL) on failure.
+ * The hash bucket handle stored with the entry at bind/set time is used to
+ * locate and clear the hash/shadow state once the reference count reaches zero
*/
-int tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms);
+int
+tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms);
/**
- * Inserts an element into the Shadow tcam DB. Will fail if the
- * elements ref_count is different from 0. Ref_count after insert will
- * be incremented.
- *
- * [in] parms
- * Pointer to insert parameters
- *
- * Returns
- * - (0) if successful.
- * - (-EINVAL) on failure.
+ * Shadow TCAM search parameters
*/
-int tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms);
+struct tf_shadow_tcam_search_parms {
+ /**
+ * [in] Shadow tcam DB handle
+ */
+ void *shadow_db;
+ /**
+ * [inout] The search parameters from tf core
+ */
+ struct tf_tcam_alloc_search_parms *sparms;
+ /**
+ * [out] The hash handle to use for the set
+ */
+ uint32_t hb_handle;
+};
/**
- * Removes an element from the Shadow tcam DB. Will fail if the
- * elements ref_count is 0. Ref_count after removal will be
- * decremented.
+ * Search for an entry in the tcam hash/shadow tables
*
- * [in] parms
- * Pointer to remove parameter
- *
- * Returns
- * - (0) if successful.
- * - (-EINVAL) on failure.
+ * If there is a miss, but there is room for insertion, the hb_handle returned
+ * is used for insertion during the bind index API
*/
-int tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms);
-
-#endif /* _TF_SHADOW_TCAM_H_ */
+int
+tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms);
+#endif
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.c b/drivers/net/bnxt/tf_core/tf_tcam.c
index cbfaa94..7679d09 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_tcam.c
@@ -14,6 +14,7 @@
#include "tfp.h"
#include "tf_session.h"
#include "tf_msg.h"
+#include "tf_shadow_tcam.h"
struct tf;
@@ -25,7 +26,7 @@ static void *tcam_db[TF_DIR_MAX];
/**
* TCAM Shadow DBs
*/
-/* static void *shadow_tcam_db[TF_DIR_MAX]; */
+static void *shadow_tcam_db[TF_DIR_MAX];
/**
* Init flag, set on bind and cleared on unbind
@@ -35,16 +36,22 @@ static uint8_t init;
/**
* Shadow init flag, set on bind and cleared on unbind
*/
-/* static uint8_t shadow_init; */
+static uint8_t shadow_init;
int
tf_tcam_bind(struct tf *tfp,
struct tf_tcam_cfg_parms *parms)
{
int rc;
- int i;
+ int i, d;
+ struct tf_rm_alloc_info info;
+ struct tf_rm_free_db_parms fparms;
+ struct tf_rm_create_db_parms db_cfg;
struct tf_tcam_resources *tcam_cnt;
- struct tf_rm_create_db_parms db_cfg = { 0 };
+ struct tf_shadow_tcam_free_db_parms fshadow;
+ struct tf_rm_get_alloc_info_parms ainfo;
+ struct tf_shadow_tcam_cfg_parms shadow_cfg;
+ struct tf_shadow_tcam_create_db_parms shadow_cdb;
TF_CHECK_PARMS2(tfp, parms);
@@ -62,29 +69,91 @@ tf_tcam_bind(struct tf *tfp,
return -EINVAL;
}
+ memset(&db_cfg, 0, sizeof(db_cfg));
+
db_cfg.type = TF_DEVICE_MODULE_TYPE_TCAM;
db_cfg.num_elements = parms->num_elements;
db_cfg.cfg = parms->cfg;
- for (i = 0; i < TF_DIR_MAX; i++) {
- db_cfg.dir = i;
- db_cfg.alloc_cnt = parms->resources->tcam_cnt[i].cnt;
- db_cfg.rm_db = &tcam_db[i];
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ db_cfg.dir = d;
+ db_cfg.alloc_cnt = parms->resources->tcam_cnt[d].cnt;
+ db_cfg.rm_db = &tcam_db[d];
rc = tf_rm_create_db(tfp, &db_cfg);
if (rc) {
TFP_DRV_LOG(ERR,
"%s: TCAM DB creation failed\n",
- tf_dir_2_str(i));
+ tf_dir_2_str(d));
return rc;
}
}
+ /* Initialize the TCAM manager. */
+ if (parms->shadow_copy) {
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ memset(&shadow_cfg, 0, sizeof(shadow_cfg));
+ memset(&shadow_cdb, 0, sizeof(shadow_cdb));
+ /* Get the base addresses of the tcams for tcam mgr */
+ for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
+ memset(&info, 0, sizeof(info));
+
+ if (!parms->resources->tcam_cnt[d].cnt[i])
+ continue;
+ ainfo.rm_db = tcam_db[d];
+ ainfo.db_index = i;
+ ainfo.info = &info;
+ rc = tf_rm_get_info(&ainfo);
+ if (rc)
+ goto error;
+
+ shadow_cfg.base_addr[i] = info.entry.start;
+ }
+
+ /* Create the shadow db */
+ shadow_cfg.alloc_cnt =
+ parms->resources->tcam_cnt[d].cnt;
+ shadow_cfg.num_entries = parms->num_elements;
+
+ shadow_cdb.shadow_db = &shadow_tcam_db[d];
+ shadow_cdb.cfg = &shadow_cfg;
+ rc = tf_shadow_tcam_create_db(&shadow_cdb);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "TCAM MGR DB creation failed "
+ "rc=%d\n", rc);
+ goto error;
+ }
+ }
+ shadow_init = 1;
+ }
+
init = 1;
TFP_DRV_LOG(INFO,
"TCAM - initialized\n");
return 0;
+error:
+ for (i = 0; i < TF_DIR_MAX; i++) {
+ memset(&fparms, 0, sizeof(fparms));
+ fparms.dir = i;
+ fparms.rm_db = tcam_db[i];
+ /* Ignoring return here since we are in the error case */
+ (void)tf_rm_free_db(tfp, &fparms);
+
+ if (parms->shadow_copy) {
+ fshadow.shadow_db = shadow_tcam_db[i];
+ tf_shadow_tcam_free_db(&fshadow);
+ shadow_tcam_db[i] = NULL;
+ }
+
+ tcam_db[i] = NULL;
+ }
+
+ shadow_init = 0;
+ init = 0;
+
+ return rc;
}
int
@@ -92,7 +161,8 @@ tf_tcam_unbind(struct tf *tfp)
{
int rc;
int i;
- struct tf_rm_free_db_parms fparms = { 0 };
+ struct tf_rm_free_db_parms fparms;
+ struct tf_shadow_tcam_free_db_parms fshadow;
TF_CHECK_PARMS1(tfp);
@@ -104,6 +174,7 @@ tf_tcam_unbind(struct tf *tfp)
}
for (i = 0; i < TF_DIR_MAX; i++) {
+ memset(&fparms, 0, sizeof(fparms));
fparms.dir = i;
fparms.rm_db = tcam_db[i];
rc = tf_rm_free_db(tfp, &fparms);
@@ -111,8 +182,17 @@ tf_tcam_unbind(struct tf *tfp)
return rc;
tcam_db[i] = NULL;
+
+ if (shadow_init) {
+ memset(&fshadow, 0, sizeof(fshadow));
+
+ fshadow.shadow_db = shadow_tcam_db[i];
+ tf_shadow_tcam_free_db(&fshadow);
+ shadow_tcam_db[i] = NULL;
+ }
}
+ shadow_init = 0;
init = 0;
return 0;
@@ -125,7 +205,7 @@ tf_tcam_alloc(struct tf *tfp,
int rc;
struct tf_session *tfs;
struct tf_dev_info *dev;
- struct tf_rm_allocate_parms aparms = { 0 };
+ struct tf_rm_allocate_parms aparms;
uint16_t num_slice_per_row = 1;
TF_CHECK_PARMS2(tfp, parms);
@@ -165,6 +245,8 @@ tf_tcam_alloc(struct tf *tfp,
return rc;
/* Allocate requested element */
+ memset(&aparms, 0, sizeof(aparms));
+
aparms.rm_db = tcam_db[parms->dir];
aparms.db_index = parms->type;
aparms.priority = parms->priority;
@@ -202,11 +284,12 @@ tf_tcam_free(struct tf *tfp,
int rc;
struct tf_session *tfs;
struct tf_dev_info *dev;
- struct tf_rm_is_allocated_parms aparms = { 0 };
- struct tf_rm_free_parms fparms = { 0 };
- struct tf_rm_get_hcapi_parms hparms = { 0 };
+ struct tf_rm_is_allocated_parms aparms;
+ struct tf_rm_free_parms fparms;
+ struct tf_rm_get_hcapi_parms hparms;
uint16_t num_slice_per_row = 1;
int allocated = 0;
+ struct tf_shadow_tcam_remove_parms shparms;
TF_CHECK_PARMS2(tfp, parms);
@@ -245,6 +328,8 @@ tf_tcam_free(struct tf *tfp,
return rc;
/* Check if element is in use */
+ memset(&aparms, 0, sizeof(aparms));
+
aparms.rm_db = tcam_db[parms->dir];
aparms.db_index = parms->type;
aparms.index = parms->idx / num_slice_per_row;
@@ -262,7 +347,37 @@ tf_tcam_free(struct tf *tfp,
return -EINVAL;
}
+ /*
+ * The Shadow mgmt, if enabled, determines if the entry needs
+ * to be deleted.
+ */
+ if (shadow_init) {
+ shparms.shadow_db = shadow_tcam_db[parms->dir];
+ shparms.fparms = parms;
+ rc = tf_shadow_tcam_remove(&shparms);
+ if (rc) {
+ /*
+ * Should not get here, log it and let the entry be
+ * deleted.
+ */
+ TFP_DRV_LOG(ERR, "%s: Shadow free fail, "
+ "type:%d index:%d deleting the entry.\n",
+ tf_dir_2_str(parms->dir),
+ parms->type,
+ parms->idx);
+ } else {
+ /*
+ * If the entry still has references, just return the
+ * ref count to the caller. No need to remove entry
+ * from rm or hw
+ */
+ if (parms->ref_cnt >= 1)
+ return rc;
+ }
+ }
+
/* Free requested element */
+ memset(&fparms, 0, sizeof(fparms));
fparms.rm_db = tcam_db[parms->dir];
fparms.db_index = parms->type;
fparms.index = parms->idx / num_slice_per_row;
@@ -291,7 +406,8 @@ tf_tcam_free(struct tf *tfp,
rc = tf_rm_free(&fparms);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s: Free failed, type:%d, index:%d\n",
+ "%s: Free failed, type:%d, "
+ "index:%d\n",
tf_dir_2_str(parms->dir),
parms->type,
fparms.index);
@@ -302,6 +418,8 @@ tf_tcam_free(struct tf *tfp,
}
/* Convert TF type to HCAPI RM type */
+ memset(&hparms, 0, sizeof(hparms));
+
hparms.rm_db = tcam_db[parms->dir];
hparms.db_index = parms->type;
hparms.hcapi_type = &parms->hcapi_type;
@@ -326,9 +444,131 @@ tf_tcam_free(struct tf *tfp,
}
int
-tf_tcam_alloc_search(struct tf *tfp __rte_unused,
- struct tf_tcam_alloc_search_parms *parms __rte_unused)
+tf_tcam_alloc_search(struct tf *tfp,
+ struct tf_tcam_alloc_search_parms *parms)
{
+ struct tf_shadow_tcam_search_parms sparms;
+ struct tf_shadow_tcam_bind_index_parms bparms;
+ struct tf_tcam_alloc_parms aparms;
+ struct tf_tcam_free_parms fparms;
+ uint16_t num_slice_per_row = 1;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ int rc;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ if (!init) {
+ TFP_DRV_LOG(ERR,
+ "%s: No TCAM DBs created\n",
+ tf_dir_2_str(parms->dir));
+ return -EINVAL;
+ }
+
+ if (!shadow_init || !shadow_tcam_db[parms->dir]) {
+ TFP_DRV_LOG(ERR, "%s: TCAM Shadow not initialized for %s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tcam_tbl_2_str(parms->type));
+ return -EINVAL;
+ }
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc)
+ return rc;
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc)
+ return rc;
+
+ if (dev->ops->tf_dev_get_tcam_slice_info == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Need to retrieve row size etc */
+ rc = dev->ops->tf_dev_get_tcam_slice_info(tfp,
+ parms->type,
+ parms->key_size,
+ &num_slice_per_row);
+ if (rc)
+ return rc;
+
+ /*
+ * Prep the shadow search, reusing the parms from the original search
+ * instead of copying them. The shadow updates the output fields in place.
+ */
+ memset(&sparms, 0, sizeof(sparms));
+ sparms.sparms = parms;
+ sparms.shadow_db = shadow_tcam_db[parms->dir];
+
+ rc = tf_shadow_tcam_search(&sparms);
+ if (rc)
+ return rc;
+
+ /*
+ * Return now if the caller did not request an allocation or the search
+ * did not miss. The status is already updated in the caller's parms.
+ */
+ if (!parms->alloc || parms->search_status != MISS)
+ return rc;
+
+ /* Caller desires an allocate on miss */
+ if (dev->ops->tf_dev_alloc_tcam == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+ memset(&aparms, 0, sizeof(aparms));
+ aparms.dir = parms->dir;
+ aparms.type = parms->type;
+ aparms.key_size = parms->key_size;
+ aparms.priority = parms->priority;
+ rc = dev->ops->tf_dev_alloc_tcam(tfp, &aparms);
+ if (rc)
+ return rc;
+
+ /* Successful allocation, attempt to add it to the shadow */
+ memset(&bparms, 0, sizeof(bparms));
+ bparms.dir = parms->dir;
+ bparms.shadow_db = shadow_tcam_db[parms->dir];
+ bparms.type = parms->type;
+ bparms.key = parms->key;
+ bparms.mask = parms->mask;
+ bparms.key_size = parms->key_size;
+ bparms.idx = aparms.idx;
+ bparms.hb_handle = sparms.hb_handle;
+ rc = tf_shadow_tcam_bind_index(&bparms);
+ if (rc) {
+ /* Error binding entry, need to free the allocated idx */
+ if (dev->ops->tf_dev_free_tcam == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ fparms.dir = parms->dir;
+ fparms.type = parms->type;
+ fparms.idx = aparms.idx;
+ rc = dev->ops->tf_dev_free_tcam(tfp, &fparms);
+ if (rc)
+ return rc;
+ }
+
+ /* Add the allocated index to output and done */
+ parms->idx = aparms.idx;
+
return 0;
}
@@ -339,8 +579,9 @@ tf_tcam_set(struct tf *tfp __rte_unused,
int rc;
struct tf_session *tfs;
struct tf_dev_info *dev;
- struct tf_rm_is_allocated_parms aparms = { 0 };
- struct tf_rm_get_hcapi_parms hparms = { 0 };
+ struct tf_rm_is_allocated_parms aparms;
+ struct tf_rm_get_hcapi_parms hparms;
+ struct tf_shadow_tcam_insert_parms iparms;
uint16_t num_slice_per_row = 1;
int allocated = 0;
@@ -381,6 +622,8 @@ tf_tcam_set(struct tf *tfp __rte_unused,
return rc;
/* Check if element is in use */
+ memset(&aparms, 0, sizeof(aparms));
+
aparms.rm_db = tcam_db[parms->dir];
aparms.db_index = parms->type;
aparms.index = parms->idx / num_slice_per_row;
@@ -399,6 +642,8 @@ tf_tcam_set(struct tf *tfp __rte_unused,
}
/* Convert TF type to HCAPI RM type */
+ memset(&hparms, 0, sizeof(hparms));
+
hparms.rm_db = tcam_db[parms->dir];
hparms.db_index = parms->type;
hparms.hcapi_type = &parms->hcapi_type;
@@ -419,6 +664,23 @@ tf_tcam_set(struct tf *tfp __rte_unused,
return rc;
}
+ /* Successfully added to hw, now for shadow if enabled. */
+ if (!shadow_init || !shadow_tcam_db[parms->dir])
+ return 0;
+
+ iparms.shadow_db = shadow_tcam_db[parms->dir];
+ iparms.sparms = parms;
+ rc = tf_shadow_tcam_insert(&iparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: %s: Entry %d set failed, rc:%s",
+ tf_dir_2_str(parms->dir),
+ tf_tcam_tbl_2_str(parms->type),
+ parms->idx,
+ strerror(-rc));
+ return rc;
+ }
+
return 0;
}
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.h b/drivers/net/bnxt/tf_core/tf_tcam.h
index ee5bacc..4722ce0 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_tcam.h
@@ -104,19 +104,19 @@ struct tf_tcam_alloc_search_parms {
*/
enum tf_tcam_tbl_type type;
/**
- * [in] Enable search for matching entry
+ * [in] Type of HCAPI
*/
- uint8_t search_enable;
+ uint16_t hcapi_type;
/**
- * [in] Key data to match on (if search)
+ * [in] Key data to match on
*/
uint8_t *key;
/**
- * [in] key size (if search)
+ * [in] key size in bytes (word aligned)
*/
uint16_t key_size;
/**
- * [in] Mask data to match on (if search)
+ * [in] Mask data to match on
*/
uint8_t *mask;
/**
@@ -124,16 +124,31 @@ struct tf_tcam_alloc_search_parms {
*/
uint32_t priority;
/**
- * [out] If search, set if matching entry found
+ * [in] Allocate on miss.
+ */
+ uint8_t alloc;
+ /**
+ * [out] Set if matching entry found
*/
uint8_t hit;
/**
+ * [out] Search result status (hit, miss, reject)
+ */
+ enum tf_tcam_search_status search_status;
+ /**
* [out] Current refcnt after allocation
*/
uint16_t ref_cnt;
/**
- * [out] Idx allocated
- *
+ * [inout] The result data from the search is copied here
+ */
+ uint8_t *result;
+ /**
+ * [inout] result size in bytes (word aligned) for the result data
+ */
+ uint16_t result_size;
+ /**
+ * [out] Index found
*/
uint16_t idx;
};
--
2.7.4
* [dpdk-dev] [PATCH 02/20] net/bnxt: nat global registers support
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 01/20] net/bnxt: add shadow tcam capability with search Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 03/20] net/bnxt: parif for offload miss rules Somnath Kotur
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Add support to enable or disable the NAT global registers.
The NAT feature is enabled in hardware during initialization
and disabled at deinitialization of the application.
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
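Usage note (not part of the patch): the new helper performs a read-modify-write
of a single 32-bit global config word through the existing tf_get_global_cfg()/
tf_set_global_cfg() API. A stripped-down sketch of that pattern is shown below;
the function name and includes are illustrative, everything else mirrors the
helper in the diff.

#include "bnxt.h"
#include "bnxt_ulp.h"
#include "tf_core.h"

/* Sketch only: enable (enable=1) or disable (enable=0) NAT inner L2 header
 * reuse for one direction.
 */
static int example_nat_cfg(struct bnxt *bp, enum tf_dir dir, int enable)
{
	uint32_t val = 0;
	uint32_t bits = BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |	/* 0x2000 */
			BNXT_ULP_NAT_INNER_L2_HEADER_DMAC;	/* 0x100 */
	struct tf_global_cfg_parms parms = { 0 };
	int rc;

	parms.dir = dir;
	parms.type = TF_TUNNEL_ENCAP;
	parms.offset = TF_TUNNEL_ENCAP_NAT;
	parms.config = (uint8_t *)&val;
	parms.config_sz_in_bytes = sizeof(val);

	rc = tf_get_global_cfg(&bp->tfp, &parms);	/* read current value */
	if (rc)
		return rc;

	if (enable)
		val |= bits;
	else
		val &= ~bits;

	return tf_set_global_cfg(&bp->tfp, &parms);	/* write it back */
}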
drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 83 ++++++++++++++++++++++++++++++++++++++
drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 4 ++
2 files changed, 87 insertions(+)
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 0869231..7c65a4b 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -597,6 +597,52 @@ ulp_session_deinit(struct bnxt_ulp_session_state *session)
}
/*
+ * Internal API to enable or disable the NAT feature.
+ * Set set_flag to 1 to set the value or zero to reset it.
+ * Returns 0 on success.
+ */
+static int32_t
+bnxt_ulp_global_cfg_update(struct bnxt *bp,
+ enum tf_dir dir,
+ enum tf_global_config_type type,
+ uint32_t offset,
+ uint32_t value,
+ uint32_t set_flag)
+{
+ uint32_t global_cfg = 0;
+ int rc;
+ struct tf_global_cfg_parms parms;
+
+ /* Initialize the params */
+ parms.dir = dir,
+ parms.type = type,
+ parms.offset = offset,
+ parms.config = (uint8_t *)&global_cfg,
+ parms.config_sz_in_bytes = sizeof(global_cfg);
+
+ rc = tf_get_global_cfg(&bp->tfp, &parms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
+ type, rc);
+ return rc;
+ }
+
+ if (set_flag)
+ global_cfg |= value;
+ else
+ global_cfg &= ~value;
+
+ /* SET the register RE_CFA_REG_ACT_TECT */
+ rc = tf_set_global_cfg(&bp->tfp, &parms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
+ type, rc);
+ return rc;
+ }
+ return rc;
+}
+
+/*
* When a port is initialized by DPDK, this function is called
* and this function initializes the ULP context and rest of the
* infrastructure associated with it.
@@ -732,6 +778,29 @@ bnxt_ulp_init(struct bnxt *bp)
goto jump_to_error;
}
+ /*
+ * Enable NAT feature. Set the global configuration register
+ * Tunnel encap to enable NAT with the reuse of existing inner
+ * L2 header smac and dmac
+ */
+ rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
+ TF_TUNNEL_ENCAP_NAT,
+ (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
+ BNXT_ULP_NAT_INNER_L2_HEADER_DMAC), 1);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
+ goto jump_to_error;
+ }
+
+ rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
+ TF_TUNNEL_ENCAP_NAT,
+ (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
+ BNXT_ULP_NAT_INNER_L2_HEADER_DMAC), 1);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
+ goto jump_to_error;
+ }
+
return rc;
jump_to_error:
@@ -785,6 +854,19 @@ bnxt_ulp_deinit(struct bnxt *bp)
/* Delete the Port database */
ulp_port_db_deinit(bp->ulp_ctx);
+ /* Disable NAT feature */
+ (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
+ TF_TUNNEL_ENCAP_NAT,
+ (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
+ BNXT_ULP_NAT_INNER_L2_HEADER_DMAC),
+ 0);
+
+ (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
+ TF_TUNNEL_ENCAP_NAT,
+ (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
+ BNXT_ULP_NAT_INNER_L2_HEADER_DMAC),
+ 0);
+
/* Delete the ulp context and tf session */
ulp_ctx_detach(bp, session);
@@ -942,6 +1024,7 @@ bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
struct bnxt_vf_representor *vfr = dev->data->dev_private;
+
bp = vfr->parent_dev->data->dev_private;
}
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
index f9e5e2b..7c95ead 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
@@ -14,6 +14,10 @@
#include "ulp_template_db_enum.h"
+/* NAT defines to reuse existing inner L2 SMAC and DMAC */
+#define BNXT_ULP_NAT_INNER_L2_HEADER_SMAC 0x2000
+#define BNXT_ULP_NAT_INNER_L2_HEADER_DMAC 0x100
+
/* defines for the ulp_flags */
#define BNXT_ULP_VF_REP_ENABLED 0x1
#define ULP_VF_REP_IS_ENABLED(flag) ((flag) & BNXT_ULP_VF_REP_ENABLED)
--
2.7.4
* [dpdk-dev] [PATCH 03/20] net/bnxt: parif for offload miss rules
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 01/20] net/bnxt: add shadow tcam capability with search Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 02/20] net/bnxt: nat global registers support Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 04/20] net/bnxt: ulp mapper changes to use tcam search Somnath Kotur
` (17 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
For the offload miss rules, the parif miss path needs to be
considered. The higher parif values are reserved for handling this.
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_def_rules.c | 4 ++--
drivers/net/bnxt/tf_ulp/ulp_port_db.h | 1 +
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 40 ++++++++++++++++++++++++++++++++
3 files changed, 43 insertions(+), 2 deletions(-)
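The remap itself is a fixed offset by the free-PARIF base. A minimal sketch, not part of the patch, with the base value taken from the define moved into ulp_port_db.h:

#include <stdint.h>

/* Base moved into ulp_port_db.h by this patch. */
#define BNXT_ULP_FREE_PARIF_BASE	11

/*
 * Sketch: physical-port PARIFs are shifted into the higher, reserved
 * partitions so the offload miss path can use them (0 -> 11, 1 -> 12,
 * and so on).
 */
static uint16_t
remap_phy_port_parif(uint16_t hw_parif)
{
	return hw_parif + BNXT_ULP_FREE_PARIF_BASE;
}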
diff --git a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
index 4d4f7c4..d86e4c9 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
@@ -12,8 +12,6 @@
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
-#define BNXT_ULP_FREE_PARIF_BASE 11
-
struct bnxt_ulp_def_param_handler {
int32_t (*vfr_func)(struct bnxt_ulp_context *ulp_ctx,
struct ulp_tlv_param *param,
@@ -85,6 +83,8 @@ ulp_set_parif_in_comp_fld(struct bnxt_ulp_context *ulp_ctx,
if (parif_type == BNXT_ULP_PHY_PORT_PARIF) {
idx = BNXT_ULP_CF_IDX_PHY_PORT_PARIF;
+ /* Parif needs to be reset to a free partition */
+ parif += BNXT_ULP_FREE_PARIF_BASE;
} else if (parif_type == BNXT_ULP_DRV_FUNC_PARIF) {
idx = BNXT_ULP_CF_IDX_DRV_FUNC_PARIF;
/* Parif needs to be reset to a free partition */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.h b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
index 393d01b..2b323d1 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
@@ -10,6 +10,7 @@
#define BNXT_PORT_DB_MAX_INTF_LIST 256
#define BNXT_PORT_DB_MAX_FUNC 2048
+#define BNXT_ULP_FREE_PARIF_BASE 11
enum bnxt_ulp_svif_type {
BNXT_ULP_DRV_FUNC_SVIF = 0,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index 3891bcd..39f801b 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -159,6 +159,43 @@ bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
}
/*
+ * Function to handle the post processing of the computed
+ * fields for the interface.
+ */
+static void
+bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
+{
+ uint32_t ifindex;
+ uint16_t port_id, parif;
+ enum bnxt_ulp_direction_type dir;
+
+ /* get the direction details */
+ dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
+
+ if (dir == BNXT_ULP_DIR_INGRESS) {
+ /* read the port id details */
+ port_id = ULP_COMP_FLD_IDX_RD(params,
+ BNXT_ULP_CF_IDX_INCOMING_IF);
+ if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
+ port_id,
+ &ifindex)) {
+ BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
+ return;
+ }
+ /* Set port PARIF */
+ if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
+ BNXT_ULP_PHY_PORT_PARIF, &parif)) {
+ BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
+ return;
+ }
+ /* Parif needs to be reset to a free partition */
+ parif += BNXT_ULP_FREE_PARIF_BASE;
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
+ parif);
+ }
+}
+
+/*
* Function to handle the post processing of the parsing details
*/
int32_t
@@ -213,6 +250,9 @@ bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
/* Merge the hdr_fp_bit into the proto header bit */
params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
+ /* Update the computed interface parameters */
+ bnxt_ulp_comp_fld_intf_update(params);
+
/* TBD: Handle the flow rejection scenarios */
return 0;
}
--
2.7.4
* [dpdk-dev] [PATCH 04/20] net/bnxt: ulp mapper changes to use tcam search
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (2 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 03/20] net/bnxt: parif for offload miss rules Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 05/20] net/bnxt: add tf hash API Somnath Kotur
` (16 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Modified the ulp mapper to use the new tf_search_tcam_entry API.
When search before allocation is requested, the mapper calls
tf_search_tcam_entry with the alloc flag.
- On HIT, the result and the tcam index are returned.
- On MISS, the tcam index is returned; the mapper then builds the
result and sets the tcam entry.
- On REJECT, the flow request is rejected.
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 106 +++++++++++++++++++++++------------
1 file changed, 71 insertions(+), 35 deletions(-)
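A minimal sketch, not part of the patch, of the three-way outcome handling described above; it assumes the search parameters were filled as in the diff below, and only outlines the tf_set_tcam_entry() step since its result data comes from the blob the mapper builds.

#include <errno.h>
#include "tf_core.h"

/*
 * Sketch: handle the outcome of tf_search_tcam_entry() when
 * search-before-alloc is enabled for a table.
 */
static int
example_handle_search(struct tf *tfp,
		      struct tf_search_tcam_entry_parms *sp)
{
	int rc;

	rc = tf_search_tcam_entry(tfp, sp);
	if (rc)
		return rc;		/* the search itself failed */

	if (sp->search_status == REJECT)
		return -ENOMEM;		/* no entry available, flow rejected */

	if (sp->hit)
		return 0;		/* reuse the entry at sp->idx */

	/*
	 * Miss with alloc = 1: sp->idx holds a fresh index.  The mapper
	 * builds the result blob and programs the entry at sp->idx with
	 * tf_set_tcam_entry() before adding it to the flow database.
	 */
	return 0;
}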
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 6a727ed..2d3373d 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -690,7 +690,7 @@ ulp_mapper_ident_extract(struct bnxt_ulp_mapper_parms *parms,
{
struct ulp_flow_db_res_params fid_parms;
uint64_t id = 0;
- uint32_t idx;
+ uint32_t idx = 0;
struct tf_search_identifier_parms sparms = { 0 };
struct tf_free_identifier_parms free_parms = { 0 };
struct tf *tfp;
@@ -1292,12 +1292,13 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct tf *tfp;
int32_t rc, trc;
struct tf_alloc_tcam_entry_parms aparms = { 0 };
+ struct tf_search_tcam_entry_parms searchparms = { 0 };
struct tf_set_tcam_entry_parms sparms = { 0 };
struct ulp_flow_db_res_params fid_parms = { 0 };
struct tf_free_tcam_entry_parms free_parms = { 0 };
uint32_t hit = 0;
uint16_t tmplen = 0;
- struct ulp_blob res_blob;
+ uint16_t idx;
/* Skip this if was handled by the cache. */
if (parms->tcam_tbl_opc == BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_SKIP) {
@@ -1352,37 +1353,72 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
}
}
- aparms.dir = tbl->direction;
- aparms.tcam_tbl_type = tbl->resource_type;
- aparms.search_enable = tbl->srch_b4_alloc;
- aparms.key_sz_in_bits = tbl->key_bit_size;
- aparms.key = ulp_blob_data_get(&key, &tmplen);
- if (tbl->key_bit_size != tmplen) {
- BNXT_TF_DBG(ERR, "Key len (%d) != Expected (%d)\n",
- tmplen, tbl->key_bit_size);
- return -EINVAL;
- }
+ if (!tbl->srch_b4_alloc) {
+ /*
+ * No search for re-use is requested, so simply allocate the
+ * tcam index.
+ */
+ aparms.dir = tbl->direction;
+ aparms.tcam_tbl_type = tbl->resource_type;
+ aparms.search_enable = tbl->srch_b4_alloc;
+ aparms.key_sz_in_bits = tbl->key_bit_size;
+ aparms.key = ulp_blob_data_get(&key, &tmplen);
+ if (tbl->key_bit_size != tmplen) {
+ BNXT_TF_DBG(ERR, "Key len (%d) != Expected (%d)\n",
+ tmplen, tbl->key_bit_size);
+ return -EINVAL;
+ }
- aparms.mask = ulp_blob_data_get(&mask, &tmplen);
- if (tbl->key_bit_size != tmplen) {
- BNXT_TF_DBG(ERR, "Mask len (%d) != Expected (%d)\n",
- tmplen, tbl->key_bit_size);
- return -EINVAL;
- }
+ aparms.mask = ulp_blob_data_get(&mask, &tmplen);
+ if (tbl->key_bit_size != tmplen) {
+ BNXT_TF_DBG(ERR, "Mask len (%d) != Expected (%d)\n",
+ tmplen, tbl->key_bit_size);
+ return -EINVAL;
+ }
- aparms.priority = tbl->priority;
+ aparms.priority = tbl->priority;
- /*
- * All failures after this succeeds require the entry to be freed.
- * cannot return directly on failure, but needs to goto error
- */
- rc = tf_alloc_tcam_entry(tfp, &aparms);
- if (rc) {
- BNXT_TF_DBG(ERR, "tcam alloc failed rc=%d.\n", rc);
- return rc;
- }
+ /*
+ * All failures after this succeeds require the entry to be
+ * freed. cannot return directly on failure, but needs to goto
+ * error.
+ */
+ rc = tf_alloc_tcam_entry(tfp, &aparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "tcam alloc failed rc=%d.\n", rc);
+ return rc;
+ }
+ idx = aparms.idx;
+ hit = aparms.hit;
+ } else {
+ /*
+ * Searching before allocation to see if we already have an
+ * entry. This allows re-use of a constrained resource.
+ */
+ searchparms.dir = tbl->direction;
+ searchparms.tcam_tbl_type = tbl->resource_type;
+ searchparms.key = ulp_blob_data_get(&key, &tmplen);
+ searchparms.key_sz_in_bits = tbl->key_bit_size;
+ searchparms.mask = ulp_blob_data_get(&mask, &tmplen);
+ searchparms.priority = tbl->priority;
+ searchparms.alloc = 1;
+ searchparms.result = ulp_blob_data_get(&data, &tmplen);
+ searchparms.result_sz_in_bits = tbl->result_bit_size;
+
+ rc = tf_search_tcam_entry(tfp, &searchparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "tcam search failed rc=%d\n", rc);
+ return rc;
+ }
- hit = aparms.hit;
+ /* Successful search, check the result */
+ if (searchparms.search_status == REJECT) {
+ BNXT_TF_DBG(ERR, "tcam alloc rejected\n");
+ return -ENOMEM;
+ }
+ idx = searchparms.idx;
+ hit = searchparms.hit;
+ }
/* Build the result */
if (!tbl->srch_b4_alloc || !hit) {
@@ -1430,9 +1466,9 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
}
}
- sparms.dir = aparms.dir;
- sparms.tcam_tbl_type = aparms.tcam_tbl_type;
- sparms.idx = aparms.idx;
+ sparms.dir = tbl->direction;
+ sparms.tcam_tbl_type = tbl->resource_type;
+ sparms.idx = idx;
/* Already verified the key/mask lengths */
sparms.key = ulp_blob_data_get(&key, &tmplen);
sparms.mask = ulp_blob_data_get(&mask, &tmplen);
@@ -1464,7 +1500,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
rc = -EINVAL;
goto error;
}
- parms->cache_ptr->tcam_idx = aparms.idx;
+ parms->cache_ptr->tcam_idx = idx;
}
/* Mark action */
@@ -1483,7 +1519,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
for (i = 0; i < num_idents; i++) {
rc = ulp_mapper_ident_extract(parms, tbl,
- &idents[i], &res_blob);
+ &idents[i], &data);
if (rc) {
BNXT_TF_DBG(ERR,
"Error in ident extraction\n");
@@ -1501,7 +1537,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
fid_parms.resource_func = tbl->resource_func;
fid_parms.resource_type = tbl->resource_type;
fid_parms.critical_resource = tbl->critical_resource;
- fid_parms.resource_hndl = aparms.idx;
+ fid_parms.resource_hndl = idx;
rc = ulp_flow_db_resource_add(parms->ulp_ctx,
parms->tbl_idx,
parms->fid,
--
2.7.4
* [dpdk-dev] [PATCH 05/20] net/bnxt: add tf hash API
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (3 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 04/20] net/bnxt: ulp mapper changes to use tcam search Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 06/20] net/bnxt: skip mark id injection into mbuf Somnath Kotur
` (15 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Added the tf_hash API for common hash usage across tf_core functions.
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Farah Smith <farah.smith@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/meson.build | 1 +
drivers/net/bnxt/tf_core/Makefile | 1 +
drivers/net/bnxt/tf_core/tf_hash.c | 106 +++++++++++++++++++++++++++++++++++++
drivers/net/bnxt/tf_core/tf_hash.h | 27 ++++++++++
4 files changed, 135 insertions(+)
create mode 100644 drivers/net/bnxt/tf_core/tf_hash.c
create mode 100644 drivers/net/bnxt/tf_core/tf_hash.h
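A minimal usage sketch, not part of the patch; the buffers are arbitrary and only illustrate the call signatures of the two helpers added below.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include "tf_hash.h"

int
main(void)
{
	uint8_t key[] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t mask[] = { 0xff, 0xff, 0x00, 0x00 };
	uint32_t hash;

	/* One-shot CRC32 over a single buffer (default initial value). */
	hash = tf_hash_calc_crc32(key, sizeof(key));

	/* Chain a second buffer by feeding the previous value back in as
	 * the initial CRC.
	 */
	hash = tf_hash_calc_crc32i(hash, mask, sizeof(mask));

	printf("combined hash: 0x%08" PRIx32 "\n", hash);
	return 0;
}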
diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build
index 8529b33..190469e 100644
--- a/drivers/net/bnxt/meson.build
+++ b/drivers/net/bnxt/meson.build
@@ -47,6 +47,7 @@ sources = files('bnxt_cpr.c',
'tf_core/tf_global_cfg.c',
'tf_core/tf_em_host.c',
'tf_core/tf_shadow_identifier.c',
+ 'tf_core/tf_hash.c',
'hcapi/hcapi_cfa_p4.c',
diff --git a/drivers/net/bnxt/tf_core/Makefile b/drivers/net/bnxt/tf_core/Makefile
index cca0e2e..cf6aaec 100644
--- a/drivers/net/bnxt/tf_core/Makefile
+++ b/drivers/net/bnxt/tf_core/Makefile
@@ -32,3 +32,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_util.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_if_tbl.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_global_cfg.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_shadow_identifier.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_hash.c
diff --git a/drivers/net/bnxt/tf_core/tf_hash.c b/drivers/net/bnxt/tf_core/tf_hash.c
new file mode 100644
index 0000000..68476cb
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_hash.c
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#include "tf_hash.h"
+
+/* CRC polynomial 0xedb88320 */
+static const uint32_t tf_hash_crc32tbl[] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+/**
+ * Calculate a crc32 on the buffer with an initial value and len
+ *
+ * Returns the crc32
+ */
+uint32_t
+tf_hash_calc_crc32i(uint32_t init, uint8_t *buf, uint32_t len)
+{
+ uint32_t crc = init;
+
+ while (len--)
+ crc = tf_hash_crc32tbl[(crc ^ buf[len]) & 0xff] ^
+ (crc >> 8);
+
+ return crc;
+}
+
+/**
+ * Calculate a crc32 on the buffer with a default initial value
+ *
+ * Returns the crc32
+ */
+uint32_t
+tf_hash_calc_crc32(uint8_t *buf, uint32_t len)
+{
+ uint32_t crc = ~0U;
+
+ crc = tf_hash_calc_crc32i(crc, buf, len);
+
+ return ~crc;
+}
diff --git a/drivers/net/bnxt/tf_core/tf_hash.h b/drivers/net/bnxt/tf_core/tf_hash.h
new file mode 100644
index 0000000..6b60aff
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_hash.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TF_HASH_H_
+#define _TF_HASH_H_
+
+#include "tf_core.h"
+
+/**
+ * Calculate a crc32 on the buffer with an initial value and len
+ *
+ * Returns the crc32
+ */
+uint32_t
+tf_hash_calc_crc32i(uint32_t init, uint8_t *buf, uint32_t len);
+
+/**
+ * Calculate a crc32 on the buffer with a default initial value
+ *
+ * Returns the crc32
+ */
+uint32_t
+tf_hash_calc_crc32(uint8_t *buf, uint32_t len);
+
+#endif
--
2.7.4
* [dpdk-dev] [PATCH 06/20] net/bnxt: skip mark id injection into mbuf
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (4 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 05/20] net/bnxt: add tf hash API Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 07/20] net/bnxt: nat template changes Somnath Kotur
` (14 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
When a packet is looped back from the VF to the VFR, it is marked to identify
the VFR interface. However, this mark_id should not be percolated up to
the application (such as OVS) as it is internal to the PMD.
This patch fixes that by skipping mark_id injection into the mbuf if the
packet is received on a VFR interface.
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
---
drivers/net/bnxt/bnxt_rxr.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index baf73cb..43b1256 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -485,6 +485,9 @@ bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
cfa_code, vfr_flag, &mark_id);
if (!rc) {
+ /* VF to VFR Rx path. So, skip mark_id injection in mbuf */
+ if (vfr_flag && *vfr_flag)
+ return mark_id;
/* Got the mark, write it to the mbuf and return */
mbuf->hash.fdir.hi = mark_id;
mbuf->udata64 = (cfa_code & 0xffffffffull) << 32;
--
2.7.4
* [dpdk-dev] [PATCH 07/20] net/bnxt: nat template changes
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (5 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 06/20] net/bnxt: skip mark id injection into mbuf Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 08/20] net/bnxt: configure parif for the egress rules Somnath Kotur
` (13 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
The template is updated to support additional combinations
of NAT actions.
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_template_db_act.c | 412 +++++++++++++++---------
drivers/net/bnxt/tf_ulp/ulp_template_db_class.c | 16 +-
drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h | 26 +-
drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c | 4 +-
4 files changed, 285 insertions(+), 173 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
index 0f19e8e..31fe905 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
@@ -12,80 +12,88 @@ uint16_t ulp_act_sig_tbl[BNXT_ULP_ACT_SIG_TBL_MAX_SZ] = {
[BNXT_ULP_ACT_HID_015a] = 1,
[BNXT_ULP_ACT_HID_00eb] = 2,
[BNXT_ULP_ACT_HID_0043] = 3,
- [BNXT_ULP_ACT_HID_01d6] = 4,
- [BNXT_ULP_ACT_HID_015e] = 5,
- [BNXT_ULP_ACT_HID_00ef] = 6,
- [BNXT_ULP_ACT_HID_0047] = 7,
- [BNXT_ULP_ACT_HID_01da] = 8,
- [BNXT_ULP_ACT_HID_025b] = 9,
- [BNXT_ULP_ACT_HID_01ec] = 10,
- [BNXT_ULP_ACT_HID_0144] = 11,
- [BNXT_ULP_ACT_HID_02d7] = 12,
- [BNXT_ULP_ACT_HID_025f] = 13,
- [BNXT_ULP_ACT_HID_01f0] = 14,
- [BNXT_ULP_ACT_HID_0148] = 15,
- [BNXT_ULP_ACT_HID_02db] = 16,
- [BNXT_ULP_ACT_HID_0000] = 17,
- [BNXT_ULP_ACT_HID_0002] = 18,
- [BNXT_ULP_ACT_HID_0800] = 19,
- [BNXT_ULP_ACT_HID_0101] = 20,
- [BNXT_ULP_ACT_HID_0020] = 21,
- [BNXT_ULP_ACT_HID_0901] = 22,
- [BNXT_ULP_ACT_HID_0121] = 23,
- [BNXT_ULP_ACT_HID_0004] = 24,
- [BNXT_ULP_ACT_HID_0804] = 25,
- [BNXT_ULP_ACT_HID_0105] = 26,
- [BNXT_ULP_ACT_HID_0024] = 27,
- [BNXT_ULP_ACT_HID_0905] = 28,
- [BNXT_ULP_ACT_HID_0125] = 29,
- [BNXT_ULP_ACT_HID_0001] = 30,
- [BNXT_ULP_ACT_HID_0005] = 31,
- [BNXT_ULP_ACT_HID_0009] = 32,
- [BNXT_ULP_ACT_HID_000d] = 33,
- [BNXT_ULP_ACT_HID_0021] = 34,
- [BNXT_ULP_ACT_HID_0029] = 35,
- [BNXT_ULP_ACT_HID_0025] = 36,
- [BNXT_ULP_ACT_HID_002d] = 37,
- [BNXT_ULP_ACT_HID_0801] = 38,
- [BNXT_ULP_ACT_HID_0809] = 39,
- [BNXT_ULP_ACT_HID_0805] = 40,
- [BNXT_ULP_ACT_HID_080d] = 41,
- [BNXT_ULP_ACT_HID_0c15] = 42,
- [BNXT_ULP_ACT_HID_0c19] = 43,
- [BNXT_ULP_ACT_HID_02f6] = 44,
- [BNXT_ULP_ACT_HID_04f8] = 45,
- [BNXT_ULP_ACT_HID_01df] = 46,
- [BNXT_ULP_ACT_HID_05e3] = 47,
- [BNXT_ULP_ACT_HID_02fa] = 48,
- [BNXT_ULP_ACT_HID_04fc] = 49,
- [BNXT_ULP_ACT_HID_01e3] = 50,
- [BNXT_ULP_ACT_HID_05e7] = 51,
- [BNXT_ULP_ACT_HID_03f7] = 52,
- [BNXT_ULP_ACT_HID_05f9] = 53,
- [BNXT_ULP_ACT_HID_02e0] = 54,
- [BNXT_ULP_ACT_HID_06e4] = 55,
- [BNXT_ULP_ACT_HID_03fb] = 56,
- [BNXT_ULP_ACT_HID_05fd] = 57,
- [BNXT_ULP_ACT_HID_02e4] = 58,
- [BNXT_ULP_ACT_HID_06e8] = 59,
- [BNXT_ULP_ACT_HID_040d] = 60,
- [BNXT_ULP_ACT_HID_040f] = 61,
- [BNXT_ULP_ACT_HID_0413] = 62,
- [BNXT_ULP_ACT_HID_0c0d] = 63,
- [BNXT_ULP_ACT_HID_0567] = 64,
- [BNXT_ULP_ACT_HID_0a49] = 65,
- [BNXT_ULP_ACT_HID_050e] = 66,
- [BNXT_ULP_ACT_HID_0d0e] = 67,
- [BNXT_ULP_ACT_HID_0668] = 68,
- [BNXT_ULP_ACT_HID_0b4a] = 69,
- [BNXT_ULP_ACT_HID_0411] = 70,
- [BNXT_ULP_ACT_HID_056b] = 71,
- [BNXT_ULP_ACT_HID_0a4d] = 72,
- [BNXT_ULP_ACT_HID_0c11] = 73,
- [BNXT_ULP_ACT_HID_0512] = 74,
- [BNXT_ULP_ACT_HID_0d12] = 75,
- [BNXT_ULP_ACT_HID_066c] = 76,
- [BNXT_ULP_ACT_HID_0b4e] = 77
+ [BNXT_ULP_ACT_HID_03d8] = 4,
+ [BNXT_ULP_ACT_HID_02c1] = 5,
+ [BNXT_ULP_ACT_HID_015e] = 6,
+ [BNXT_ULP_ACT_HID_00ef] = 7,
+ [BNXT_ULP_ACT_HID_0047] = 8,
+ [BNXT_ULP_ACT_HID_03dc] = 9,
+ [BNXT_ULP_ACT_HID_02c5] = 10,
+ [BNXT_ULP_ACT_HID_025b] = 11,
+ [BNXT_ULP_ACT_HID_01ec] = 12,
+ [BNXT_ULP_ACT_HID_0144] = 13,
+ [BNXT_ULP_ACT_HID_04d9] = 14,
+ [BNXT_ULP_ACT_HID_03c2] = 15,
+ [BNXT_ULP_ACT_HID_025f] = 16,
+ [BNXT_ULP_ACT_HID_01f0] = 17,
+ [BNXT_ULP_ACT_HID_0148] = 18,
+ [BNXT_ULP_ACT_HID_04dd] = 19,
+ [BNXT_ULP_ACT_HID_03c6] = 20,
+ [BNXT_ULP_ACT_HID_0000] = 21,
+ [BNXT_ULP_ACT_HID_0002] = 22,
+ [BNXT_ULP_ACT_HID_0800] = 23,
+ [BNXT_ULP_ACT_HID_0101] = 24,
+ [BNXT_ULP_ACT_HID_0020] = 25,
+ [BNXT_ULP_ACT_HID_0901] = 26,
+ [BNXT_ULP_ACT_HID_0121] = 27,
+ [BNXT_ULP_ACT_HID_0004] = 28,
+ [BNXT_ULP_ACT_HID_0804] = 29,
+ [BNXT_ULP_ACT_HID_0105] = 30,
+ [BNXT_ULP_ACT_HID_0024] = 31,
+ [BNXT_ULP_ACT_HID_0905] = 32,
+ [BNXT_ULP_ACT_HID_0125] = 33,
+ [BNXT_ULP_ACT_HID_0001] = 34,
+ [BNXT_ULP_ACT_HID_0005] = 35,
+ [BNXT_ULP_ACT_HID_0009] = 36,
+ [BNXT_ULP_ACT_HID_000d] = 37,
+ [BNXT_ULP_ACT_HID_0021] = 38,
+ [BNXT_ULP_ACT_HID_0029] = 39,
+ [BNXT_ULP_ACT_HID_0025] = 40,
+ [BNXT_ULP_ACT_HID_002d] = 41,
+ [BNXT_ULP_ACT_HID_0801] = 42,
+ [BNXT_ULP_ACT_HID_0809] = 43,
+ [BNXT_ULP_ACT_HID_0805] = 44,
+ [BNXT_ULP_ACT_HID_080d] = 45,
+ [BNXT_ULP_ACT_HID_0c15] = 46,
+ [BNXT_ULP_ACT_HID_0c19] = 47,
+ [BNXT_ULP_ACT_HID_02f6] = 48,
+ [BNXT_ULP_ACT_HID_04f8] = 49,
+ [BNXT_ULP_ACT_HID_01df] = 50,
+ [BNXT_ULP_ACT_HID_07e5] = 51,
+ [BNXT_ULP_ACT_HID_06ce] = 52,
+ [BNXT_ULP_ACT_HID_02fa] = 53,
+ [BNXT_ULP_ACT_HID_04fc] = 54,
+ [BNXT_ULP_ACT_HID_01e3] = 55,
+ [BNXT_ULP_ACT_HID_07e9] = 56,
+ [BNXT_ULP_ACT_HID_06d2] = 57,
+ [BNXT_ULP_ACT_HID_03f7] = 58,
+ [BNXT_ULP_ACT_HID_05f9] = 59,
+ [BNXT_ULP_ACT_HID_02e0] = 60,
+ [BNXT_ULP_ACT_HID_08e6] = 61,
+ [BNXT_ULP_ACT_HID_07cf] = 62,
+ [BNXT_ULP_ACT_HID_03fb] = 63,
+ [BNXT_ULP_ACT_HID_05fd] = 64,
+ [BNXT_ULP_ACT_HID_02e4] = 65,
+ [BNXT_ULP_ACT_HID_08ea] = 66,
+ [BNXT_ULP_ACT_HID_07d3] = 67,
+ [BNXT_ULP_ACT_HID_040d] = 68,
+ [BNXT_ULP_ACT_HID_040f] = 69,
+ [BNXT_ULP_ACT_HID_0413] = 70,
+ [BNXT_ULP_ACT_HID_0c0d] = 71,
+ [BNXT_ULP_ACT_HID_0567] = 72,
+ [BNXT_ULP_ACT_HID_0a49] = 73,
+ [BNXT_ULP_ACT_HID_050e] = 74,
+ [BNXT_ULP_ACT_HID_0d0e] = 75,
+ [BNXT_ULP_ACT_HID_0668] = 76,
+ [BNXT_ULP_ACT_HID_0b4a] = 77,
+ [BNXT_ULP_ACT_HID_0411] = 78,
+ [BNXT_ULP_ACT_HID_056b] = 79,
+ [BNXT_ULP_ACT_HID_0a4d] = 80,
+ [BNXT_ULP_ACT_HID_0c11] = 81,
+ [BNXT_ULP_ACT_HID_0512] = 82,
+ [BNXT_ULP_ACT_HID_0d12] = 83,
+ [BNXT_ULP_ACT_HID_066c] = 84,
+ [BNXT_ULP_ACT_HID_0b4e] = 85
};
struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
@@ -112,14 +120,25 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
.act_tid = 1
},
[4] = {
- .act_hid = BNXT_ULP_ACT_HID_01d6,
+ .act_hid = BNXT_ULP_ACT_HID_03d8,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
[5] = {
+ .act_hid = BNXT_ULP_ACT_HID_02c1,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_ING },
+ .act_tid = 1
+ },
+ [6] = {
.act_hid = BNXT_ULP_ACT_HID_015e,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -127,7 +146,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [6] = {
+ [7] = {
.act_hid = BNXT_ULP_ACT_HID_00ef,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -136,7 +155,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [7] = {
+ [8] = {
.act_hid = BNXT_ULP_ACT_HID_0047,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -144,16 +163,28 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [8] = {
- .act_hid = BNXT_ULP_ACT_HID_01da,
+ [9] = {
+ .act_hid = BNXT_ULP_ACT_HID_03dc,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [9] = {
+ [10] = {
+ .act_hid = BNXT_ULP_ACT_HID_02c5,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_COUNT |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_ING },
+ .act_tid = 1
+ },
+ [11] = {
.act_hid = BNXT_ULP_ACT_HID_025b,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -161,7 +192,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [10] = {
+ [12] = {
.act_hid = BNXT_ULP_ACT_HID_01ec,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -170,7 +201,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [11] = {
+ [13] = {
.act_hid = BNXT_ULP_ACT_HID_0144,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -178,16 +209,28 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [12] = {
- .act_hid = BNXT_ULP_ACT_HID_02d7,
+ [14] = {
+ .act_hid = BNXT_ULP_ACT_HID_04d9,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [13] = {
+ [15] = {
+ .act_hid = BNXT_ULP_ACT_HID_03c2,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_DEC_TTL |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_ING },
+ .act_tid = 1
+ },
+ [16] = {
.act_hid = BNXT_ULP_ACT_HID_025f,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -196,7 +239,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [14] = {
+ [17] = {
.act_hid = BNXT_ULP_ACT_HID_01f0,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -206,7 +249,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [15] = {
+ [18] = {
.act_hid = BNXT_ULP_ACT_HID_0148,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -215,51 +258,64 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [16] = {
- .act_hid = BNXT_ULP_ACT_HID_02db,
+ [19] = {
+ .act_hid = BNXT_ULP_ACT_HID_04dd,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [17] = {
+ [20] = {
+ .act_hid = BNXT_ULP_ACT_HID_03c6,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_DEC_TTL |
+ BNXT_ULP_ACTION_BIT_COUNT |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_ING },
+ .act_tid = 1
+ },
+ [21] = {
.act_hid = BNXT_ULP_ACT_HID_0000,
.act_sig = { .bits =
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [18] = {
+ [22] = {
.act_hid = BNXT_ULP_ACT_HID_0002,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DROP |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [19] = {
+ [23] = {
.act_hid = BNXT_ULP_ACT_HID_0800,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_POP_VLAN |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [20] = {
+ [24] = {
.act_hid = BNXT_ULP_ACT_HID_0101,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [21] = {
+ [25] = {
.act_hid = BNXT_ULP_ACT_HID_0020,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_VXLAN_DECAP |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [22] = {
+ [26] = {
.act_hid = BNXT_ULP_ACT_HID_0901,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -267,7 +323,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [23] = {
+ [27] = {
.act_hid = BNXT_ULP_ACT_HID_0121,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_VXLAN_DECAP |
@@ -275,14 +331,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [24] = {
+ [28] = {
.act_hid = BNXT_ULP_ACT_HID_0004,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [25] = {
+ [29] = {
.act_hid = BNXT_ULP_ACT_HID_0804,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -290,7 +346,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [26] = {
+ [30] = {
.act_hid = BNXT_ULP_ACT_HID_0105,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -298,7 +354,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [27] = {
+ [31] = {
.act_hid = BNXT_ULP_ACT_HID_0024,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -306,7 +362,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [28] = {
+ [32] = {
.act_hid = BNXT_ULP_ACT_HID_0905,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -315,7 +371,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [29] = {
+ [33] = {
.act_hid = BNXT_ULP_ACT_HID_0125,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -324,14 +380,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [30] = {
+ [34] = {
.act_hid = BNXT_ULP_ACT_HID_0001,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [31] = {
+ [35] = {
.act_hid = BNXT_ULP_ACT_HID_0005,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -339,7 +395,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [32] = {
+ [36] = {
.act_hid = BNXT_ULP_ACT_HID_0009,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -347,7 +403,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [33] = {
+ [37] = {
.act_hid = BNXT_ULP_ACT_HID_000d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -356,7 +412,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [34] = {
+ [38] = {
.act_hid = BNXT_ULP_ACT_HID_0021,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -364,7 +420,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [35] = {
+ [39] = {
.act_hid = BNXT_ULP_ACT_HID_0029,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -373,7 +429,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [36] = {
+ [40] = {
.act_hid = BNXT_ULP_ACT_HID_0025,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -382,7 +438,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [37] = {
+ [41] = {
.act_hid = BNXT_ULP_ACT_HID_002d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -392,7 +448,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [38] = {
+ [42] = {
.act_hid = BNXT_ULP_ACT_HID_0801,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -400,7 +456,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [39] = {
+ [43] = {
.act_hid = BNXT_ULP_ACT_HID_0809,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -409,7 +465,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [40] = {
+ [44] = {
.act_hid = BNXT_ULP_ACT_HID_0805,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -418,7 +474,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [41] = {
+ [45] = {
.act_hid = BNXT_ULP_ACT_HID_080d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -428,14 +484,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [42] = {
+ [46] = {
.act_hid = BNXT_ULP_ACT_HID_0c15,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_VXLAN_ENCAP |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 4
},
- [43] = {
+ [47] = {
.act_hid = BNXT_ULP_ACT_HID_0c19,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_VXLAN_ENCAP |
@@ -443,14 +499,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 4
},
- [44] = {
+ [48] = {
.act_hid = BNXT_ULP_ACT_HID_02f6,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [45] = {
+ [49] = {
.act_hid = BNXT_ULP_ACT_HID_04f8,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
@@ -458,22 +514,33 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [46] = {
+ [50] = {
.act_hid = BNXT_ULP_ACT_HID_01df,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [47] = {
- .act_hid = BNXT_ULP_ACT_HID_05e3,
+ [51] = {
+ .act_hid = BNXT_ULP_ACT_HID_07e5,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [48] = {
+ [52] = {
+ .act_hid = BNXT_ULP_ACT_HID_06ce,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .act_tid = 5
+ },
+ [53] = {
.act_hid = BNXT_ULP_ACT_HID_02fa,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -481,7 +548,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [49] = {
+ [54] = {
.act_hid = BNXT_ULP_ACT_HID_04fc,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -490,7 +557,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [50] = {
+ [55] = {
.act_hid = BNXT_ULP_ACT_HID_01e3,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -498,16 +565,28 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [51] = {
- .act_hid = BNXT_ULP_ACT_HID_05e7,
+ [56] = {
+ .act_hid = BNXT_ULP_ACT_HID_07e9,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [52] = {
+ [57] = {
+ .act_hid = BNXT_ULP_ACT_HID_06d2,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_COUNT |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .act_tid = 5
+ },
+ [58] = {
.act_hid = BNXT_ULP_ACT_HID_03f7,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -515,7 +594,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [53] = {
+ [59] = {
.act_hid = BNXT_ULP_ACT_HID_05f9,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -524,7 +603,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [54] = {
+ [60] = {
.act_hid = BNXT_ULP_ACT_HID_02e0,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -532,16 +611,28 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [55] = {
- .act_hid = BNXT_ULP_ACT_HID_06e4,
+ [61] = {
+ .act_hid = BNXT_ULP_ACT_HID_08e6,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [56] = {
+ [62] = {
+ .act_hid = BNXT_ULP_ACT_HID_07cf,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_DEC_TTL |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .act_tid = 5
+ },
+ [63] = {
.act_hid = BNXT_ULP_ACT_HID_03fb,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -550,7 +641,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [57] = {
+ [64] = {
.act_hid = BNXT_ULP_ACT_HID_05fd,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -560,7 +651,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [58] = {
+ [65] = {
.act_hid = BNXT_ULP_ACT_HID_02e4,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -569,30 +660,43 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [59] = {
- .act_hid = BNXT_ULP_ACT_HID_06e8,
+ [66] = {
+ .act_hid = BNXT_ULP_ACT_HID_08ea,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [60] = {
+ [67] = {
+ .act_hid = BNXT_ULP_ACT_HID_07d3,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_DEC_TTL |
+ BNXT_ULP_ACTION_BIT_COUNT |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .act_tid = 5
+ },
+ [68] = {
.act_hid = BNXT_ULP_ACT_HID_040d,
.act_sig = { .bits =
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [61] = {
+ [69] = {
.act_hid = BNXT_ULP_ACT_HID_040f,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DROP |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [62] = {
+ [70] = {
.act_hid = BNXT_ULP_ACT_HID_0413,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DROP |
@@ -600,14 +704,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [63] = {
+ [71] = {
.act_hid = BNXT_ULP_ACT_HID_0c0d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_POP_VLAN |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [64] = {
+ [72] = {
.act_hid = BNXT_ULP_ACT_HID_0567,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_VLAN_PCP |
@@ -616,7 +720,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [65] = {
+ [73] = {
.act_hid = BNXT_ULP_ACT_HID_0a49,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_VLAN_VID |
@@ -624,14 +728,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [66] = {
+ [74] = {
.act_hid = BNXT_ULP_ACT_HID_050e,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [67] = {
+ [75] = {
.act_hid = BNXT_ULP_ACT_HID_0d0e,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -639,7 +743,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [68] = {
+ [76] = {
.act_hid = BNXT_ULP_ACT_HID_0668,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -649,7 +753,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [69] = {
+ [77] = {
.act_hid = BNXT_ULP_ACT_HID_0b4a,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -658,14 +762,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [70] = {
+ [78] = {
.act_hid = BNXT_ULP_ACT_HID_0411,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [71] = {
+ [79] = {
.act_hid = BNXT_ULP_ACT_HID_056b,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -675,7 +779,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [72] = {
+ [80] = {
.act_hid = BNXT_ULP_ACT_HID_0a4d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -684,7 +788,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [73] = {
+ [81] = {
.act_hid = BNXT_ULP_ACT_HID_0c11,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -692,7 +796,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [74] = {
+ [82] = {
.act_hid = BNXT_ULP_ACT_HID_0512,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -700,7 +804,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [75] = {
+ [83] = {
.act_hid = BNXT_ULP_ACT_HID_0d12,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -709,7 +813,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [76] = {
+ [84] = {
.act_hid = BNXT_ULP_ACT_HID_066c,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -720,7 +824,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [77] = {
+ [85] = {
.act_hid = BNXT_ULP_ACT_HID_0b4e,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
index 200a5a6..9de45cd 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
@@ -17241,7 +17241,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17311,7 +17311,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17325,7 +17325,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17339,7 +17339,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17353,7 +17353,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17367,7 +17367,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17381,7 +17381,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17451,7 +17451,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index b5deaf6..c9fe1bc 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -18,7 +18,7 @@
#define BNXT_ULP_CLASS_HID_SHFTL 31
#define BNXT_ULP_CLASS_HID_MASK 2047
#define BNXT_ULP_ACT_SIG_TBL_MAX_SZ 4096
-#define BNXT_ULP_ACT_MATCH_LIST_MAX_SZ 78
+#define BNXT_ULP_ACT_MATCH_LIST_MAX_SZ 86
#define BNXT_ULP_ACT_HID_LOW_PRIME 7919
#define BNXT_ULP_ACT_HID_HIGH_PRIME 4721
#define BNXT_ULP_ACT_HID_SHFTR 23
@@ -786,19 +786,23 @@ enum bnxt_ulp_act_hid {
BNXT_ULP_ACT_HID_015a = 0x015a,
BNXT_ULP_ACT_HID_00eb = 0x00eb,
BNXT_ULP_ACT_HID_0043 = 0x0043,
- BNXT_ULP_ACT_HID_01d6 = 0x01d6,
+ BNXT_ULP_ACT_HID_03d8 = 0x03d8,
+ BNXT_ULP_ACT_HID_02c1 = 0x02c1,
BNXT_ULP_ACT_HID_015e = 0x015e,
BNXT_ULP_ACT_HID_00ef = 0x00ef,
BNXT_ULP_ACT_HID_0047 = 0x0047,
- BNXT_ULP_ACT_HID_01da = 0x01da,
+ BNXT_ULP_ACT_HID_03dc = 0x03dc,
+ BNXT_ULP_ACT_HID_02c5 = 0x02c5,
BNXT_ULP_ACT_HID_025b = 0x025b,
BNXT_ULP_ACT_HID_01ec = 0x01ec,
BNXT_ULP_ACT_HID_0144 = 0x0144,
- BNXT_ULP_ACT_HID_02d7 = 0x02d7,
+ BNXT_ULP_ACT_HID_04d9 = 0x04d9,
+ BNXT_ULP_ACT_HID_03c2 = 0x03c2,
BNXT_ULP_ACT_HID_025f = 0x025f,
BNXT_ULP_ACT_HID_01f0 = 0x01f0,
BNXT_ULP_ACT_HID_0148 = 0x0148,
- BNXT_ULP_ACT_HID_02db = 0x02db,
+ BNXT_ULP_ACT_HID_04dd = 0x04dd,
+ BNXT_ULP_ACT_HID_03c6 = 0x03c6,
BNXT_ULP_ACT_HID_0000 = 0x0000,
BNXT_ULP_ACT_HID_0002 = 0x0002,
BNXT_ULP_ACT_HID_0800 = 0x0800,
@@ -829,19 +833,23 @@ enum bnxt_ulp_act_hid {
BNXT_ULP_ACT_HID_02f6 = 0x02f6,
BNXT_ULP_ACT_HID_04f8 = 0x04f8,
BNXT_ULP_ACT_HID_01df = 0x01df,
- BNXT_ULP_ACT_HID_05e3 = 0x05e3,
+ BNXT_ULP_ACT_HID_07e5 = 0x07e5,
+ BNXT_ULP_ACT_HID_06ce = 0x06ce,
BNXT_ULP_ACT_HID_02fa = 0x02fa,
BNXT_ULP_ACT_HID_04fc = 0x04fc,
BNXT_ULP_ACT_HID_01e3 = 0x01e3,
- BNXT_ULP_ACT_HID_05e7 = 0x05e7,
+ BNXT_ULP_ACT_HID_07e9 = 0x07e9,
+ BNXT_ULP_ACT_HID_06d2 = 0x06d2,
BNXT_ULP_ACT_HID_03f7 = 0x03f7,
BNXT_ULP_ACT_HID_05f9 = 0x05f9,
BNXT_ULP_ACT_HID_02e0 = 0x02e0,
- BNXT_ULP_ACT_HID_06e4 = 0x06e4,
+ BNXT_ULP_ACT_HID_08e6 = 0x08e6,
+ BNXT_ULP_ACT_HID_07cf = 0x07cf,
BNXT_ULP_ACT_HID_03fb = 0x03fb,
BNXT_ULP_ACT_HID_05fd = 0x05fd,
BNXT_ULP_ACT_HID_02e4 = 0x02e4,
- BNXT_ULP_ACT_HID_06e8 = 0x06e8,
+ BNXT_ULP_ACT_HID_08ea = 0x08ea,
+ BNXT_ULP_ACT_HID_07d3 = 0x07d3,
BNXT_ULP_ACT_HID_040d = 0x040d,
BNXT_ULP_ACT_HID_040f = 0x040f,
BNXT_ULP_ACT_HID_0413 = 0x0413,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c
index 4388a0a..f2e2a59 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c
@@ -259,8 +259,8 @@ struct bnxt_ulp_rte_act_info ulp_act_info[] = {
.proto_act_func = NULL
},
[RTE_FLOW_ACTION_TYPE_DEC_TTL] = {
- .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
- .proto_act_func = ulp_rte_dec_ttl_act_handler
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_dec_ttl_act_handler
},
[RTE_FLOW_ACTION_TYPE_SET_TTL] = {
.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
--
2.7.4
* [dpdk-dev] [PATCH 08/20] net/bnxt: configure parif for the egress rules
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (6 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 07/20] net/bnxt: nat template changes Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 09/20] net/bnxt: ignore VLAN priority mask Somnath Kotur
` (12 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
The parif for the egress rules needs to be dynamically
configured based on the port type.
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_def_rules.c | 11 ++---
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 35 ++++++++++++++++
drivers/net/bnxt/tf_ulp/ulp_port_db.c | 2 +
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 54 ++++++++++++++++++++-----
drivers/net/bnxt/tf_ulp/ulp_template_db_act.c | 16 ++++++--
drivers/net/bnxt/tf_ulp/ulp_template_db_class.c | 25 +++++++++---
drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h | 14 ++++---
7 files changed, 123 insertions(+), 34 deletions(-)
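The new result opcode introduced below selects one of two computed fields based on a third. A minimal sketch of its semantics, not part of the patch; the index names in the comment reflect the likely use in this patch (picking the VF-func or drv-func PARIF depending on whether the matched port is a VF representor).

#include <stdint.h>

/*
 * Sketch of BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF: if the
 * computed field named by the condition operand is non-zero, the field
 * named by result_operand_true is pushed into the blob, otherwise the
 * one named by result_operand_false (for example, a condition of
 * BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP selecting between
 * BNXT_ULP_CF_IDX_VF_FUNC_PARIF and BNXT_ULP_CF_IDX_DRV_FUNC_PARIF).
 * The comp_fld array stands in for the mapper's computed-field table.
 */
static uint64_t
pick_comp_field(const uint64_t *comp_fld, uint16_t cond_idx,
		uint16_t true_idx, uint16_t false_idx)
{
	return comp_fld[cond_idx] ? comp_fld[true_idx] : comp_fld[false_idx];
}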
diff --git a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
index d86e4c9..ddc6da8 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
@@ -81,17 +81,12 @@ ulp_set_parif_in_comp_fld(struct bnxt_ulp_context *ulp_ctx,
if (rc)
return rc;
- if (parif_type == BNXT_ULP_PHY_PORT_PARIF) {
+ if (parif_type == BNXT_ULP_PHY_PORT_PARIF)
idx = BNXT_ULP_CF_IDX_PHY_PORT_PARIF;
- /* Parif needs to be reset to a free partition */
- parif += BNXT_ULP_FREE_PARIF_BASE;
- } else if (parif_type == BNXT_ULP_DRV_FUNC_PARIF) {
+ else if (parif_type == BNXT_ULP_DRV_FUNC_PARIF)
idx = BNXT_ULP_CF_IDX_DRV_FUNC_PARIF;
- /* Parif needs to be reset to a free partition */
- parif += BNXT_ULP_FREE_PARIF_BASE;
- } else {
+ else
idx = BNXT_ULP_CF_IDX_VF_FUNC_PARIF;
- }
ULP_COMP_FLD_IDX_WR(mapper_params, idx, parif);
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 2d3373d..a071c07 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -998,6 +998,41 @@ ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms,
return -EINVAL;
}
break;
+ case BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF:
+ if (!ulp_operand_read(fld->result_operand,
+ (uint8_t *)&idx,
+ sizeof(uint16_t))) {
+ BNXT_TF_DBG(ERR, "%s key operand read failed.\n", name);
+ return -EINVAL;
+ }
+ idx = tfp_be_to_cpu_16(idx);
+ if (idx >= BNXT_ULP_CF_IDX_LAST) {
+ BNXT_TF_DBG(ERR, "%s invalid index %u\n", name, idx);
+ return -EINVAL;
+ }
+ /* check if the computed field is set */
+ if (ULP_COMP_FLD_IDX_RD(parms, idx))
+ val = fld->result_operand_true;
+ else
+ val = fld->result_operand_false;
+
+ /* read the appropriate computed field */
+ if (!ulp_operand_read(val, (uint8_t *)&idx, sizeof(uint16_t))) {
+ BNXT_TF_DBG(ERR, "%s val operand read failed\n", name);
+ return -EINVAL;
+ }
+ idx = tfp_be_to_cpu_16(idx);
+ if (idx >= BNXT_ULP_CF_IDX_LAST) {
+ BNXT_TF_DBG(ERR, "%s invalid index %u\n", name, idx);
+ return -EINVAL;
+ }
+ val = ulp_blob_push_32(blob, &parms->comp_fld[idx],
+ fld->field_bit_size);
+ if (!val) {
+ BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name);
+ return -EINVAL;
+ }
+ break;
default:
BNXT_TF_DBG(ERR, "invalid result mapper opcode 0x%x\n",
fld->result_opcode);
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.c b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
index 0fc7c0a..3087647 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
@@ -372,6 +372,8 @@ ulp_port_db_parif_get(struct bnxt_ulp_context *ulp_ctxt,
phy_port_id = port_db->ulp_func_id_tbl[func_id].phy_port_id;
*parif = port_db->phy_port_list[phy_port_id].port_parif;
}
+ /* Parif needs to be reset to a free partition */
+ *parif += BNXT_ULP_FREE_PARIF_BASE;
return 0;
}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index 39f801b..67f9319 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -167,31 +167,63 @@ bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
uint32_t ifindex;
uint16_t port_id, parif;
+ uint32_t mtype;
enum bnxt_ulp_direction_type dir;
/* get the direction details */
dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
+ /* read the port id details */
+ port_id = ULP_COMP_FLD_IDX_RD(params,
+ BNXT_ULP_CF_IDX_INCOMING_IF);
+ if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
+ port_id,
+ &ifindex)) {
+ BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
+ return;
+ }
+
if (dir == BNXT_ULP_DIR_INGRESS) {
- /* read the port id details */
- port_id = ULP_COMP_FLD_IDX_RD(params,
- BNXT_ULP_CF_IDX_INCOMING_IF);
- if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
- port_id,
- &ifindex)) {
- BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
- return;
- }
/* Set port PARIF */
if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
BNXT_ULP_PHY_PORT_PARIF, &parif)) {
BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
return;
}
- /* Parif needs to be reset to a free partition */
- parif += BNXT_ULP_FREE_PARIF_BASE;
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
parif);
+ } else {
+ /* Get the match port type */
+ mtype = ULP_COMP_FLD_IDX_RD(params,
+ BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
+ if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
+ 1);
+ /* Set VF func PARIF */
+ if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
+ BNXT_ULP_VF_FUNC_PARIF,
+ &parif)) {
+ BNXT_TF_DBG(ERR,
+ "ParseErr:ifindex is not valid\n");
+ return;
+ }
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
+ parif);
+ } else {
+ /* Set DRV func PARIF */
+ if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
+ BNXT_ULP_DRV_FUNC_PARIF,
+ &parif)) {
+ BNXT_TF_DBG(ERR,
+ "ParseErr:ifindex is not valid\n");
+ return;
+ }
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
+ parif);
+ }
}
}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
index 31fe905..58b581c 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
@@ -1808,11 +1808,19 @@ struct bnxt_ulp_mapper_result_field_info ulp_act_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_ACT_BIT_THEN_CONST_ELSE_CONST,
.result_operand = {
- BNXT_ULP_SYM_DECAP_FUNC_THRU_TUN,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 56) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 48) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 40) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 32) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 24) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 16) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 8) & 0xff,
+ (uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 12,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
index 9de45cd..330c5ec 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
@@ -5058,7 +5058,9 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = {
},
{
.field_bit_size = 2,
- .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO,
+ .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
.spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
.spec_operand = {0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
@@ -5149,7 +5151,9 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = {
},
{
.field_bit_size = 2,
- .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO,
+ .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
.spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
.spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
@@ -17054,11 +17058,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
.result_operand = {
- BNXT_ULP_SYM_VF_FUNC_PARIF,
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index c9fe1bc..f08065b 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -127,11 +127,12 @@ enum bnxt_ulp_cf_idx {
BNXT_ULP_CF_IDX_ACT_PORT_IS_SET = 35,
BNXT_ULP_CF_IDX_ACT_PORT_TYPE = 36,
BNXT_ULP_CF_IDX_MATCH_PORT_TYPE = 37,
- BNXT_ULP_CF_IDX_VF_TO_VF = 38,
- BNXT_ULP_CF_IDX_L3_HDR_CNT = 39,
- BNXT_ULP_CF_IDX_L4_HDR_CNT = 40,
- BNXT_ULP_CF_IDX_VFR_MODE = 41,
- BNXT_ULP_CF_IDX_LAST = 42
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP = 38,
+ BNXT_ULP_CF_IDX_VF_TO_VF = 39,
+ BNXT_ULP_CF_IDX_L3_HDR_CNT = 40,
+ BNXT_ULP_CF_IDX_L4_HDR_CNT = 41,
+ BNXT_ULP_CF_IDX_VFR_MODE = 42,
+ BNXT_ULP_CF_IDX_LAST = 43
};
enum bnxt_ulp_cond_opcode {
@@ -215,7 +216,8 @@ enum bnxt_ulp_mapper_opc {
BNXT_ULP_MAPPER_OPC_SET_TO_ENCAP_ACT_PROP_SZ = 8,
BNXT_ULP_MAPPER_OPC_IF_ACT_BIT_THEN_ACT_PROP_ELSE_CONST = 9,
BNXT_ULP_MAPPER_OPC_IF_ACT_BIT_THEN_CONST_ELSE_CONST = 10,
- BNXT_ULP_MAPPER_OPC_LAST = 11
+ BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF = 11,
+ BNXT_ULP_MAPPER_OPC_LAST = 12
};
enum bnxt_ulp_mark_db_opcode {
--
2.7.4
* [dpdk-dev] [PATCH 09/20] net/bnxt: ignore VLAN priority mask
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (7 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 08/20] net/bnxt: configure parif for the egress rules Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 10/20] net/bnxt: add egress template with VLAN tag match Somnath Kotur
` (11 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
This is a workaround for OVS setting offload rules that
pass the VLAN priority mask as a wildcard, which is not
currently supported.
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index 67f9319..a924769 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -709,8 +709,17 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
vlan_tag |= ~ULP_VLAN_TAG_MASK;
vlan_tag = htons(vlan_tag);
+#ifdef ULP_DONT_IGNORE_TOS
ulp_rte_prsr_mask_copy(params, &idx, &priority,
sizeof(priority));
+#else
+ /*
+ * The priority field is ignored since OVS is setting it as
+ * a wild card match and it is not supported. This is a work
+ * around and shall be addressed in the future.
+ */
+ idx += 1;
+#endif
ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
sizeof(vlan_tag));
ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
--
2.7.4
* [dpdk-dev] [PATCH 10/20] net/bnxt: add egress template with VLAN tag match
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (8 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 09/20] net/bnxt: ignore VLAN priority mask Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 11/20] net/bnxt: modify tf shadow tcam to use common tf hash Somnath Kotur
` (10 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Added egress template with VLAN tag match
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Shahaji Bhosle <shahaji.bhosle@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_template_db_class.c | 501 +++++++++++++++++++++++-
drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h | 28 +-
2 files changed, 509 insertions(+), 20 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
index 330c5ec..41d1d87 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
@@ -162,7 +162,31 @@ uint16_t ulp_class_sig_tbl[BNXT_ULP_CLASS_SIG_TBL_MAX_SZ] = {
[BNXT_ULP_CLASS_HID_01d1] = 151,
[BNXT_ULP_CLASS_HID_0319] = 152,
[BNXT_ULP_CLASS_HID_01cd] = 153,
- [BNXT_ULP_CLASS_HID_0305] = 154
+ [BNXT_ULP_CLASS_HID_0305] = 154,
+ [BNXT_ULP_CLASS_HID_01e2] = 155,
+ [BNXT_ULP_CLASS_HID_032a] = 156,
+ [BNXT_ULP_CLASS_HID_0650] = 157,
+ [BNXT_ULP_CLASS_HID_0198] = 158,
+ [BNXT_ULP_CLASS_HID_01de] = 159,
+ [BNXT_ULP_CLASS_HID_0316] = 160,
+ [BNXT_ULP_CLASS_HID_066c] = 161,
+ [BNXT_ULP_CLASS_HID_01a4] = 162,
+ [BNXT_ULP_CLASS_HID_01c2] = 163,
+ [BNXT_ULP_CLASS_HID_030a] = 164,
+ [BNXT_ULP_CLASS_HID_0670] = 165,
+ [BNXT_ULP_CLASS_HID_01b8] = 166,
+ [BNXT_ULP_CLASS_HID_003e] = 167,
+ [BNXT_ULP_CLASS_HID_02f6] = 168,
+ [BNXT_ULP_CLASS_HID_078c] = 169,
+ [BNXT_ULP_CLASS_HID_0044] = 170,
+ [BNXT_ULP_CLASS_HID_01d2] = 171,
+ [BNXT_ULP_CLASS_HID_031a] = 172,
+ [BNXT_ULP_CLASS_HID_0660] = 173,
+ [BNXT_ULP_CLASS_HID_01a8] = 174,
+ [BNXT_ULP_CLASS_HID_01ce] = 175,
+ [BNXT_ULP_CLASS_HID_0306] = 176,
+ [BNXT_ULP_CLASS_HID_067c] = 177,
+ [BNXT_ULP_CLASS_HID_01b4] = 178
};
struct bnxt_ulp_class_match_info ulp_class_match_list[] = {
@@ -2833,6 +2857,382 @@ struct bnxt_ulp_class_match_info ulp_class_match_list[] = {
BNXT_ULP_MATCH_TYPE_BITMASK_EM },
.class_tid = 21,
.wc_pri = 11
+ },
+ [155] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01e2,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 12
+ },
+ [156] = {
+ .class_hid = BNXT_ULP_CLASS_HID_032a,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 13
+ },
+ [157] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0650,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 14
+ },
+ [158] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0198,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 15
+ },
+ [159] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01de,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 16
+ },
+ [160] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0316,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 17
+ },
+ [161] = {
+ .class_hid = BNXT_ULP_CLASS_HID_066c,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 18
+ },
+ [162] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01a4,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 19
+ },
+ [163] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01c2,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 20
+ },
+ [164] = {
+ .class_hid = BNXT_ULP_CLASS_HID_030a,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 21
+ },
+ [165] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0670,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 22
+ },
+ [166] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01b8,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 23
+ },
+ [167] = {
+ .class_hid = BNXT_ULP_CLASS_HID_003e,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 24
+ },
+ [168] = {
+ .class_hid = BNXT_ULP_CLASS_HID_02f6,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 25
+ },
+ [169] = {
+ .class_hid = BNXT_ULP_CLASS_HID_078c,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 26
+ },
+ [170] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0044,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 27
+ },
+ [171] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01d2,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 28
+ },
+ [172] = {
+ .class_hid = BNXT_ULP_CLASS_HID_031a,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 29
+ },
+ [173] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0660,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 30
+ },
+ [174] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01a8,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 31
+ },
+ [175] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01ce,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 32
+ },
+ [176] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0306,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 33
+ },
+ [177] = {
+ .class_hid = BNXT_ULP_CLASS_HID_067c,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 34
+ },
+ [178] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01b4,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 35
}
};
@@ -3236,7 +3636,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE,
- .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW,
+ .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
.srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
@@ -3255,7 +3655,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE,
- .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW,
+ .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
.srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
@@ -3346,7 +3746,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE,
- .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW,
+ .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
.srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
@@ -12534,8 +12934,18 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = {
},
{
.field_bit_size = 12,
- .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO,
- .spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO
+ .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_HDR_FIELD,
+ .mask_operand = {
+ (BNXT_ULP_HF21_IDX_OO_VLAN_VID >> 8) & 0xff,
+ BNXT_ULP_HF21_IDX_OO_VLAN_VID & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_HDR_FIELD,
+ .spec_operand = {
+ (BNXT_ULP_HF21_IDX_OO_VLAN_VID >> 8) & 0xff,
+ BNXT_ULP_HF21_IDX_OO_VLAN_VID & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 12,
@@ -12594,8 +13004,15 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = {
},
{
.field_bit_size = 2,
- .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO,
- .spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO
+ .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ .spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_COMP_FIELD,
+ .spec_operand = {
+ (BNXT_ULP_CF_IDX_O_VTAG_NUM >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_O_VTAG_NUM & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 2,
@@ -16307,11 +16724,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
.result_operand = {
- BNXT_ULP_SYM_VF_FUNC_PARIF,
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
@@ -16498,11 +16926,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
.result_operand = {
- BNXT_ULP_SYM_VF_FUNC_PARIF,
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
@@ -16689,7 +17128,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
+ .result_operand = {
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
@@ -16876,11 +17330,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
.result_operand = {
- BNXT_ULP_SYM_VF_FUNC_PARIF,
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index f08065b..ac651f6 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -11,7 +11,7 @@
#define BNXT_ULP_LOG2_MAX_NUM_DEV 2
#define BNXT_ULP_CACHE_TBL_MAX_SZ 4
#define BNXT_ULP_CLASS_SIG_TBL_MAX_SZ 2048
-#define BNXT_ULP_CLASS_MATCH_LIST_MAX_SZ 155
+#define BNXT_ULP_CLASS_MATCH_LIST_MAX_SZ 179
#define BNXT_ULP_CLASS_HID_LOW_PRIME 7919
#define BNXT_ULP_CLASS_HID_HIGH_PRIME 7907
#define BNXT_ULP_CLASS_HID_SHFTR 32
@@ -781,7 +781,31 @@ enum bnxt_ulp_class_hid {
BNXT_ULP_CLASS_HID_01d1 = 0x01d1,
BNXT_ULP_CLASS_HID_0319 = 0x0319,
BNXT_ULP_CLASS_HID_01cd = 0x01cd,
- BNXT_ULP_CLASS_HID_0305 = 0x0305
+ BNXT_ULP_CLASS_HID_0305 = 0x0305,
+ BNXT_ULP_CLASS_HID_01e2 = 0x01e2,
+ BNXT_ULP_CLASS_HID_032a = 0x032a,
+ BNXT_ULP_CLASS_HID_0650 = 0x0650,
+ BNXT_ULP_CLASS_HID_0198 = 0x0198,
+ BNXT_ULP_CLASS_HID_01de = 0x01de,
+ BNXT_ULP_CLASS_HID_0316 = 0x0316,
+ BNXT_ULP_CLASS_HID_066c = 0x066c,
+ BNXT_ULP_CLASS_HID_01a4 = 0x01a4,
+ BNXT_ULP_CLASS_HID_01c2 = 0x01c2,
+ BNXT_ULP_CLASS_HID_030a = 0x030a,
+ BNXT_ULP_CLASS_HID_0670 = 0x0670,
+ BNXT_ULP_CLASS_HID_01b8 = 0x01b8,
+ BNXT_ULP_CLASS_HID_003e = 0x003e,
+ BNXT_ULP_CLASS_HID_02f6 = 0x02f6,
+ BNXT_ULP_CLASS_HID_078c = 0x078c,
+ BNXT_ULP_CLASS_HID_0044 = 0x0044,
+ BNXT_ULP_CLASS_HID_01d2 = 0x01d2,
+ BNXT_ULP_CLASS_HID_031a = 0x031a,
+ BNXT_ULP_CLASS_HID_0660 = 0x0660,
+ BNXT_ULP_CLASS_HID_01a8 = 0x01a8,
+ BNXT_ULP_CLASS_HID_01ce = 0x01ce,
+ BNXT_ULP_CLASS_HID_0306 = 0x0306,
+ BNXT_ULP_CLASS_HID_067c = 0x067c,
+ BNXT_ULP_CLASS_HID_01b4 = 0x01b4
};
enum bnxt_ulp_act_hid {
--
2.7.4
* [dpdk-dev] [PATCH 11/20] net/bnxt: modify tf shadow tcam to use common tf hash
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (9 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 10/20] net/bnxt: add egress template with VLAN tag match Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 12/20] net/bnxt: added shadow table capability with search Somnath Kotur
` (9 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Removed the hash calculation from tf_shadow_tcam in favor of using a new
common implementation.
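For reference, the table-driven loop being removed implements a standard
reflected CRC-32 (polynomial 0xedb88320) that walks the key from its last
byte to its first. A bit-serial equivalent is sketched below purely for
illustration; it is assumed, not shown in this hunk, that the common
tf_hash_calc_crc32() keeps the same semantics:

    /* Illustrative only; not part of the patch. */
    static uint32_t shadow_tcam_crc32_ref(const uint8_t *key, uint32_t len)
    {
            uint32_t crc = ~0U;

            while (len--) {
                    int i;

                    crc ^= key[len];        /* last byte first */
                    for (i = 0; i < 8; i++)
                            crc = (crc >> 1) ^
                                  ((crc & 1U) ? 0xedb88320U : 0);
            }
            return ~crc;
    }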
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Farah Smith <farah.smith@broadcom.com>
Reviewed-by: Ajit Kumar Khaparde <ajit.khaparde@broadcom.com>
---
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 77 +------------------------------
1 file changed, 2 insertions(+), 75 deletions(-)
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
index 51aae4f..beaea03 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
@@ -7,6 +7,7 @@
#include "tf_util.h"
#include "tfp.h"
#include "tf_shadow_tcam.h"
+#include "tf_hash.h"
/**
* The implementation includes 3 tables per tcam table type.
@@ -164,74 +165,6 @@ struct tf_shadow_tcam_db {
struct tf_shadow_tcam_ctxt *ctxt[TF_TCAM_TBL_TYPE_MAX];
};
-/* CRC polynomial 0xedb88320 */
-static const uint32_t tf_shadow_tcam_crc32tbl[] = {
- 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
- 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
- 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
- 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
- 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
- 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
- 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
- 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
- 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
- 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
- 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
- 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
- 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
- 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
- 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
- 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
- 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
- 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
- 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
- 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
- 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
- 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
- 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
- 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
- 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
- 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
- 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
- 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
- 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
- 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
- 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
- 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
- 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
- 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
- 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
- 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
- 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
- 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
- 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
- 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
- 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
- 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
- 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
- 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
- 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
- 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
- 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
- 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
- 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
- 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
- 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
- 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
- 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
- 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
- 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
- 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
- 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
- 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
- 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
- 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
- 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
- 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
- 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
- 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
-};
-
/**
* Returns the number of entries in the contexts shadow table.
*/
@@ -289,13 +222,7 @@ tf_shadow_tcam_res_cpy(struct tf_shadow_tcam_ctxt *ctxt,
static uint32_t
tf_shadow_tcam_crc32_calc(uint8_t *key, uint32_t len)
{
- uint32_t crc = ~0U;
-
- while (len--)
- crc = tf_shadow_tcam_crc32tbl[(crc ^ key[len]) & 0xff] ^
- (crc >> 8);
-
- return ~crc;
+ return tf_hash_calc_crc32(key, len);
}
/**
--
2.7.4
* [dpdk-dev] [PATCH 12/20] net/bnxt: added shadow table capability with search
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (10 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 11/20] net/bnxt: modify tf shadow tcam to use common tf hash Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 13/20] net/bnxt: ulp mapper changes to use tbl search Somnath Kotur
` (8 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
- Added Index Table shadow tables for searching
- Added Search API to allow reuse of Table entries
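A minimal usage sketch of the new search API, based on the parameter
layout added to tf_core.h in this patch (the encap_data buffer, its
length and the error handling are hypothetical; on a miss the caller is
expected to program the returned index through the usual table-set
path):

    struct tf_search_tbl_entry_parms sparms = { 0 };
    int rc;

    sparms.dir = TF_DIR_TX;
    sparms.type = TF_TBL_TYPE_ACT_ENCAP_16B;
    sparms.result = encap_data;                /* entry data to look up */
    sparms.result_sz_in_bytes = encap_data_len;
    sparms.alloc = 1;                          /* allocate an idx on miss */

    rc = tf_search_tbl_entry(tfp, &sparms);
    if (rc)
            return rc;

    if (sparms.search_status == REJECT)
            return -ENOMEM;        /* no free entries, reject the flow */

    if (sparms.search_status == MISS) {
            /* New entry: write encap_data at sparms.idx */
    } else {
            /* Hit: reuse sparms.idx, ref_cnt has been incremented */
    }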
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Farah Smith <farah.smith@broadcom.com>
---
drivers/net/bnxt/tf_core/tf_core.c | 66 ++-
drivers/net/bnxt/tf_core/tf_core.h | 79 ++-
drivers/net/bnxt/tf_core/tf_device_p4.c | 2 +-
drivers/net/bnxt/tf_core/tf_shadow_tbl.c | 768 +++++++++++++++++++++++++++++-
drivers/net/bnxt/tf_core/tf_shadow_tbl.h | 124 ++---
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 6 +
drivers/net/bnxt/tf_core/tf_tbl.c | 246 +++++++++-
drivers/net/bnxt/tf_core/tf_tbl.h | 22 +-
drivers/net/bnxt/tf_core/tf_tcam.h | 2 +-
9 files changed, 1211 insertions(+), 104 deletions(-)
diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c
index ca3280b..0dbde1d 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -75,7 +75,6 @@ tf_open_session(struct tf *tfp,
/* Session vs session client is decided in
* tf_session_open_session()
*/
- printf("TF_OPEN, %s\n", parms->ctrl_chan_name);
rc = tf_session_open_session(tfp, &oparms);
/* Logging handled by tf_session_open_session */
if (rc)
@@ -954,6 +953,71 @@ tf_alloc_tbl_entry(struct tf *tfp,
}
int
+tf_search_tbl_entry(struct tf *tfp,
+ struct tf_search_tbl_entry_parms *parms)
+{
+ int rc;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tf_tbl_alloc_search_parms sparms;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ if (dev->ops->tf_dev_alloc_search_tbl == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ memset(&sparms, 0, sizeof(struct tf_tbl_alloc_search_parms));
+ sparms.dir = parms->dir;
+ sparms.type = parms->type;
+ sparms.result = parms->result;
+ sparms.result_sz_in_bytes = parms->result_sz_in_bytes;
+ sparms.alloc = parms->alloc;
+ sparms.tbl_scope_id = parms->tbl_scope_id;
+ rc = dev->ops->tf_dev_alloc_search_tbl(tfp, &sparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: TBL allocation failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Return the outputs from the search */
+ parms->hit = sparms.hit;
+ parms->search_status = sparms.search_status;
+ parms->ref_cnt = sparms.ref_cnt;
+ parms->idx = sparms.idx;
+
+ return 0;
+}
+
+int
tf_free_tbl_entry(struct tf *tfp,
struct tf_free_tbl_entry_parms *parms)
{
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index 349a1f1..db10935 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -291,9 +291,9 @@ enum tf_tcam_tbl_type {
};
/**
- * TCAM SEARCH STATUS
+ * SEARCH STATUS
*/
-enum tf_tcam_search_status {
+enum tf_search_status {
/** The entry was not found, but an idx was allocated if requested. */
MISS,
/** The entry was found, and the result/idx are valid */
@@ -1011,7 +1011,7 @@ struct tf_search_tcam_entry_parms {
/**
* [out] Search result status (hit, miss, reject)
*/
- enum tf_tcam_search_status search_status;
+ enum tf_search_status search_status;
/**
* [out] Current refcnt after allocation
*/
@@ -1288,6 +1288,79 @@ int tf_free_tcam_entry(struct tf *tfp,
/**
* tf_alloc_tbl_entry parameter definition
*/
+struct tf_search_tbl_entry_parms {
+ /**
+ * [in] Receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] Type of the allocation
+ */
+ enum tf_tbl_type type;
+ /**
+ * [in] Table scope identifier (ignored unless TF_TBL_TYPE_EXT)
+ */
+ uint32_t tbl_scope_id;
+ /**
+ * [in] Result data to search for
+ */
+ uint8_t *result;
+ /**
+ * [in] Result data size in bytes
+ */
+ uint16_t result_sz_in_bytes;
+ /**
+ * [in] Allocate on miss.
+ */
+ uint8_t alloc;
+ /**
+ * [out] Set if matching entry found
+ */
+ uint8_t hit;
+ /**
+ * [out] Search result status (hit, miss, reject)
+ */
+ enum tf_search_status search_status;
+ /**
+ * [out] Current ref count after allocation
+ */
+ uint16_t ref_cnt;
+ /**
+ * [out] Idx of allocated entry or found entry
+ */
+ uint32_t idx;
+};
+
+/**
+ * search Table Entry (experimental)
+ *
+ * This function searches the shadow copy of an index table for a matching
+ * entry. The result data must match for hit to be set. Only TruFlow core
+ * data is accessed. If shadow_copy is not enabled, an error is returned.
+ *
+ * Implementation:
+ *
+ * A hash is performed on the result data and mapped to a shadow copy entry
+ * where the result is populated. If the result matches the entry, hit is set,
+ * ref_cnt is incremented (if alloc), and the search status indicates what
+ * action the caller can take regarding setting the entry.
+ *
+ * search status should be used as follows:
+ * - On MISS, the caller should set the result into the returned index.
+ *
+ * - On REJECT, the caller should reject the flow since there are no resources.
+ *
+ * - On HIT, the matching index is returned to the caller. Additionally, the
+ * ref_cnt is updated.
+ *
+ * Also returns success or failure code.
+ */
+int tf_search_tbl_entry(struct tf *tfp,
+ struct tf_search_tbl_entry_parms *parms);
+
+/**
+ * tf_alloc_tbl_entry parameter definition
+ */
struct tf_alloc_tbl_entry_parms {
/**
* [in] Receive or transmit direction
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.c b/drivers/net/bnxt/tf_core/tf_device_p4.c
index afb6098..fe8dec3 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.c
@@ -126,7 +126,7 @@ const struct tf_dev_ops tf_dev_ops_p4 = {
.tf_dev_alloc_ext_tbl = tf_tbl_ext_alloc,
.tf_dev_free_tbl = tf_tbl_free,
.tf_dev_free_ext_tbl = tf_tbl_ext_free,
- .tf_dev_alloc_search_tbl = NULL,
+ .tf_dev_alloc_search_tbl = tf_tbl_alloc_search,
.tf_dev_set_tbl = tf_tbl_set,
.tf_dev_set_ext_tbl = tf_tbl_ext_common_set,
.tf_dev_get_tbl = tf_tbl_get,
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
index 8f2b6de..019a26e 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
@@ -3,61 +3,785 @@
* All rights reserved.
*/
-#include <rte_common.h>
-
+#include "tf_common.h"
+#include "tf_util.h"
+#include "tfp.h"
+#include "tf_core.h"
#include "tf_shadow_tbl.h"
+#include "tf_hash.h"
/**
- * Shadow table DB element
+ * The implementation includes 3 tables per table type.
+ * - hash table
+ * - sized so that a minimum of 4 slots per shadow entry are available to
+ * minimize the likelihood of collisions.
+ * - shadow key table
+ * - sized to the number of entries requested and is directly indexed
+ * - the index is zero based and is the table index - the base address
+ * - the data associated with the entry is stored in the key table.
+ * - The stored key is actually the data associated with the entry.
+ * - shadow result table
+ * - the result table is stored separately since it only needs to be accessed
+ * when the key matches.
+ * - the result has a back pointer to the hash table via the hb handle. The
+ * hb handle is a 32 bit represention of the hash with a valid bit, bucket
+ * element index, and the hash index. It is necessary to store the hb handle
+ * with the result since subsequent removes only provide the table index.
+ *
+ * - Max entries is limited in the current implementation since bit 15 is the
+ * valid bit in the hash table.
+ * - A 16bit hash is calculated and masked based on the number of entries
+ * - 64b wide bucket is used and broken into 4x16bit elements.
+ * This decision is based on quicker bucket scanning to determine if any
+ * elements are in use.
+ * - bit 15 of each bucket element is the valid bit; this is done to prevent having
+ * to read the larger key/result data for determining VALID. It also aids
+ * in the more efficient scanning of the bucket for slot usage.
*/
-struct tf_shadow_tbl_element {
- /**
- * Hash table
- */
- void *hash;
- /**
- * Reference count, array of number of table type entries
- */
- uint16_t *ref_count;
+/*
+ * The maximum number of shadow entries supported. The value also doubles as
+ * the maximum number of hash buckets. There are only 15 bits of data per
+ * bucket to point to the shadow tables.
+ */
+#define TF_SHADOW_ENTRIES_MAX (1 << 15)
+
+/* The number of elements(BE) per hash bucket (HB) */
+#define TF_SHADOW_HB_NUM_ELEM (4)
+#define TF_SHADOW_BE_VALID (1 << 15)
+#define TF_SHADOW_BE_IS_VALID(be) (((be) & TF_SHADOW_BE_VALID) != 0)
+
+/**
+ * The hash bucket handle is 32b
+ * - bit 31, the Valid bit
+ * - bit 29-30, the element
+ * - bits 0-15, the hash idx (is masked based on the allocated size)
+ */
+#define TF_SHADOW_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
+#define TF_SHADOW_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
+ ((be) << 29) | (idx))
+
+#define TF_SHADOW_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
+ (TF_SHADOW_HB_NUM_ELEM - 1))
+
+#define TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hdl)((hdl) & \
+ (ctxt)->hash_ctxt.hid_mask)
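+
+/*
+ * Worked example (illustrative only, assuming hid_mask = 0x03ff, i.e. a
+ * 1024 bucket hash table):
+ *   TF_SHADOW_HB_HANDLE_CREATE(0x0123, 2)          = 0xc0000123
+ *   TF_SHADOW_HB_HANDLE_BE_GET(0xc0000123)         = 2
+ *   TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, 0xc0000123) = 0x0123
+ */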
+
+/**
+ * The idx provided by the caller is within a region, so currently the base is
+ * either added or subtracted from the idx to ensure it can be used as a
+ * compressed index
+ */
+
+/* Convert the table index to a shadow index */
+#define TF_SHADOW_IDX_TO_SHIDX(ctxt, idx) ((idx) - \
+ (ctxt)->shadow_ctxt.base_addr)
+
+/* Convert the shadow index to a tbl index */
+#define TF_SHADOW_SHIDX_TO_IDX(ctxt, idx) ((idx) + \
+ (ctxt)->shadow_ctxt.base_addr)
+
+/* Simple helper masks for clearing an element from the bucket */
+#define TF_SHADOW_BE0_MASK_CLEAR(hb) ((hb) & 0xffffffffffff0000ull)
+#define TF_SHADOW_BE1_MASK_CLEAR(hb) ((hb) & 0xffffffff0000ffffull)
+#define TF_SHADOW_BE2_MASK_CLEAR(hb) ((hb) & 0xffff0000ffffffffull)
+#define TF_SHADOW_BE3_MASK_CLEAR(hb) ((hb) & 0x0000ffffffffffffull)
+
+/**
+ * This should be coming from external, but for now it is assumed that no key
+ * is greater than 512 bits (64B). This makes allocation of the key table
+ * easier without having to allocate on the fly.
+ */
+#define TF_SHADOW_MAX_KEY_SZ 64
+
+/*
+ * Local only defines for the internal data.
+ */
+
+/**
+ * tf_shadow_tbl_shadow_key_entry is the key entry of the key table.
+ * The key stored in the table is the result data of the index table.
+ */
+struct tf_shadow_tbl_shadow_key_entry {
+ uint8_t key[TF_SHADOW_MAX_KEY_SZ];
+};
+
+/**
+ * tf_shadow_tbl_shadow_result_entry is the result table entry.
+ * The result table writes are broken into two phases:
+ * - The search phase, which stores the hb_handle and key size and
+ * - The set phase, which writes the refcnt
+ */
+struct tf_shadow_tbl_shadow_result_entry {
+ uint16_t key_size;
+ uint32_t refcnt;
+ uint32_t hb_handle;
+};
+
+/**
+ * tf_shadow_tbl_shadow_ctxt holds all information for accessing the key and
+ * result tables.
+ */
+struct tf_shadow_tbl_shadow_ctxt {
+ struct tf_shadow_tbl_shadow_key_entry *sh_key_tbl;
+ struct tf_shadow_tbl_shadow_result_entry *sh_res_tbl;
+ uint32_t base_addr;
+ uint16_t num_entries;
+ uint16_t alloc_idx;
+};
+
+/**
+ * tf_shadow_tbl_hash_ctxt holds all information related to accessing the hash
+ * table.
+ */
+struct tf_shadow_tbl_hash_ctxt {
+ uint64_t *hashtbl;
+ uint16_t hid_mask;
+ uint16_t hash_entries;
};
/**
- * Shadow table DB definition
+ * tf_shadow_tbl_ctxt holds the hash and shadow tables for the current shadow
+ * table db. This structure is per table type as each table type has
+ * its own shadow and hash table.
+ */
+struct tf_shadow_tbl_ctxt {
+ struct tf_shadow_tbl_shadow_ctxt shadow_ctxt;
+ struct tf_shadow_tbl_hash_ctxt hash_ctxt;
+};
+
+/**
+ * tf_shadow_tbl_db is the allocated db structure returned as an opaque
+ * void * pointer to the caller during create db. It holds the pointers for
+ * each table associated with the db.
*/
struct tf_shadow_tbl_db {
- /**
- * The DB consists of an array of elements
- */
- struct tf_shadow_tbl_element *db;
+ /* Each context holds the shadow and hash table information */
+ struct tf_shadow_tbl_ctxt *ctxt[TF_TBL_TYPE_MAX];
};
+/**
+ * Simple routine that decides what table types can be searchable.
+ *
+ */
+static int tf_shadow_tbl_is_searchable(enum tf_tbl_type type)
+{
+ int rc = 0;
+
+ switch (type) {
+ case TF_TBL_TYPE_ACT_ENCAP_8B:
+ case TF_TBL_TYPE_ACT_ENCAP_16B:
+ case TF_TBL_TYPE_ACT_ENCAP_32B:
+ case TF_TBL_TYPE_ACT_ENCAP_64B:
+ case TF_TBL_TYPE_ACT_SP_SMAC:
+ case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
+ case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
+ case TF_TBL_TYPE_ACT_MODIFY_IPV4:
+ case TF_TBL_TYPE_ACT_MODIFY_SPORT:
+ case TF_TBL_TYPE_ACT_MODIFY_DPORT:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ };
+
+ return rc;
+}
+
+/**
+ * Returns the number of entries in the contexts shadow table.
+ */
+static inline uint16_t
+tf_shadow_tbl_sh_num_entries_get(struct tf_shadow_tbl_ctxt *ctxt)
+{
+ return ctxt->shadow_ctxt.num_entries;
+}
+
+/**
+ * Compare the give key with the key in the shadow table.
+ *
+ * Returns 0 if the keys match
+ */
+static int
+tf_shadow_tbl_key_cmp(struct tf_shadow_tbl_ctxt *ctxt,
+ uint8_t *key,
+ uint16_t sh_idx,
+ uint16_t size)
+{
+ if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
+ sh_idx >= tf_shadow_tbl_sh_num_entries_get(ctxt) || !key)
+ return -1;
+
+ return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
+}
+
+/**
+ * Free the memory associated with the context.
+ */
+static void
+tf_shadow_tbl_ctxt_delete(struct tf_shadow_tbl_ctxt *ctxt)
+{
+ if (!ctxt)
+ return;
+
+ tfp_free(ctxt->hash_ctxt.hashtbl);
+ tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
+ tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
+}
+
+/**
+ * The TF Shadow TBL context is per TBL and holds all information relating to
+ * managing the shadow and search capability. This routine allocates data that
+ * needs to be deallocated by tf_shadow_tbl_ctxt_delete prior to deleting
+ * the shadow db.
+ */
+static int
+tf_shadow_tbl_ctxt_create(struct tf_shadow_tbl_ctxt *ctxt,
+ uint16_t num_entries,
+ uint16_t base_addr)
+{
+ struct tfp_calloc_parms cparms;
+ uint16_t hash_size = 1;
+ uint16_t hash_mask;
+ int rc;
+
+ /* Hash table is a power of two that holds the number of entries */
+ if (num_entries > TF_SHADOW_ENTRIES_MAX) {
+ TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
+ num_entries,
+ TF_SHADOW_ENTRIES_MAX);
+ return -ENOMEM;
+ }
+
+ while (hash_size < num_entries)
+ hash_size = hash_size << 1;
+
+ hash_mask = hash_size - 1;
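+ /* e.g. num_entries = 1000 gives hash_size = 1024, hash_mask = 0x03ff */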
+
+ /* Allocate the hash table */
+ cparms.nitems = hash_size;
+ cparms.size = sizeof(uint64_t);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->hash_ctxt.hashtbl = cparms.mem_va;
+ ctxt->hash_ctxt.hid_mask = hash_mask;
+ ctxt->hash_ctxt.hash_entries = hash_size;
+
+ /* allocate the shadow tables */
+ /* allocate the shadow key table */
+ cparms.nitems = num_entries;
+ cparms.size = sizeof(struct tf_shadow_tbl_shadow_key_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;
+
+ /* allocate the shadow result table */
+ cparms.nitems = num_entries;
+ cparms.size = sizeof(struct tf_shadow_tbl_shadow_result_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;
+
+ ctxt->shadow_ctxt.num_entries = num_entries;
+ ctxt->shadow_ctxt.base_addr = base_addr;
+
+ return 0;
+error:
+ tf_shadow_tbl_ctxt_delete(ctxt);
+
+ return -ENOMEM;
+}
+
+/**
+ * Get a shadow table context given the db and the table type
+ */
+static struct tf_shadow_tbl_ctxt *
+tf_shadow_tbl_ctxt_get(struct tf_shadow_tbl_db *shadow_db,
+ enum tf_tbl_type type)
+{
+ if (type >= TF_TBL_TYPE_MAX ||
+ !shadow_db ||
+ !shadow_db->ctxt[type])
+ return NULL;
+
+ return shadow_db->ctxt[type];
+}
+
+/**
+ * Sets the hash entry into the table given the table context, hash bucket
+ * handle, and shadow index.
+ */
+static inline int
+tf_shadow_tbl_set_hash_entry(struct tf_shadow_tbl_ctxt *ctxt,
+ uint32_t hb_handle,
+ uint16_t sh_idx)
+{
+ uint16_t hid = TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ uint16_t be = TF_SHADOW_HB_HANDLE_BE_GET(hb_handle);
+ uint64_t entry = sh_idx | TF_SHADOW_BE_VALID;
+
+ if (hid >= ctxt->hash_ctxt.hash_entries)
+ return -EINVAL;
+
+ ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
+ return 0;
+}
+
+/**
+ * Clears the hash entry given the TBL context and hash bucket handle.
+ */
+static inline void
+tf_shadow_tbl_clear_hash_entry(struct tf_shadow_tbl_ctxt *ctxt,
+ uint32_t hb_handle)
+{
+ uint16_t hid, be;
+ uint64_t *bucket;
+
+ if (!TF_SHADOW_HB_HANDLE_IS_VALID(hb_handle))
+ return;
+
+ hid = TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ be = TF_SHADOW_HB_HANDLE_BE_GET(hb_handle);
+ bucket = &ctxt->hash_ctxt.hashtbl[hid];
+
+ switch (be) {
+ case 0:
+ *bucket = TF_SHADOW_BE0_MASK_CLEAR(*bucket);
+ break;
+ case 1:
+ *bucket = TF_SHADOW_BE1_MASK_CLEAR(*bucket);
+ break;
+ case 2:
+ *bucket = TF_SHADOW_BE2_MASK_CLEAR(*bucket);
+ break;
+ case 3:
+ *bucket = TF_SHADOW_BE3_MASK_CLEAR(*bucket);
+ break;
+ default:
+ /*
+ * Since the BE_GET masks non-inclusive bits, this will not
+ * happen.
+ */
+ break;
+ }
+}
+
+/**
+ * Clears the shadow key and result entries given the table context and
+ * shadow index.
+ */
+static void
+tf_shadow_tbl_clear_sh_entry(struct tf_shadow_tbl_ctxt *ctxt,
+ uint16_t sh_idx)
+{
+ struct tf_shadow_tbl_shadow_key_entry *sk_entry;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (sh_idx >= tf_shadow_tbl_sh_num_entries_get(ctxt))
+ return;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];
+
+ /*
+ * memset key/result to zero for now, possibly leave the data alone
+ * in the future and rely on the valid bit in the hash table.
+ */
+ memset(sk_entry, 0, sizeof(struct tf_shadow_tbl_shadow_key_entry));
+ memset(sr_entry, 0, sizeof(struct tf_shadow_tbl_shadow_result_entry));
+}
+
+/**
+ * Binds the allocated tbl index with the hash and shadow tables.
+ * The entry will be incomplete until the set has happened with the result
+ * data.
+ */
int
-tf_shadow_tbl_create_db(struct tf_shadow_tbl_create_db_parms *parms __rte_unused)
+tf_shadow_tbl_bind_index(struct tf_shadow_tbl_bind_index_parms *parms)
{
+ int rc;
+ uint16_t idx, len;
+ struct tf_shadow_tbl_ctxt *ctxt;
+ struct tf_shadow_tbl_db *shadow_db;
+ struct tf_shadow_tbl_shadow_key_entry *sk_entry;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (!parms || !TF_SHADOW_HB_HANDLE_IS_VALID(parms->hb_handle) ||
+ !parms->data) {
+ TFP_DRV_LOG(ERR, "Invalid parms\n");
+ return -EINVAL;
+ }
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ ctxt = tf_shadow_tbl_ctxt_get(shadow_db, parms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
+ tf_tbl_type_2_str(parms->type));
+ return -EINVAL;
+ }
+
+ idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, parms->idx);
+ len = parms->data_sz_in_bytes;
+ if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt) ||
+ len > TF_SHADOW_MAX_KEY_SZ) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid len (%d) > %d || oob idx %d\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ len,
+ TF_SHADOW_MAX_KEY_SZ, idx);
+
+ return -EINVAL;
+ }
+
+ rc = tf_shadow_tbl_set_hash_entry(ctxt, parms->hb_handle, idx);
+ if (rc)
+ return -EINVAL;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+
+ /* For tables, the data is the key */
+ memcpy(sk_entry->key, parms->data, len);
+
+ /* Write the result table */
+ sr_entry->key_size = len;
+ sr_entry->hb_handle = parms->hb_handle;
+ sr_entry->refcnt = 1;
+
return 0;
}
+/**
+ * Deletes hash/shadow information if no more references.
+ *
+ * Returns 0 - The caller should delete the table entry in hardware.
+ * Returns non-zero - The number of references to the entry
+ */
int
-tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms __rte_unused)
+tf_shadow_tbl_remove(struct tf_shadow_tbl_remove_parms *parms)
{
+ uint16_t idx;
+ uint32_t hb_handle;
+ struct tf_shadow_tbl_ctxt *ctxt;
+ struct tf_shadow_tbl_db *shadow_db;
+ struct tf_tbl_free_parms *fparms;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (!parms || !parms->fparms) {
+ TFP_DRV_LOG(ERR, "Invalid parms\n");
+ return -EINVAL;
+ }
+
+ fparms = parms->fparms;
+ if (!tf_shadow_tbl_is_searchable(fparms->type))
+ return 0;
+ /*
+ * Initialize the ref count to zero. The default would be to remove
+ * the entry.
+ */
+ fparms->ref_cnt = 0;
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ ctxt = tf_shadow_tbl_ctxt_get(shadow_db, fparms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
+ tf_tbl_type_2_str(fparms->type));
+ return 0;
+ }
+
+ idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, fparms->idx);
+ if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt)) {
+ TFP_DRV_LOG(DEBUG, "%s %d >= %d\n",
+ tf_tbl_type_2_str(fparms->type),
+ fparms->idx,
+ tf_shadow_tbl_sh_num_entries_get(ctxt));
+ return 0;
+ }
+
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+ if (sr_entry->refcnt <= 1) {
+ hb_handle = sr_entry->hb_handle;
+ tf_shadow_tbl_clear_hash_entry(ctxt, hb_handle);
+ tf_shadow_tbl_clear_sh_entry(ctxt, idx);
+ } else {
+ sr_entry->refcnt--;
+ fparms->ref_cnt = sr_entry->refcnt;
+ }
+
return 0;
}
int
-tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms __rte_unused)
+tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms)
{
+ uint16_t len;
+ uint64_t bucket;
+ uint32_t i, hid32;
+ struct tf_shadow_tbl_ctxt *ctxt;
+ struct tf_shadow_tbl_db *shadow_db;
+ uint16_t hid16, hb_idx, hid_mask, shtbl_idx, shtbl_key, be_valid;
+ struct tf_tbl_alloc_search_parms *sparms;
+ uint32_t be_avail = TF_SHADOW_HB_NUM_ELEM;
+
+ if (!parms || !parms->sparms) {
+ TFP_DRV_LOG(ERR, "tbl search with invalid parms\n");
+ return -EINVAL;
+ }
+
+ sparms = parms->sparms;
+ /* Check that caller was supposed to call search */
+ if (!tf_shadow_tbl_is_searchable(sparms->type))
+ return -EINVAL;
+
+ /* Initialize return values to invalid */
+ sparms->hit = 0;
+ sparms->search_status = REJECT;
+ parms->hb_handle = 0;
+ sparms->ref_cnt = 0;
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ ctxt = tf_shadow_tbl_ctxt_get(shadow_db, sparms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(ERR, "%s Unable to get tbl mgr context\n",
+ tf_tbl_type_2_str(sparms->type));
+ return -EINVAL;
+ }
+
+ len = sparms->result_sz_in_bytes;
+ if (len > TF_SHADOW_MAX_KEY_SZ || !sparms->result || !len) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid parms %d : %p\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tbl_type_2_str(sparms->type),
+ len,
+ sparms->result);
+ return -EINVAL;
+ }
+
+ /*
+ * Calculate the crc32
+ * Fold it to create a 16b value
+ * Reduce it to fit the table
+ */
+ hid32 = tf_hash_calc_crc32(sparms->result, len);
+ hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
+ hid_mask = ctxt->hash_ctxt.hid_mask;
+ hb_idx = hid16 & hid_mask;
+
+ bucket = ctxt->hash_ctxt.hashtbl[hb_idx];
+ if (!bucket) {
+ /* empty bucket means a miss and available entry */
+ sparms->search_status = MISS;
+ parms->hb_handle = TF_SHADOW_HB_HANDLE_CREATE(hb_idx, 0);
+ sparms->idx = 0;
+ return 0;
+ }
+
+ /* Set the avail to max so we can detect when there is an avail entry */
+ be_avail = TF_SHADOW_HB_NUM_ELEM;
+ for (i = 0; i < TF_SHADOW_HB_NUM_ELEM; i++) {
+ shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff);
+ be_valid = TF_SHADOW_BE_IS_VALID(shtbl_idx);
+ if (!be_valid) {
+ /* The element is avail, keep going */
+ be_avail = i;
+ continue;
+ }
+ /* There is a valid entry, compare it */
+ shtbl_key = shtbl_idx & ~TF_SHADOW_BE_VALID;
+ if (!tf_shadow_tbl_key_cmp(ctxt,
+ sparms->result,
+ shtbl_key,
+ len)) {
+ /*
+ * It matches, increment the ref count if the caller
+ * requested allocation and return the info
+ */
+ if (sparms->alloc)
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt =
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt + 1;
+
+ sparms->hit = 1;
+ sparms->search_status = HIT;
+ parms->hb_handle =
+ TF_SHADOW_HB_HANDLE_CREATE(hb_idx, i);
+ sparms->idx = TF_SHADOW_SHIDX_TO_IDX(ctxt, shtbl_key);
+ sparms->ref_cnt =
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt;
+
+ return 0;
+ }
+ }
+
+ /* No hits, return avail entry if exists */
+ if (be_avail < TF_SHADOW_HB_NUM_ELEM) {
+ /*
+ * There is an available hash entry, so return MISS and the
+ * hash handle for the subsequent bind.
+ */
+ parms->hb_handle = TF_SHADOW_HB_HANDLE_CREATE(hb_idx, be_avail);
+ sparms->search_status = MISS;
+ sparms->hit = 0;
+ sparms->idx = 0;
+ } else {
+ /* No room for the entry in the hash table, must REJECT */
+ sparms->search_status = REJECT;
+ }
+
return 0;
}
int
-tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms __rte_unused)
+tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms)
{
+ uint16_t idx;
+ struct tf_shadow_tbl_ctxt *ctxt;
+ struct tf_tbl_set_parms *sparms;
+ struct tf_shadow_tbl_db *shadow_db;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (!parms || !parms->sparms) {
+ TFP_DRV_LOG(ERR, "Null parms\n");
+ return -EINVAL;
+ }
+
+ sparms = parms->sparms;
+ if (!sparms->data || !sparms->data_sz_in_bytes) {
+ TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tbl_type_2_str(sparms->type));
+ return -EINVAL;
+ }
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ ctxt = tf_shadow_tbl_ctxt_get(shadow_db, sparms->type);
+ if (!ctxt) {
+ /* We aren't tracking this table, so return success */
+ TFP_DRV_LOG(DEBUG, "%s Unable to get tbl mgr context\n",
+ tf_tbl_type_2_str(sparms->type));
+ return 0;
+ }
+
+ idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, sparms->idx);
+ if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt)) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tbl_type_2_str(sparms->type),
+ sparms->idx);
+ return -EINVAL;
+ }
+
+ /* Write the result table, the key/hash has been written already */
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+
+ /*
+ * If the handle is not valid, the bind was never called. We aren't
+ * tracking this entry.
+ */
+ if (!TF_SHADOW_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
+ return 0;
+
+ sr_entry->refcnt = 1;
+
return 0;
}
int
-tf_shadow_tbl_remove(struct tf_shadow_tbl_remove_parms *parms __rte_unused)
+tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms)
{
+ struct tf_shadow_tbl_db *shadow_db;
+ int i;
+
+ TF_CHECK_PARMS1(parms);
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ if (!shadow_db) {
+ TFP_DRV_LOG(DEBUG, "Shadow db is NULL cannot be freed\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
+ if (shadow_db->ctxt[i]) {
+ tf_shadow_tbl_ctxt_delete(shadow_db->ctxt[i]);
+ tfp_free(shadow_db->ctxt[i]);
+ }
+ }
+
+ tfp_free(shadow_db);
+
return 0;
}
+
+/**
+ * Allocate the shadow table DB and per-type contexts used for the
+ * search-and-allocate capability.
+ */
+int tf_shadow_tbl_create_db(struct tf_shadow_tbl_create_db_parms *parms)
+{
+ int rc;
+ int i;
+ uint16_t base;
+ struct tfp_calloc_parms cparms;
+ struct tf_shadow_tbl_db *shadow_db = NULL;
+
+ TF_CHECK_PARMS1(parms);
+
+ /* Build the shadow DB per the request */
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_shadow_tbl_db);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ return rc;
+ shadow_db = (void *)cparms.mem_va;
+
+ for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
+ /* If the element didn't request an allocation, or isn't a
+ * searchable type, there is no need to create a shadow context.
+ */
+ if (!parms->cfg->alloc_cnt[i] ||
+ !tf_shadow_tbl_is_searchable(i)) {
+ shadow_db->ctxt[i] = NULL;
+ continue;
+ }
+
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_shadow_tbl_ctxt);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+
+ shadow_db->ctxt[i] = cparms.mem_va;
+ base = parms->cfg->base_addr[i];
+ rc = tf_shadow_tbl_ctxt_create(shadow_db->ctxt[i],
+ parms->cfg->alloc_cnt[i],
+ base);
+ if (rc)
+ goto error;
+ }
+
+ *parms->shadow_db = (void *)shadow_db;
+
+ TFP_DRV_LOG(INFO,
+ "TF SHADOW TABLE - initialized\n");
+
+ return 0;
+error:
+ for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
+ if (shadow_db->ctxt[i]) {
+ tf_shadow_tbl_ctxt_delete(shadow_db->ctxt[i]);
+ tfp_free(shadow_db->ctxt[i]);
+ }
+ }
+
+ tfp_free(shadow_db);
+
+ return -ENOMEM;
+}
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.h b/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
index dfd336e..e73381f 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
@@ -8,8 +8,6 @@
#include "tf_core.h"
-struct tf;
-
/**
* The Shadow Table module provides shadow DB handling for table based
* TF types. A shadow DB provides the capability that allows for reuse
@@ -32,19 +30,22 @@ struct tf;
*/
struct tf_shadow_tbl_cfg_parms {
/**
- * TF Table type
+ * [in] The number of elements in the alloc_cnt and base_addr
+ * For now, it should always be equal to TF_TBL_TYPE_MAX
*/
- enum tf_tbl_type type;
+ int num_entries;
/**
- * Number of entries the Shadow DB needs to hold
+ * [in] Resource allocation count array
+ * This array content originates from the tf_session_resources
+ * that is passed in on session open
+ * Array size is TF_TBL_TYPE_MAX
*/
- int num_entries;
-
+ uint16_t *alloc_cnt;
/**
- * Element width for this table type
+ * [in] The base index for each table
*/
- int element_width;
+ uint16_t base_addr[TF_TBL_TYPE_MAX];
};
/**
@@ -52,17 +53,17 @@ struct tf_shadow_tbl_cfg_parms {
*/
struct tf_shadow_tbl_create_db_parms {
/**
- * [in] Configuration information for the shadow db
+ * [in] Receive or transmit direction
*/
- struct tf_shadow_tbl_cfg_parms *cfg;
+ enum tf_dir dir;
/**
- * [in] Number of elements in the parms structure
+ * [in] Configuration information for the shadow db
*/
- uint16_t num_elements;
+ struct tf_shadow_tbl_cfg_parms *cfg;
/**
* [out] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void **shadow_db;
};
/**
@@ -70,9 +71,9 @@ struct tf_shadow_tbl_create_db_parms {
*/
struct tf_shadow_tbl_free_db_parms {
/**
- * Shadow table DB handle
+ * [in] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void *shadow_db;
};
/**
@@ -82,79 +83,77 @@ struct tf_shadow_tbl_search_parms {
/**
* [in] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void *shadow_db;
/**
- * [in] Table type
+ * [inout] The search parms from tf core
*/
- enum tf_tbl_type type;
- /**
- * [in] Pointer to entry blob value in remap table to match
- */
- uint8_t *entry;
- /**
- * [in] Size of the entry blob passed in bytes
- */
- uint16_t entry_sz;
- /**
- * [out] Index of the found element returned if hit
- */
- uint16_t *index;
+ struct tf_tbl_alloc_search_parms *sparms;
/**
* [out] Reference count incremented if hit
*/
- uint16_t *ref_cnt;
+ uint32_t hb_handle;
};
/**
- * Shadow table insert parameters
+ * Shadow Table bind index parameters
*/
-struct tf_shadow_tbl_insert_parms {
+struct tf_shadow_tbl_bind_index_parms {
/**
- * [in] Shadow table DB handle
+ * [in] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void *shadow_db;
/**
- * [in] Tbl type
+ * [in] Receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] Table type
*/
enum tf_tbl_type type;
/**
- * [in] Pointer to entry blob value in remap table to match
+ * [in] index of the entry to program
*/
- uint8_t *entry;
+ uint16_t idx;
/**
- * [in] Size of the entry blob passed in bytes
+ * [in] Pointer to the data used as the key
*/
- uint16_t entry_sz;
+ uint8_t *data;
/**
- * [in] Entry to update
+ * [in] data size in bytes
*/
- uint16_t index;
+ uint16_t data_sz_in_bytes;
/**
- * [out] Reference count after insert
+ * [in] The hash bucket handle returned from the search
*/
- uint16_t *ref_cnt;
+ uint32_t hb_handle;
};
/**
- * Shadow table remove parameters
+ * Shadow table insert parameters
*/
-struct tf_shadow_tbl_remove_parms {
+struct tf_shadow_tbl_insert_parms {
/**
* [in] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void *shadow_db;
/**
- * [in] Tbl type
+ * [in] The insert parms from tf core
*/
- enum tf_tbl_type type;
+ struct tf_tbl_set_parms *sparms;
+};
+
+/**
+ * Shadow table remove parameters
+ */
+struct tf_shadow_tbl_remove_parms {
/**
- * [in] Entry to update
+ * [in] Shadow table DB handle
*/
- uint16_t index;
+ void *shadow_db;
/**
- * [out] Reference count after removal
+ * [in] The free parms from tf core
*/
- uint16_t *ref_cnt;
+ struct tf_tbl_free_parms *fparms;
};
/**
@@ -206,10 +205,27 @@ int tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms);
* Returns
* - (0) if successful, element was found.
* - (-EINVAL) on failure.
+ *
+ * If there is a miss, but there is room for insertion, the hb_handle returned
+ * is used for insertion during the bind index API
*/
int tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms);
/**
+ * Bind Shadow table db hash and result tables with result from search/alloc
+ *
+ * [in] parms
+ * Pointer to the search parameters
+ *
+ * Returns
+ * - (0) if successful
+ * - (-EINVAL) on failure.
+ *
+ * This is only called after a MISS in the search returns a hb_handle
+ */
+int tf_shadow_tbl_bind_index(struct tf_shadow_tbl_bind_index_parms *parms);
+
+/**
* Inserts an element into the Shadow table DB. Will fail if the
* elements ref_count is different from 0. Ref_count after insert will
* be incremented.
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
index beaea03..a0130d6 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
@@ -373,6 +373,12 @@ tf_shadow_tcam_clear_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
case 3:
*bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
break;
+ default:
+ /*
+ * Cannot happen: BE_GET masks the handle down to a valid bucket
+ * element index (0-3).
+ */
+ break;
}
}
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.c b/drivers/net/bnxt/tf_core/tf_tbl.c
index 9ebaa34..bec5210 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_tbl.c
@@ -13,6 +13,9 @@
#include "tf_util.h"
#include "tf_msg.h"
#include "tfp.h"
+#include "tf_shadow_tbl.h"
+#include "tf_session.h"
+#include "tf_device.h"
struct tf;
@@ -25,7 +28,7 @@ static void *tbl_db[TF_DIR_MAX];
/**
* Table Shadow DBs
*/
-/* static void *shadow_tbl_db[TF_DIR_MAX]; */
+static void *shadow_tbl_db[TF_DIR_MAX];
/**
* Init flag, set on bind and cleared on unbind
@@ -35,14 +38,19 @@ static uint8_t init;
/**
* Shadow init flag, set on bind and cleared on unbind
*/
-/* static uint8_t shadow_init; */
+static uint8_t shadow_init;
int
tf_tbl_bind(struct tf *tfp,
struct tf_tbl_cfg_parms *parms)
{
- int rc;
- int i;
+ int rc, d, i;
+ struct tf_rm_alloc_info info;
+ struct tf_rm_free_db_parms fparms;
+ struct tf_shadow_tbl_free_db_parms fshadow;
+ struct tf_rm_get_alloc_info_parms ainfo;
+ struct tf_shadow_tbl_cfg_parms shadow_cfg;
+ struct tf_shadow_tbl_create_db_parms shadow_cdb;
struct tf_rm_create_db_parms db_cfg = { 0 };
TF_CHECK_PARMS2(tfp, parms);
@@ -58,26 +66,86 @@ tf_tbl_bind(struct tf *tfp,
db_cfg.num_elements = parms->num_elements;
db_cfg.cfg = parms->cfg;
- for (i = 0; i < TF_DIR_MAX; i++) {
- db_cfg.dir = i;
- db_cfg.alloc_cnt = parms->resources->tbl_cnt[i].cnt;
- db_cfg.rm_db = &tbl_db[i];
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ db_cfg.dir = d;
+ db_cfg.alloc_cnt = parms->resources->tbl_cnt[d].cnt;
+ db_cfg.rm_db = &tbl_db[d];
rc = tf_rm_create_db(tfp, &db_cfg);
if (rc) {
TFP_DRV_LOG(ERR,
"%s: Table DB creation failed\n",
- tf_dir_2_str(i));
+ tf_dir_2_str(d));
return rc;
}
}
+ /* Initialize the Shadow Table. */
+ if (parms->shadow_copy) {
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ memset(&shadow_cfg, 0, sizeof(shadow_cfg));
+ memset(&shadow_cdb, 0, sizeof(shadow_cdb));
+ /* Get the base addresses of the tables */
+ for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
+ memset(&info, 0, sizeof(info));
+
+ if (!parms->resources->tbl_cnt[d].cnt[i])
+ continue;
+ ainfo.rm_db = tbl_db[d];
+ ainfo.db_index = i;
+ ainfo.info = &info;
+ rc = tf_rm_get_info(&ainfo);
+ if (rc)
+ goto error;
+
+ shadow_cfg.base_addr[i] = info.entry.start;
+ }
+
+ /* Create the shadow db */
+ shadow_cfg.alloc_cnt =
+ parms->resources->tbl_cnt[d].cnt;
+ shadow_cfg.num_entries = parms->num_elements;
+
+ shadow_cdb.shadow_db = &shadow_tbl_db[d];
+ shadow_cdb.cfg = &shadow_cfg;
+ rc = tf_shadow_tbl_create_db(&shadow_cdb);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Shadow TBL DB creation failed "
+ "rc=%d\n", rc);
+ goto error;
+ }
+ }
+ shadow_init = 1;
+ }
+
init = 1;
TFP_DRV_LOG(INFO,
"Table Type - initialized\n");
return 0;
+error:
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ memset(&fparms, 0, sizeof(fparms));
+ fparms.dir = d;
+ fparms.rm_db = tbl_db[d];
+ /* Ignoring return here since we are in the error case */
+ (void)tf_rm_free_db(tfp, &fparms);
+
+ if (parms->shadow_copy) {
+ fshadow.shadow_db = shadow_tbl_db[d];
+ tf_shadow_tbl_free_db(&fshadow);
+ shadow_tbl_db[d] = NULL;
+ }
+
+ tbl_db[d] = NULL;
+ }
+
+ shadow_init = 0;
+ init = 0;
+
+ return rc;
}
int
@@ -86,6 +154,7 @@ tf_tbl_unbind(struct tf *tfp)
int rc;
int i;
struct tf_rm_free_db_parms fparms = { 0 };
+ struct tf_shadow_tbl_free_db_parms fshadow;
TF_CHECK_PARMS1(tfp);
@@ -104,9 +173,17 @@ tf_tbl_unbind(struct tf *tfp)
return rc;
tbl_db[i] = NULL;
+
+ if (shadow_init) {
+ memset(&fshadow, 0, sizeof(fshadow));
+ fshadow.shadow_db = shadow_tbl_db[i];
+ tf_shadow_tbl_free_db(&fshadow);
+ shadow_tbl_db[i] = NULL;
+ }
}
init = 0;
+ shadow_init = 0;
return 0;
}
@@ -153,6 +230,7 @@ tf_tbl_free(struct tf *tfp __rte_unused,
int rc;
struct tf_rm_is_allocated_parms aparms = { 0 };
struct tf_rm_free_parms fparms = { 0 };
+ struct tf_shadow_tbl_remove_parms shparms;
int allocated = 0;
TF_CHECK_PARMS2(tfp, parms);
@@ -182,6 +260,36 @@ tf_tbl_free(struct tf *tfp __rte_unused,
return -EINVAL;
}
+ /*
+ * The Shadow mgmt, if enabled, determines if the entry needs
+ * to be deleted.
+ */
+ if (shadow_init) {
+ memset(&shparms, 0, sizeof(shparms));
+ shparms.shadow_db = shadow_tbl_db[parms->dir];
+ shparms.fparms = parms;
+ rc = tf_shadow_tbl_remove(&shparms);
+ if (rc) {
+ /*
+ * Should not get here, log it and let the entry be
+ * deleted.
+ */
+ TFP_DRV_LOG(ERR, "%s: Shadow free fail, "
+ "type:%d index:%d deleting the entry.\n",
+ tf_dir_2_str(parms->dir),
+ parms->type,
+ parms->idx);
+ } else {
+ /*
+ * If the entry still has references, just return the
+ * ref count to the caller. No need to remove entry
+ * from rm.
+ */
+ if (parms->ref_cnt >= 1)
+ return rc;
+ }
+ }
+
/* Free requested element */
fparms.rm_db = tbl_db[parms->dir];
fparms.db_index = parms->type;
@@ -200,10 +308,124 @@ tf_tbl_free(struct tf *tfp __rte_unused,
}
int
-tf_tbl_alloc_search(struct tf *tfp __rte_unused,
- struct tf_tbl_alloc_search_parms *parms __rte_unused)
+tf_tbl_alloc_search(struct tf *tfp,
+ struct tf_tbl_alloc_search_parms *parms)
{
- return 0;
+ int rc, frc;
+ uint32_t idx;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tf_tbl_alloc_parms aparms;
+ struct tf_shadow_tbl_search_parms sparms;
+ struct tf_shadow_tbl_bind_index_parms bparms;
+ struct tf_tbl_free_parms fparms;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ if (!shadow_init || !shadow_tbl_db[parms->dir]) {
+ TFP_DRV_LOG(ERR, "%s: Shadow TBL not initialized.\n",
+ tf_dir_2_str(parms->dir));
+ return -EINVAL;
+ }
+
+ memset(&sparms, 0, sizeof(sparms));
+ sparms.sparms = parms;
+ sparms.shadow_db = shadow_tbl_db[parms->dir];
+ rc = tf_shadow_tbl_search(&sparms);
+ if (rc)
+ return rc;
+
+ /*
+ * Return now unless the caller requested allocation and the search
+ * MISSed; hit and status were already set in the caller's search parms.
+ */
+ if (!parms->alloc || parms->search_status != MISS)
+ return rc;
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Allocate the index */
+ if (dev->ops->tf_dev_alloc_tbl == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return -EOPNOTSUPP;
+ }
+
+ memset(&aparms, 0, sizeof(aparms));
+ aparms.dir = parms->dir;
+ aparms.type = parms->type;
+ aparms.tbl_scope_id = parms->tbl_scope_id;
+ aparms.idx = &idx;
+ rc = dev->ops->tf_dev_alloc_tbl(tfp, &aparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Table allocation failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Bind the allocated index to the data */
+ memset(&bparms, 0, sizeof(bparms));
+ bparms.shadow_db = shadow_tbl_db[parms->dir];
+ bparms.dir = parms->dir;
+ bparms.type = parms->type;
+ bparms.idx = idx;
+ bparms.data = parms->result;
+ bparms.data_sz_in_bytes = parms->result_sz_in_bytes;
+ bparms.hb_handle = sparms.hb_handle;
+ rc = tf_shadow_tbl_bind_index(&bparms);
+ if (rc) {
+ /* Error binding entry, need to free the allocated idx */
+ if (dev->ops->tf_dev_free_tbl == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ memset(&fparms, 0, sizeof(fparms));
+ fparms.dir = parms->dir;
+ fparms.type = parms->type;
+ fparms.idx = idx;
+ frc = dev->ops->tf_dev_free_tbl(tfp, &fparms);
+ if (frc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed free index allocated during "
+ "search. rc=%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-frc));
+ /* return the original failure. */
+ return rc;
+ }
+ }
+
+ parms->idx = idx;
+
+ return rc;
}
int
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.h b/drivers/net/bnxt/tf_core/tf_tbl.h
index f20e8d7..930fcc3 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl.h
+++ b/drivers/net/bnxt/tf_core/tf_tbl.h
@@ -144,29 +144,31 @@ struct tf_tbl_alloc_search_parms {
*/
uint32_t tbl_scope_id;
/**
- * [in] Enable search for matching entry. If the table type is
- * internal the shadow copy will be searched before
- * alloc. Session must be configured with shadow copy enabled.
- */
- uint8_t search_enable;
- /**
- * [in] Result data to search for (if search_enable)
+ * [in] Result data to search for
*/
uint8_t *result;
/**
- * [in] Result data size in bytes (if search_enable)
+ * [in] Result data size in bytes
*/
uint16_t result_sz_in_bytes;
/**
+ * [in] Whether or not to allocate on MISS, 1 is allocate.
+ */
+ uint8_t alloc;
+ /**
* [out] If search_enable, set if matching entry found
*/
uint8_t hit;
/**
- * [out] Current ref count after allocation (if search_enable)
+ * [out] The status of the search (REJECT, MISS, HIT)
+ */
+ enum tf_search_status search_status;
+ /**
+ * [out] Current ref count after allocation
*/
uint16_t ref_cnt;
/**
- * [out] Idx of allocated entry or found entry (if search_enable)
+ * [out] Idx of allocated entry or found entry
*/
uint32_t idx;
};
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.h b/drivers/net/bnxt/tf_core/tf_tcam.h
index 4722ce0..ffa0a94 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_tcam.h
@@ -134,7 +134,7 @@ struct tf_tcam_alloc_search_parms {
/**
* [out] Search result status (hit, miss, reject)
*/
- enum tf_tcam_search_status search_status;
+ enum tf_search_status search_status;
/**
* [out] Current refcnt after allocation
*/
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH 13/20] net/bnxt: ulp mapper changes to use tbl search
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (11 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 12/20] net/bnxt: added shadow table capability with search Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 14/20] net/bnxt: fix port default rule create and destroy Somnath Kotur
` (7 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Modified the ulp mapper to use the new tf_search_tbl_entry API.
When search before allocation is requested, the mapper calls
tf_search_tbl_entry with the alloc flag (a caller sketch follows the list).
- On HIT, the result and table index are returned.
- On MISS, the table index is returned, but the result is
created and the table entry is set.
- On REJECT, the flow request is rejected.
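For reference, a minimal caller sketch of the flow above, not the mapper
code itself: the helper name is hypothetical, only the tf_search_tbl_entry
and tf_set_tbl_entry parameter fields exercised by this patch are used,
and error handling is trimmed.

#include <errno.h>
#include "tf_core.h"

/* Search with alloc set, then program the entry only on a MISS. */
static int
example_search_then_set(struct tf *tfp, enum tf_dir dir,
                        enum tf_tbl_type type, uint32_t tbl_scope_id,
                        uint8_t *result, uint16_t result_sz_in_bytes,
                        uint32_t *index)
{
        struct tf_search_tbl_entry_parms srch = { 0 };
        struct tf_set_tbl_entry_parms set = { 0 };
        int rc;

        srch.dir = dir;
        srch.type = type;
        srch.tbl_scope_id = tbl_scope_id;
        srch.alloc = 1;                 /* allocate on MISS */
        srch.result = result;
        srch.result_sz_in_bytes = result_sz_in_bytes;

        rc = tf_search_tbl_entry(tfp, &srch);
        if (rc)
                return rc;
        if (srch.search_status == REJECT)
                return -ENOMEM;         /* no room, reject the flow */

        *index = srch.idx;
        if (srch.search_status == HIT)
                return 0;               /* entry reused, refcnt bumped */

        /* MISS: a new index was allocated, write the result data */
        set.dir = dir;
        set.type = type;
        set.tbl_scope_id = tbl_scope_id;
        set.data = result;
        set.data_sz_in_bytes = result_sz_in_bytes;
        set.idx = srch.idx;
        return tf_set_tbl_entry(tfp, &set);
}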
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 75 ++++++++++++++++++++++++------------
1 file changed, 51 insertions(+), 24 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index a071c07..4dee659 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -1764,9 +1764,10 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct ulp_blob data;
uint64_t idx = 0;
uint16_t tmplen;
- uint32_t i, num_flds;
+ uint32_t i, num_flds, index, hit;
int32_t rc = 0, trc = 0;
struct tf_alloc_tbl_entry_parms aparms = { 0 };
+ struct tf_search_tbl_entry_parms srchparms = { 0 };
struct tf_set_tbl_entry_parms sparms = { 0 };
struct tf_free_tbl_entry_parms free_parms = { 0 };
uint32_t tbl_scope_id;
@@ -1868,33 +1869,59 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
return 0; /* success */
}
+ index = 0;
+ hit = 0;
/* Perform the tf table allocation by filling the alloc params */
- aparms.dir = tbl->direction;
- aparms.type = tbl->resource_type;
- aparms.search_enable = tbl->srch_b4_alloc;
- aparms.result = ulp_blob_data_get(&data, &tmplen);
- aparms.result_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
- aparms.tbl_scope_id = tbl_scope_id;
-
- /* All failures after the alloc succeeds require a free */
- rc = tf_alloc_tbl_entry(tfp, &aparms);
- if (rc) {
- BNXT_TF_DBG(ERR, "Alloc table[%d][%s] failed rc=%d\n",
- aparms.type,
- (aparms.dir == TF_DIR_RX) ? "RX" : "TX",
- rc);
- return rc;
- }
+ if (tbl->srch_b4_alloc) {
+ memset(&srchparms, 0, sizeof(srchparms));
+ srchparms.dir = tbl->direction;
+ srchparms.type = tbl->resource_type;
+ srchparms.alloc = 1;
+ srchparms.result = ulp_blob_data_get(&data, &tmplen);
+ srchparms.result_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
+ srchparms.tbl_scope_id = tbl_scope_id;
+ rc = tf_search_tbl_entry(tfp, &srchparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Alloc table[%s][%s] failed rc=%d\n",
+ tf_tbl_type_2_str(tbl->resource_type),
+ tf_dir_2_str(tbl->direction), rc);
+ return rc;
+ }
+ if (srchparms.search_status == REJECT) {
+ BNXT_TF_DBG(ERR, "Alloc table[%s][%s] rejected.\n",
+ tf_tbl_type_2_str(tbl->resource_type),
+ tf_dir_2_str(tbl->direction));
+ return -ENOMEM;
+ }
+ index = srchparms.idx;
+ hit = srchparms.hit;
+ } else {
+ aparms.dir = tbl->direction;
+ aparms.type = tbl->resource_type;
+ aparms.search_enable = tbl->srch_b4_alloc;
+ aparms.result = ulp_blob_data_get(&data, &tmplen);
+ aparms.result_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
+ aparms.tbl_scope_id = tbl_scope_id;
+ /* All failures after the alloc succeeds require a free */
+ rc = tf_alloc_tbl_entry(tfp, &aparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Alloc table[%s][%s] failed rc=%d\n",
+ tf_tbl_type_2_str(tbl->resource_type),
+ tf_dir_2_str(tbl->direction), rc);
+ return rc;
+ }
+ index = aparms.idx;
+ }
/*
* calculate the idx for the result record, for external EM the offset
* needs to be shifted accordingly. If external non-inline table types
* are used then need to revisit this logic.
*/
- if (aparms.type == TF_TBL_TYPE_EXT)
- idx = TF_ACT_REC_OFFSET_2_PTR(aparms.idx);
+ if (tbl->resource_type == TF_TBL_TYPE_EXT)
+ idx = TF_ACT_REC_OFFSET_2_PTR(index);
else
- idx = aparms.idx;
+ idx = index;
/* Always storing values in Regfile in BE */
idx = tfp_cpu_to_be_64(idx);
@@ -1908,12 +1935,12 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
}
/* Perform the tf table set by filling the set params */
- if (!tbl->srch_b4_alloc || !aparms.hit) {
+ if (!tbl->srch_b4_alloc || !hit) {
sparms.dir = tbl->direction;
sparms.type = tbl->resource_type;
sparms.data = ulp_blob_data_get(&data, &tmplen);
sparms.data_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
- sparms.idx = aparms.idx;
+ sparms.idx = index;
sparms.tbl_scope_id = tbl_scope_id;
rc = tf_set_tbl_entry(tfp, &sparms);
@@ -1933,7 +1960,7 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
fid_parms.resource_func = tbl->resource_func;
fid_parms.resource_type = tbl->resource_type;
fid_parms.resource_sub_type = tbl->resource_sub_type;
- fid_parms.resource_hndl = aparms.idx;
+ fid_parms.resource_hndl = index;
fid_parms.critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO;
rc = ulp_flow_db_resource_add(parms->ulp_ctx,
@@ -1960,7 +1987,7 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
*/
free_parms.dir = tbl->direction;
free_parms.type = tbl->resource_type;
- free_parms.idx = aparms.idx;
+ free_parms.idx = index;
free_parms.tbl_scope_id = tbl_scope_id;
trc = tf_free_tbl_entry(tfp, &free_parms);
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH 14/20] net/bnxt: fix port default rule create and destroy
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (12 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 13/20] net/bnxt: ulp mapper changes to use tbl search Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 15/20] net/bnxt: delete VF FW rules when a representor is created Somnath Kotur
` (6 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Currently, the port_to_app/app_to_port flow_ids and tx_cfa_action
of the first port get overwritten by the second port, because these
fields are stored in the ulp context, which is shared across the
ports.
This patch fixes the problem by storing these fields in a per-port
structure.
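A minimal sketch of the per-port lookup this change introduces, assuming
only the bnxt_ulp_df_rule_info array added below; the helper name is
hypothetical.

#include "bnxt.h"
#include "bnxt_ulp.h"

/* Each port now indexes its own default-rule state instead of sharing
 * a single slot in the common ulp context. */
static struct bnxt_ulp_df_rule_info *
example_df_rule_info_get(struct bnxt *bp)
{
        uint8_t port_id = bp->eth_dev->data->port_id;

        return &bp->ulp_ctx->cfg_data->df_rule_info[port_id];
}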
Fixes: 9f702636d7ba ("net/bnxt: add port default rules for ingress and egress")
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/bnxt.h | 5 +-
drivers/net/bnxt/bnxt_ethdev.c | 81 ++--------------------
drivers/net/bnxt/bnxt_txq.h | 1 -
drivers/net/bnxt/bnxt_txr.c | 6 +-
drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 12 +++-
drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 14 +++-
drivers/net/bnxt/tf_ulp/ulp_def_rules.c | 116 ++++++++++++++++++++++++++++++++
drivers/net/bnxt/tf_ulp/ulp_flow_db.c | 2 +-
drivers/net/bnxt/tf_ulp/ulp_flow_db.h | 2 +-
9 files changed, 151 insertions(+), 88 deletions(-)
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 50f93ff..41e7ae5 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -784,6 +784,7 @@ struct bnxt {
struct bnxt_flow_stat_info *flow_stat;
uint8_t flow_xstat;
uint16_t max_num_kflows;
+ uint16_t tx_cfa_action;
};
#define BNXT_FC_TIMER 1 /* Timer freq in Sec Flow Counters */
@@ -797,7 +798,7 @@ struct bnxt_vf_representor {
uint16_t fw_fid;
uint16_t dflt_vnic_id;
uint16_t svif;
- uint32_t vfr_tx_cfa_action;
+ uint16_t vfr_tx_cfa_action;
uint16_t rx_cfa_code;
uint32_t rep2vf_flow_id;
uint32_t vf2rep_flow_id;
@@ -872,6 +873,8 @@ extern int bnxt_logtype_driver;
extern const struct rte_flow_ops bnxt_ulp_rte_flow_ops;
int32_t bnxt_ulp_init(struct bnxt *bp);
void bnxt_ulp_deinit(struct bnxt *bp);
+int32_t bnxt_ulp_create_df_rules(struct bnxt *bp);
+void bnxt_ulp_destroy_df_rules(struct bnxt *bp, bool global);
uint16_t bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type);
uint16_t bnxt_get_svif(uint16_t port_id, bool func_svif,
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 0829493..dfc4b41 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1168,73 +1168,6 @@ static int bnxt_handle_if_change_status(struct bnxt *bp)
return rc;
}
-static int32_t
-bnxt_create_port_app_df_rule(struct bnxt *bp, uint8_t flow_type,
- uint32_t *flow_id)
-{
- uint16_t port_id = bp->eth_dev->data->port_id;
- struct ulp_tlv_param param_list[] = {
- {
- .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
- .length = 2,
- .value = {(port_id >> 8) & 0xff, port_id & 0xff}
- },
- {
- .type = BNXT_ULP_DF_PARAM_TYPE_LAST,
- .length = 0,
- .value = {0}
- }
- };
-
- return ulp_default_flow_create(bp->eth_dev, param_list, flow_type,
- flow_id);
-}
-
-static int32_t
-bnxt_create_df_rules(struct bnxt *bp)
-{
- struct bnxt_ulp_data *cfg_data;
- int rc;
-
- cfg_data = bp->ulp_ctx->cfg_data;
- rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_PORT_TO_VS,
- &cfg_data->port_to_app_flow_id);
- if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to create port to app default rule\n");
- return rc;
- }
-
- BNXT_TF_DBG(DEBUG, "***** created port to app default rule ******\n");
- rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_VS_TO_PORT,
- &cfg_data->app_to_port_flow_id);
- if (!rc) {
- rc = ulp_default_flow_db_cfa_action_get(bp->ulp_ctx,
- cfg_data->app_to_port_flow_id,
- &cfg_data->tx_cfa_action);
- if (rc)
- goto err;
-
- BNXT_TF_DBG(DEBUG,
- "***** created app to port default rule *****\n");
- return 0;
- }
-
-err:
- BNXT_TF_DBG(DEBUG, "Failed to create app to port default rule\n");
- return rc;
-}
-
-static void
-bnxt_destroy_df_rules(struct bnxt *bp)
-{
- struct bnxt_ulp_data *cfg_data;
-
- cfg_data = bp->ulp_ctx->cfg_data;
- ulp_default_flow_destroy(bp->eth_dev, cfg_data->port_to_app_flow_id);
- ulp_default_flow_destroy(bp->eth_dev, cfg_data->app_to_port_flow_id);
-}
-
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
@@ -1296,8 +1229,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
bnxt_schedule_fw_health_check(bp);
pthread_mutex_unlock(&bp->def_cp_lock);
- if (BNXT_TRUFLOW_EN(bp))
- bnxt_ulp_init(bp);
+ bnxt_ulp_init(bp);
return 0;
@@ -1358,6 +1290,9 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
+ bnxt_ulp_destroy_df_rules(bp, false);
+ bnxt_ulp_deinit(bp);
+
bnxt_cancel_fw_health_check(bp);
bnxt_dev_set_link_down_op(eth_dev);
@@ -1403,11 +1338,6 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
bnxt_cancel_fc_thread(bp);
- if (BNXT_TRUFLOW_EN(bp)) {
- bnxt_destroy_df_rules(bp);
- bnxt_ulp_deinit(bp);
- }
-
if (eth_dev->data->dev_started)
bnxt_dev_stop_op(eth_dev);
@@ -1656,8 +1586,7 @@ static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
if (rc != 0)
vnic->flags = old_flags;
- if (BNXT_TRUFLOW_EN(bp))
- bnxt_create_df_rules(bp);
+ bnxt_ulp_create_df_rules(bp);
return rc;
}
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index a1ab3f3..83a9853 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -29,7 +29,6 @@ struct bnxt_tx_queue {
struct bnxt *bp;
int index;
int tx_wake_thresh;
- uint32_t tx_cfa_action;
uint32_t vfr_tx_cfa_action;
struct bnxt_tx_ring_info *tx_ring;
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index c741c71..1113aca 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -133,8 +133,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST |
PKT_TX_QINQ_PKT) ||
(BNXT_TRUFLOW_EN(txq->bp) &&
- (txq->bp->ulp_ctx->cfg_data->tx_cfa_action ||
- txq->vfr_tx_cfa_action)))
+ (txq->bp->tx_cfa_action || txq->vfr_tx_cfa_action)))
long_bd = true;
nr_bds = long_bd + tx_pkt->nb_segs;
@@ -192,8 +191,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
if (txq->vfr_tx_cfa_action)
cfa_action = txq->vfr_tx_cfa_action;
else
- cfa_action =
- txq->bp->ulp_ctx->cfg_data->tx_cfa_action;
+ cfa_action = txq->bp->tx_cfa_action;
}
/* HW can accelerate only outer vlan in QinQ mode */
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 7c65a4b..2febd58 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -9,9 +9,9 @@
#include <rte_flow_driver.h>
#include <rte_tailq.h>
+#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
-#include "bnxt.h"
#include "tf_core.h"
#include "tf_ext_flow_handle.h"
@@ -381,6 +381,7 @@ ulp_ctx_init(struct bnxt *bp,
(void)ulp_ctx_deinit(bp, session);
return rc;
}
+
bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, session->g_tfp);
return rc;
}
@@ -654,6 +655,9 @@ bnxt_ulp_init(struct bnxt *bp)
bool init;
int rc;
+ if (!BNXT_TRUFLOW_EN(bp))
+ return 0;
+
if (bp->ulp_ctx) {
BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
return -EINVAL;
@@ -822,6 +826,9 @@ bnxt_ulp_deinit(struct bnxt *bp)
struct rte_pci_device *pci_dev;
struct rte_pci_addr *pci_addr;
+ if (!BNXT_TRUFLOW_EN(bp))
+ return;
+
/* Get the session first */
pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
pci_addr = &pci_dev->addr;
@@ -833,6 +840,9 @@ bnxt_ulp_deinit(struct bnxt *bp)
if (!session)
return;
+ /* clean up default flows */
+ bnxt_ulp_destroy_df_rules(bp, true);
+
/* clean up regular flows */
ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE);
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
index 7c95ead..d532452 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
@@ -22,6 +22,12 @@
#define BNXT_ULP_VF_REP_ENABLED 0x1
#define ULP_VF_REP_IS_ENABLED(flag) ((flag) & BNXT_ULP_VF_REP_ENABLED)
+struct bnxt_ulp_df_rule_info {
+ uint32_t port_to_app_flow_id;
+ uint32_t app_to_port_flow_id;
+ uint8_t valid;
+};
+
struct bnxt_ulp_data {
uint32_t tbl_scope_id;
struct bnxt_ulp_mark_tbl *mark_tbl;
@@ -32,9 +38,7 @@ struct bnxt_ulp_data {
struct bnxt_ulp_port_db *port_db;
struct bnxt_ulp_fc_info *fc_info;
uint32_t ulp_flags;
- uint32_t port_to_app_flow_id;
- uint32_t app_to_port_flow_id;
- uint32_t tx_cfa_action;
+ struct bnxt_ulp_df_rule_info df_rule_info[RTE_MAX_ETHPORTS];
};
struct bnxt_ulp_context {
@@ -175,4 +179,8 @@ int32_t
bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
uint32_t *flags);
+int32_t
+bnxt_ulp_get_df_rule_info(uint8_t port_id, struct bnxt_ulp_context *ulp_ctx,
+ struct bnxt_ulp_df_rule_info *info);
+
#endif /* _BNXT_ULP_H_ */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
index ddc6da8..9fb1a02 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
@@ -392,3 +392,119 @@ ulp_default_flow_destroy(struct rte_eth_dev *eth_dev, uint32_t flow_id)
return rc;
}
+
+void
+bnxt_ulp_destroy_df_rules(struct bnxt *bp, bool global)
+{
+ struct bnxt_ulp_df_rule_info *info;
+ uint8_t port_id;
+
+ if (!BNXT_TRUFLOW_EN(bp) ||
+ BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
+ return;
+
+ if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
+ return;
+
+ /* Delete default rules per port */
+ if (!global) {
+ port_id = bp->eth_dev->data->port_id;
+ info = &bp->ulp_ctx->cfg_data->df_rule_info[port_id];
+ if (!info->valid)
+ return;
+
+ ulp_default_flow_destroy(bp->eth_dev,
+ info->port_to_app_flow_id);
+ ulp_default_flow_destroy(bp->eth_dev,
+ info->app_to_port_flow_id);
+ info->valid = false;
+ return;
+ }
+
+ /* Delete default rules for all ports */
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ info = &bp->ulp_ctx->cfg_data->df_rule_info[port_id];
+ if (!info->valid)
+ continue;
+
+ ulp_default_flow_destroy(bp->eth_dev,
+ info->port_to_app_flow_id);
+ ulp_default_flow_destroy(bp->eth_dev,
+ info->app_to_port_flow_id);
+ info->valid = false;
+ }
+}
+
+static int32_t
+bnxt_create_port_app_df_rule(struct bnxt *bp, uint8_t flow_type,
+ uint32_t *flow_id)
+{
+ uint16_t port_id = bp->eth_dev->data->port_id;
+ struct ulp_tlv_param param_list[] = {
+ {
+ .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
+ .length = 2,
+ .value = {(port_id >> 8) & 0xff, port_id & 0xff}
+ },
+ {
+ .type = BNXT_ULP_DF_PARAM_TYPE_LAST,
+ .length = 0,
+ .value = {0}
+ }
+ };
+
+ return ulp_default_flow_create(bp->eth_dev, param_list, flow_type,
+ flow_id);
+}
+
+int32_t
+bnxt_ulp_create_df_rules(struct bnxt *bp)
+{
+ struct bnxt_ulp_df_rule_info *info;
+ uint8_t port_id;
+ int rc;
+
+ if (!BNXT_TRUFLOW_EN(bp) ||
+ BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
+ return 0;
+
+ port_id = bp->eth_dev->data->port_id;
+ info = &bp->ulp_ctx->cfg_data->df_rule_info[port_id];
+ BNXT_TF_DBG(INFO, "*** creating port to app default rule ***\n");
+ rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_PORT_TO_VS,
+ &info->port_to_app_flow_id);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to create port to app default rule\n");
+ return rc;
+ }
+ BNXT_TF_DBG(INFO, "*** created port to app default rule ***\n");
+
+ bp->tx_cfa_action = 0;
+ BNXT_TF_DBG(INFO, "*** creating app to port default rule ***\n");
+ rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_VS_TO_PORT,
+ &info->app_to_port_flow_id);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to create app to port default rule\n");
+ goto port_to_app_free;
+ }
+
+ rc = ulp_default_flow_db_cfa_action_get(bp->ulp_ctx,
+ info->app_to_port_flow_id,
+ &bp->tx_cfa_action);
+ if (rc)
+ goto app_to_port_free;
+
+ info->valid = true;
+ BNXT_TF_DBG(INFO, "*** created app to port default rule ***\n");
+ return 0;
+
+app_to_port_free:
+ ulp_default_flow_destroy(bp->eth_dev, info->app_to_port_flow_id);
+port_to_app_free:
+ ulp_default_flow_destroy(bp->eth_dev, info->port_to_app_flow_id);
+ info->valid = false;
+
+ return rc;
+}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
index a3cfe54..7144517 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
@@ -962,7 +962,7 @@ ulp_flow_db_resource_hndl_get(struct bnxt_ulp_context *ulp_ctx,
int32_t
ulp_default_flow_db_cfa_action_get(struct bnxt_ulp_context *ulp_ctx,
uint32_t flow_id,
- uint32_t *cfa_action)
+ uint16_t *cfa_action)
{
uint8_t sub_type = BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_VFR_CFA_ACTION;
uint64_t hndl;
diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
index 1fc0601..117e250 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
@@ -234,7 +234,7 @@ ulp_flow_db_validate_flow_func(struct bnxt_ulp_context *ulp_ctx,
int32_t
ulp_default_flow_db_cfa_action_get(struct bnxt_ulp_context *ulp_ctx,
uint32_t flow_id,
- uint32_t *cfa_action);
+ uint16_t *cfa_action);
#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
/*
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH 15/20] net/bnxt: delete VF FW rules when a representor is created
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (13 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 14/20] net/bnxt: fix port default rule create and destroy Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 16/20] net/bnxt: shadow tcam and tbl reference count modification Somnath Kotur
` (5 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
The Truflow stack adds VFR-to-VF and VF-to-VFR conduits when a VF
representor is created. However, in the ingress direction the
VF's firmware rules conflict with the Truflow rules, so the Truflow
VFR rules are never hit. To fix this, the firmware removes its
VF rules when a VF representor is created in Truflow mode and
restores the removed rules when the VF representor is destroyed.
This patch invokes the VF representor alloc and free HWRM commands,
as part of which the firmware performs the actions described above.
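A minimal sketch of the intended pairing, assuming only the
bnxt_hwrm_cfa_vfr_alloc/free prototypes added below; the helper names are
hypothetical, the conduit setup is elided, and the teardown ordering is
illustrative only.

#include "bnxt.h"
#include "bnxt_hwrm.h"

/* Create the VFR conduits first, then ask FW to pull its VF rules. */
static int example_vfr_bringup(struct bnxt *parent_bp, uint16_t vf_idx)
{
        /* ... REP2VF and VF2REP default flows created here ... */
        return bnxt_hwrm_cfa_vfr_alloc(parent_bp, vf_idx);
}

/* On teardown FW restores the VF rules it removed at alloc time. */
static void example_vfr_teardown(struct bnxt *parent_bp, uint16_t vf_idx)
{
        bnxt_hwrm_cfa_vfr_free(parent_bp, vf_idx);
        /* ... REP2VF and VF2REP default flows destroyed here ... */
}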
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Shahaji Bhosle <shahaji.bhosle@broadcom.com>
---
drivers/net/bnxt/bnxt_hwrm.c | 49 ++++++++++++
drivers/net/bnxt/bnxt_hwrm.h | 2 +
drivers/net/bnxt/bnxt_reps.c | 19 ++++-
drivers/net/bnxt/hsi_struct_def_dpdk.h | 138 +++++++++++++++++++++++++++++++++
4 files changed, 205 insertions(+), 3 deletions(-)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 7ea13a8..f5f0dfe 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -5486,6 +5486,55 @@ int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
return 0;
}
+int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx)
+{
+ struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_vfr_alloc_input req = {0};
+ int rc;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_VFR_ALLOC, BNXT_USE_CHIMP_MB);
+ req.vf_id = rte_cpu_to_le_16(vf_idx);
+ snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
+ bp->eth_dev->data->name, vf_idx);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "VFR %d allocated\n", vf_idx);
+ return rc;
+}
+
+int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx)
+{
+ struct hwrm_cfa_vfr_free_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_vfr_free_input req = {0};
+ int rc;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_VFR_FREE, BNXT_USE_CHIMP_MB);
+ req.vf_id = rte_cpu_to_le_16(vf_idx);
+ snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
+ bp->eth_dev->data->name, vf_idx);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "VFR %d freed\n", vf_idx);
+ return rc;
+}
+
#ifdef RTE_LIBRTE_BNXT_PMD_SYSTEM
int
bnxt_hwrm_oem_cmd(struct bnxt *bp, uint32_t entry_num)
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 01201a7..4a2af13 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -278,4 +278,6 @@ int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp);
int bnxt_hwrm_oem_cmd(struct bnxt *bp, uint32_t entry_num);
int bnxt_clear_one_vnic_filter(struct bnxt *bp,
struct bnxt_filter_info *filter);
+int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx);
+int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx);
#endif
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index c425e69..2f775e0 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -272,7 +272,7 @@ static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
if (rc) {
BNXT_TF_DBG(DEBUG,
"Default flow rule creation for VFR->VF failed!\n");
- return -EIO;
+ goto err;
}
BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VFR->VF! ***\n");
@@ -283,7 +283,7 @@ static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
if (rc) {
BNXT_TF_DBG(DEBUG,
"Failed to get action_ptr for VFR->VF dflt rule\n");
- return -EIO;
+ goto rep2vf_free;
}
BNXT_TF_DBG(DEBUG, "tx_cfa_action = %d\n", vfr->vfr_tx_cfa_action);
rc = ulp_default_flow_create(parent_dev, param_list,
@@ -292,13 +292,24 @@ static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
if (rc) {
BNXT_TF_DBG(DEBUG,
"Default flow rule creation for VF->VFR failed!\n");
- return -EIO;
+ goto rep2vf_free;
}
BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VF->VFR! ***\n");
BNXT_TF_DBG(DEBUG, "vfr2rep_flow_id = %d\n", vfr->vf2rep_flow_id);
+ rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp, vfr->vf_id);
+ if (rc)
+ goto vf2rep_free;
+
return 0;
+
+vf2rep_free:
+ ulp_default_flow_destroy(vfr->parent_dev, vfr->vf2rep_flow_id);
+rep2vf_free:
+ ulp_default_flow_destroy(vfr->parent_dev, vfr->rep2vf_flow_id);
+err:
+ return -EIO;
}
static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
@@ -414,6 +425,8 @@ static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
vfr->vfr_tx_cfa_action = 0;
vfr->rx_cfa_code = 0;
+ rc = bnxt_hwrm_cfa_vfr_free(parent_bp, vfr->vf_id);
+
return rc;
}
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index 598da71..3553935 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -35127,6 +35127,144 @@ struct hwrm_cfa_pair_info_output {
uint8_t valid;
} __rte_packed;
+/**********************
+ * hwrm_cfa_vfr_alloc *
+ **********************/
+
+
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vfr_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_id;
+ /*
+ * This field is reserved for the future use.
+ * It shall be set to 0.
+ */
+ uint16_t reserved;
+ uint8_t unused_0[4];
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+} __rte_packed;
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/*********************
+ * hwrm_cfa_vfr_free *
+ *********************/
+
+
+/* hwrm_cfa_vfr_free_input (size:448b/56B) */
+struct hwrm_cfa_vfr_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_id;
+ uint16_t reserved;
+ uint8_t unused_0[4];
+} __rte_packed;
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+
+
/***************************************
* hwrm_cfa_redirect_query_tunnel_type *
***************************************/
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH 16/20] net/bnxt: shadow tcam and tbl reference count modification
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (14 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 15/20] net/bnxt: delete VF FW rules when a representor is created Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 17/20] net/bnxt: tcam table processing support for search and alloc Somnath Kotur
` (4 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Moved setting the refcnt for shadow tcam and table entries to the
allocation path only. The insert can be called multiple times for
updates and was resetting the refcnt to 1 each time. Now multiple
insertions/modifications do not change the reference count.
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/tf_core/tf_shadow_tbl.c | 2 --
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 2 +-
2 files changed, 1 insertion(+), 3 deletions(-)
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
index 019a26e..a4207eb 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
@@ -687,8 +687,6 @@ tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms)
if (!TF_SHADOW_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
return 0;
- sr_entry->refcnt = 1;
-
return 0;
}
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
index a0130d6..e2c347a 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
@@ -472,6 +472,7 @@ tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms)
/* Write the result table */
sr_entry->key_size = parms->key_size;
sr_entry->hb_handle = parms->hb_handle;
+ sr_entry->refcnt = 1;
return 0;
}
@@ -738,7 +739,6 @@ tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms)
memcpy(sr_entry->result, sparms->result, sparms->result_size);
sr_entry->result_size = sparms->result_size;
- sr_entry->refcnt = 1;
return 0;
}
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH 17/20] net/bnxt: tcam table processing support for search and alloc
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (15 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 16/20] net/bnxt: shadow tcam and tbl reference count modification Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 18/20] net/bnxt: added templates for search before alloc Somnath Kotur
` (3 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Added TCAM table processing support to enable search and allocate.
This also includes support for TCAM entry updates.
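A minimal sketch of the search-and-allocate TCAM flow wired in below,
assuming the tf_search_tcam_entry and tf_set_tcam_entry parameter fields
used elsewhere in this series; the helper name and the priority value are
placeholders, and error handling is trimmed.

#include <errno.h>
#include "tf_core.h"

/* Search with alloc set; program key/mask/result only on a MISS. */
static int
example_tcam_search_alloc(struct tf *tfp, enum tf_dir dir,
                          enum tf_tcam_tbl_type type,
                          uint8_t *key, uint8_t *mask, uint16_t key_bits,
                          uint8_t *result, uint16_t result_bits,
                          uint16_t *index)
{
        struct tf_search_tcam_entry_parms srch = { 0 };
        struct tf_set_tcam_entry_parms set = { 0 };
        int rc;

        srch.dir = dir;
        srch.tcam_tbl_type = type;
        srch.key = key;
        srch.mask = mask;
        srch.key_sz_in_bits = key_bits;
        srch.priority = 0;              /* placeholder priority */
        srch.alloc = 1;
        srch.result = result;
        srch.result_sz_in_bits = result_bits;

        rc = tf_search_tcam_entry(tfp, &srch);
        if (rc)
                return rc;
        if (srch.search_status == REJECT)
                return -ENOMEM;

        *index = srch.idx;
        if (srch.search_status == HIT)
                return 0;               /* reuse the existing entry */

        /* MISS: an index was allocated, write the entry */
        set.dir = dir;
        set.tcam_tbl_type = type;
        set.idx = srch.idx;
        set.key = key;
        set.mask = mask;
        set.key_sz_in_bits = key_bits;
        set.result = result;
        set.result_sz_in_bits = result_bits;
        return tf_set_tcam_entry(tfp, &set);
}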
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 317 ++++++++++++++++---------
drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h | 5 +-
drivers/net/bnxt/tf_ulp/ulp_template_struct.h | 8 +-
3 files changed, 213 insertions(+), 117 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 4dee659..6ac4b0f 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -1317,20 +1317,177 @@ ulp_mapper_mark_vfr_idx_process(struct bnxt_ulp_mapper_parms *parms,
return rc;
}
+/*
+ * Tcam table - create the result blob.
+ * data [out] - the result blob data
+ */
+static int32_t
+ulp_mapper_tcam_tbl_result_create(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl,
+ struct ulp_blob *data)
+{
+ struct bnxt_ulp_mapper_result_field_info *dflds;
+ uint32_t num_dflds;
+ uint32_t encap_flds = 0;
+ uint32_t i;
+ int32_t rc = 0;
+
+ /* Create the result data blob */
+ dflds = ulp_mapper_result_fields_get(tbl, &num_dflds,
+ &encap_flds);
+ if (!dflds || !num_dflds || encap_flds) {
+ BNXT_TF_DBG(ERR, "Failed to get data fields.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_dflds; i++) {
+ rc = ulp_mapper_result_field_process(parms,
+ tbl->direction,
+ &dflds[i],
+ data,
+ "TCAM Result");
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to set data fields\n");
+ return -EINVAL;
+ }
+ }
+ return rc;
+}
+
+/* Tcam table scan the identifier list and allocate each identifier */
+static int32_t
+ulp_mapper_tcam_tbl_scan_ident_alloc(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl)
+{
+ struct bnxt_ulp_mapper_ident_info *idents;
+ uint32_t num_idents;
+ uint32_t i;
+
+ /*
+ * Since the cache entry is responsible for allocating
+ * identifiers when in use, allocate the identifiers only
+ * during normal processing.
+ */
+ if (parms->tcam_tbl_opc ==
+ BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL) {
+ idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
+
+ for (i = 0; i < num_idents; i++) {
+ if (ulp_mapper_ident_process(parms, tbl,
+ &idents[i], NULL))
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Tcam table scan the identifier list and extract the identifier from
+ * the result blob.
+ */
+static int32_t
+ulp_mapper_tcam_tbl_scan_ident_extract(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl,
+ struct ulp_blob *data)
+{
+ struct bnxt_ulp_mapper_ident_info *idents;
+ uint32_t num_idents = 0, i;
+ int32_t rc = 0;
+
+ /*
+ * Extract the listed identifiers from the result field,
+ * no need to allocate them.
+ */
+ idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
+ for (i = 0; i < num_idents; i++) {
+ rc = ulp_mapper_ident_extract(parms, tbl, &idents[i], data);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Error in identifier extraction\n");
+ return rc;
+ }
+ }
+ return rc;
+}
+
+/* Internal function to write the tcam entry */
+static int32_t
+ulp_mapper_tcam_tbl_entry_write(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl,
+ struct ulp_blob *key,
+ struct ulp_blob *mask,
+ struct ulp_blob *data,
+ uint16_t idx)
+{
+ struct tf_set_tcam_entry_parms sparms = { 0 };
+ struct tf *tfp;
+ uint16_t tmplen;
+ int32_t rc;
+
+ tfp = bnxt_ulp_cntxt_tfp_get(parms->ulp_ctx);
+ if (!tfp) {
+ BNXT_TF_DBG(ERR, "Failed to get truflow pointer\n");
+ return -EINVAL;
+ }
+
+ sparms.dir = tbl->direction;
+ sparms.tcam_tbl_type = tbl->resource_type;
+ sparms.idx = idx;
+ /* Already verified the key/mask lengths */
+ sparms.key = ulp_blob_data_get(key, &tmplen);
+ sparms.mask = ulp_blob_data_get(mask, &tmplen);
+ sparms.key_sz_in_bits = tbl->key_bit_size;
+ sparms.result = ulp_blob_data_get(data, &tmplen);
+
+ if (tbl->result_bit_size != tmplen) {
+ BNXT_TF_DBG(ERR, "Result len (%d) != Expected (%d)\n",
+ tmplen, tbl->result_bit_size);
+ return -EINVAL;
+ }
+ sparms.result_sz_in_bits = tbl->result_bit_size;
+ if (tf_set_tcam_entry(tfp, &sparms)) {
+ BNXT_TF_DBG(ERR, "tcam[%s][%s][%x] write failed.\n",
+ tf_tcam_tbl_2_str(sparms.tcam_tbl_type),
+ tf_dir_2_str(sparms.dir), sparms.idx);
+ return -EIO;
+ }
+ BNXT_TF_DBG(INFO, "tcam[%s][%s][%x] write success.\n",
+ tf_tcam_tbl_2_str(sparms.tcam_tbl_type),
+ tf_dir_2_str(sparms.dir), sparms.idx);
+
+ /* Update cache with TCAM index if the cache was allocated. */
+ if (parms->tcam_tbl_opc ==
+ BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_ALLOC) {
+ if (!parms->cache_ptr) {
+ BNXT_TF_DBG(ERR, "Unable to update cache");
+ return -EINVAL;
+ }
+ parms->cache_ptr->tcam_idx = idx;
+ }
+
+ /* Mark action */
+ rc = ulp_mapper_mark_act_ptr_process(parms, tbl);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "failed mark action processing\n");
+ return rc;
+ }
+
+ return rc;
+}
+
static int32_t
ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct bnxt_ulp_mapper_tbl_info *tbl)
{
struct bnxt_ulp_mapper_class_key_field_info *kflds;
- struct ulp_blob key, mask, data;
+ struct ulp_blob key, mask, data, update_data;
uint32_t i, num_kflds;
struct tf *tfp;
int32_t rc, trc;
struct tf_alloc_tcam_entry_parms aparms = { 0 };
struct tf_search_tcam_entry_parms searchparms = { 0 };
- struct tf_set_tcam_entry_parms sparms = { 0 };
struct ulp_flow_db_res_params fid_parms = { 0 };
struct tf_free_tcam_entry_parms free_parms = { 0 };
+ enum bnxt_ulp_search_before_alloc search_flag;
uint32_t hit = 0;
uint16_t tmplen = 0;
uint16_t idx;
@@ -1358,6 +1515,8 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
!ulp_blob_init(&mask, tbl->key_bit_size,
parms->device_params->byte_order) ||
!ulp_blob_init(&data, tbl->result_bit_size,
+ parms->device_params->byte_order) ||
+ !ulp_blob_init(&update_data, tbl->result_bit_size,
parms->device_params->byte_order)) {
BNXT_TF_DBG(ERR, "blob inits failed.\n");
return -EINVAL;
@@ -1388,7 +1547,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
}
}
- if (!tbl->srch_b4_alloc) {
+ if (tbl->srch_b4_alloc == BNXT_ULP_SEARCH_BEFORE_ALLOC_NO) {
/*
* No search for re-use is requested, so simply allocate the
* tcam index.
@@ -1455,113 +1614,49 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
hit = searchparms.hit;
}
- /* Build the result */
- if (!tbl->srch_b4_alloc || !hit) {
- struct bnxt_ulp_mapper_result_field_info *dflds;
- struct bnxt_ulp_mapper_ident_info *idents;
- uint32_t num_dflds, num_idents;
- uint32_t encap_flds = 0;
-
- /*
- * Since the cache entry is responsible for allocating
- * identifiers when in use, allocate the identifiers only
- * during normal processing.
- */
- if (parms->tcam_tbl_opc ==
- BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL) {
- idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
-
- for (i = 0; i < num_idents; i++) {
- rc = ulp_mapper_ident_process(parms, tbl,
- &idents[i], NULL);
- /* Already logged the error, just return */
- if (rc)
- goto error;
- }
- }
-
- /* Create the result data blob */
- dflds = ulp_mapper_result_fields_get(tbl, &num_dflds,
- &encap_flds);
- if (!dflds || !num_dflds || encap_flds) {
- BNXT_TF_DBG(ERR, "Failed to get data fields.\n");
- rc = -EINVAL;
- goto error;
- }
-
- for (i = 0; i < num_dflds; i++) {
- rc = ulp_mapper_result_field_process(parms,
- tbl->direction,
- &dflds[i],
- &data,
- "TCAM Result");
- if (rc) {
- BNXT_TF_DBG(ERR, "Failed to set data fields\n");
- goto error;
- }
- }
-
- sparms.dir = tbl->direction;
- sparms.tcam_tbl_type = tbl->resource_type;
- sparms.idx = idx;
- /* Already verified the key/mask lengths */
- sparms.key = ulp_blob_data_get(&key, &tmplen);
- sparms.mask = ulp_blob_data_get(&mask, &tmplen);
- sparms.key_sz_in_bits = tbl->key_bit_size;
- sparms.result = ulp_blob_data_get(&data, &tmplen);
-
- if (tbl->result_bit_size != tmplen) {
- BNXT_TF_DBG(ERR, "Result len (%d) != Expected (%d)\n",
- tmplen, tbl->result_bit_size);
- rc = -EINVAL;
- goto error;
- }
- sparms.result_sz_in_bits = tbl->result_bit_size;
-
- rc = tf_set_tcam_entry(tfp, &sparms);
- if (rc) {
- BNXT_TF_DBG(ERR, "tcam[%d][%s][%d] write failed.\n",
- sparms.tcam_tbl_type,
- (sparms.dir == TF_DIR_RX) ? "RX" : "TX",
- sparms.idx);
- goto error;
- }
-
- /* Update cache with TCAM index if the was cache allocated. */
- if (parms->tcam_tbl_opc ==
- BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_ALLOC) {
- if (!parms->cache_ptr) {
- BNXT_TF_DBG(ERR, "Unable to update cache");
- rc = -EINVAL;
- goto error;
- }
- parms->cache_ptr->tcam_idx = idx;
- }
-
- /* Mark action */
- rc = ulp_mapper_mark_act_ptr_process(parms, tbl);
- if (rc)
- goto error;
-
- } else {
- struct bnxt_ulp_mapper_ident_info *idents;
- uint32_t num_idents;
-
- /*
- * Extract the listed identifiers from the result field,
- * no need to allocate them.
- */
- idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
- for (i = 0; i < num_idents; i++) {
- rc = ulp_mapper_ident_extract(parms, tbl,
- &idents[i], &data);
- if (rc) {
- BNXT_TF_DBG(ERR,
- "Error in ident extraction\n");
- goto error;
- }
- }
+ /* a miss is handled the same as no search before alloc */
+ if (!hit)
+ search_flag = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO;
+ else
+ search_flag = tbl->srch_b4_alloc;
+
+ switch (search_flag) {
+ case BNXT_ULP_SEARCH_BEFORE_ALLOC_NO:
+ /* Scan identifier list, allocate identifier and update regfile */
+ rc = ulp_mapper_tcam_tbl_scan_ident_alloc(parms, tbl);
+ /* Create the result blob */
+ if (!rc)
+ rc = ulp_mapper_tcam_tbl_result_create(parms, tbl,
+ &data);
+ /* write the tcam entry */
+ if (!rc)
+ rc = ulp_mapper_tcam_tbl_entry_write(parms, tbl, &key,
+ &mask, &data, idx);
+ break;
+ case BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP:
+ /* Scan identifier list, extract identifier and update regfile */
+ rc = ulp_mapper_tcam_tbl_scan_ident_extract(parms, tbl, &data);
+ break;
+ case BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_UPDATE:
+ /* Scan identifier list, extract identifier and update regfile */
+ rc = ulp_mapper_tcam_tbl_scan_ident_extract(parms, tbl, &data);
+ /* Create the result blob */
+ if (!rc)
+ rc = ulp_mapper_tcam_tbl_result_create(parms, tbl,
+ &update_data);
+ /* Update/overwrite the tcam entry */
+ if (!rc)
+ rc = ulp_mapper_tcam_tbl_entry_write(parms, tbl, &key,
+ &mask,
+ &update_data, idx);
+ break;
+ default:
+ BNXT_TF_DBG(ERR, "invalid search opcode\n");
+ rc = -EINVAL;
+ break;
}
+ if (rc)
+ goto error;
/*
* Only link the entry to the flow db in the event that cache was not
@@ -1598,11 +1693,11 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
parms->tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL;
free_parms.dir = tbl->direction;
free_parms.tcam_tbl_type = tbl->resource_type;
- free_parms.idx = aparms.idx;
+ free_parms.idx = idx;
trc = tf_free_tcam_entry(tfp, &free_parms);
if (trc)
BNXT_TF_DBG(ERR, "Failed to free tcam[%d][%d][%d] on failure\n",
- tbl->resource_type, tbl->direction, aparms.idx);
+ tbl->resource_type, tbl->direction, idx);
return rc;
}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index ac651f6..9855918 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -271,8 +271,9 @@ enum bnxt_ulp_regfile_index {
enum bnxt_ulp_search_before_alloc {
BNXT_ULP_SEARCH_BEFORE_ALLOC_NO = 0,
- BNXT_ULP_SEARCH_BEFORE_ALLOC_YES = 1,
- BNXT_ULP_SEARCH_BEFORE_ALLOC_LAST = 2
+ BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP = 1,
+ BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_UPDATE = 2,
+ BNXT_ULP_SEARCH_BEFORE_ALLOC_LAST = 3
};
enum bnxt_ulp_fdb_resource_flags {
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
index b9a25b0..6617ab9 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
@@ -173,10 +173,10 @@ struct bnxt_ulp_mapper_tbl_info {
enum bnxt_ulp_resource_sub_type resource_sub_type;
enum bnxt_ulp_cond_opcode cond_opcode;
uint32_t cond_operand;
- uint8_t direction;
- uint32_t priority;
- uint8_t srch_b4_alloc;
- enum bnxt_ulp_critical_resource critical_resource;
+ uint8_t direction;
+ uint32_t priority;
+ enum bnxt_ulp_search_before_alloc srch_b4_alloc;
+ enum bnxt_ulp_critical_resource critical_resource;
/* Information for accessing the ulp_key_field_list */
uint32_t key_start_idx;
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH 18/20] net/bnxt: added templates for search before alloc
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (16 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 17/20] net/bnxt: tcam table processing support for search and alloc Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 19/20] net/bnxt: enabled shadow tables during session open Somnath Kotur
` (2 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Search before alloc allows reuse of constrained resources such as tcam,
encap, and source modifications. The new templates will search for the
entry and allocate only if necessary.
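For reference, an illustrative fragment of a template entry that opts into
reuse (assumes the bnxt ULP template headers; only the srch_b4_alloc value
is the point of this patch, the other fields are placeholders):

  struct bnxt_ulp_mapper_tbl_info tbl_entry = {
          .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
          .direction     = TF_DIR_RX,
          .priority      = BNXT_ULP_PRIORITY_LEVEL_0,
          /* search the shadow copy first; on a hit reuse the entry */
          .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
  };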
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_template_db_act.c | 6 +++---
drivers/net/bnxt/tf_ulp/ulp_template_db_class.c | 14 +++++++-------
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
index 58b581c..14ce16e 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
@@ -1052,7 +1052,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_act_tbl_list[] = {
.cond_opcode = BNXT_ULP_COND_OPCODE_COMP_FIELD_IS_SET,
.cond_operand = BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
.direction = TF_DIR_TX,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.result_start_idx = 96,
.result_bit_size = 0,
.result_num_fields = 0,
@@ -1069,7 +1069,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_act_tbl_list[] = {
.cond_opcode = BNXT_ULP_COND_OPCODE_COMP_FIELD_IS_SET,
.cond_operand = BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
.direction = TF_DIR_TX,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.result_start_idx = 99,
.result_bit_size = 0,
.result_num_fields = 0,
@@ -1084,7 +1084,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_act_tbl_list[] = {
.resource_sub_type =
BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_NORMAL,
.direction = TF_DIR_TX,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.result_start_idx = 102,
.result_bit_size = 0,
.result_num_fields = 0,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
index 41d1d87..94160a9 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
@@ -3782,7 +3782,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.key_start_idx = 108,
.blob_key_bit_size = 167,
.key_bit_size = 167,
@@ -4210,7 +4210,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.key_start_idx = 457,
.blob_key_bit_size = 167,
.key_bit_size = 167,
@@ -4282,7 +4282,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.key_start_idx = 526,
.blob_key_bit_size = 167,
.key_bit_size = 167,
@@ -4354,7 +4354,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.key_start_idx = 595,
.blob_key_bit_size = 167,
.key_bit_size = 167,
@@ -4426,7 +4426,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.key_start_idx = 664,
.blob_key_bit_size = 167,
.key_bit_size = 167,
@@ -4570,7 +4570,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.key_start_idx = 802,
.blob_key_bit_size = 167,
.key_bit_size = 167,
@@ -4998,7 +4998,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_TX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_UPDATE,
.key_start_idx = 1151,
.blob_key_bit_size = 167,
.key_bit_size = 167,
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH 19/20] net/bnxt: enabled shadow tables during session open
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (17 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 18/20] net/bnxt: added templates for search before alloc Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:13 ` [dpdk-dev] [PATCH 20/20] net/bnxt: cleanup of VF-representor dev ops Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Turn on shadow memory in the core to allow search before allocate.
This allows reuse of constrained resources.
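A hedged fragment of what the session-open path does after this change
(assuming the surrounding ulp_ctx_session_open() context in bnxt_ulp.c;
only the shadow_copy flag changes here):

  struct tf_open_session_parms params = { 0 };
  int rc;

  /* ... ctrl channel name and resource requests filled in as before ... */
  params.shadow_copy = true;              /* enable shadow tables for search */
  params.device_type = TF_DEVICE_TYPE_WH;
  rc = tf_open_session(&bp->tfp, &params);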
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
---
drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 2febd58..077527f 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -81,7 +81,7 @@ ulp_ctx_session_open(struct bnxt *bp,
return rc;
}
- params.shadow_copy = false;
+ params.shadow_copy = true;
params.device_type = TF_DEVICE_TYPE_WH;
resources = &params.resources;
/** RX **/
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH 20/20] net/bnxt: cleanup of VF-representor dev ops
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (18 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 19/20] net/bnxt: enabled shadow tables during session open Somnath Kotur
@ 2020-07-23 11:13 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:13 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
No need to access rx_cfa_code, cfa_code_map from the VF-Rep functions
anymore.
Fixes: 322bd6e70272 ("net/bnxt: add port representor infrastructure")
Reviewed-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
---
drivers/net/bnxt/bnxt.h | 1 -
drivers/net/bnxt/bnxt_reps.c | 75 +++++---------------------------------------
2 files changed, 7 insertions(+), 69 deletions(-)
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 41e7ae5..f4b2a3f 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -799,7 +799,6 @@ struct bnxt_vf_representor {
uint16_t dflt_vnic_id;
uint16_t svif;
uint16_t vfr_tx_cfa_action;
- uint16_t rx_cfa_code;
uint32_t rep2vf_flow_id;
uint32_t vf2rep_flow_id;
/* Private data store of associated PF/Trusted VF */
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index 2f775e0..6fa9a30 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -230,6 +230,9 @@ int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
int rc;
parent_bp = rep->parent_dev->data->dev_private;
+ if (!parent_bp)
+ return 0;
+
rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);
/* Link state. Inherited from PF or trusted VF */
@@ -324,7 +327,7 @@ static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
}
/* Check if representor has been already allocated in FW */
- if (vfr->vfr_tx_cfa_action && vfr->rx_cfa_code)
+ if (vfr->vfr_tx_cfa_action)
return 0;
/*
@@ -406,9 +409,11 @@ static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
}
parent_bp = vfr->parent_dev->data->dev_private;
+ if (!parent_bp)
+ return 0;
/* Check if representor has been already freed in FW */
- if (!vfr->vfr_tx_cfa_action && !vfr->rx_cfa_code)
+ if (!vfr->vfr_tx_cfa_action)
return 0;
rc = bnxt_tf_vfr_free(vfr);
@@ -419,11 +424,9 @@ static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
return rc;
}
- parent_bp->cfa_code_map[vfr->rx_cfa_code] = BNXT_VF_IDX_INVALID;
PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n",
vfr->vf_id);
vfr->vfr_tx_cfa_action = 0;
- vfr->rx_cfa_code = 0;
rc = bnxt_hwrm_cfa_vfr_free(parent_bp, vfr->vf_id);
@@ -456,7 +459,6 @@ int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
{
struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
struct bnxt *parent_bp;
- uint16_t max_vnics, i, j, vpool, vrxq;
unsigned int max_rx_rings;
int rc = 0;
@@ -476,7 +478,6 @@ int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->max_tx_queues = max_rx_rings;
dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
dev_info->hash_key_size = 40;
- max_vnics = parent_bp->max_vnics;
/* MTU specifics */
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -492,68 +493,6 @@ int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
- /* *INDENT-OFF* */
- dev_info->default_rxconf = (struct rte_eth_rxconf) {
- .rx_thresh = {
- .pthresh = 8,
- .hthresh = 8,
- .wthresh = 0,
- },
- .rx_free_thresh = 32,
- /* If no descriptors available, pkts are dropped by default */
- .rx_drop_en = 1,
- };
-
- dev_info->default_txconf = (struct rte_eth_txconf) {
- .tx_thresh = {
- .pthresh = 32,
- .hthresh = 0,
- .wthresh = 0,
- },
- .tx_free_thresh = 32,
- .tx_rs_thresh = 32,
- };
- eth_dev->data->dev_conf.intr_conf.lsc = 1;
-
- eth_dev->data->dev_conf.intr_conf.rxq = 1;
- dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
- dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
- dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
- dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
-
- /* *INDENT-ON* */
-
- /*
- * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
- * need further investigation.
- */
-
- /* VMDq resources */
- vpool = 64; /* ETH_64_POOLS */
- vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
- for (i = 0; i < 4; vpool >>= 1, i++) {
- if (max_vnics > vpool) {
- for (j = 0; j < 5; vrxq >>= 1, j++) {
- if (dev_info->max_rx_queues > vrxq) {
- if (vpool > vrxq)
- vpool = vrxq;
- goto found;
- }
- }
- /* Not enough resources to support VMDq */
- break;
- }
- }
- /* Not enough resources to support VMDq */
- vpool = 0;
- vrxq = 0;
-found:
- dev_info->max_vmdq_pools = vpool;
- dev_info->vmdq_queue_num = vrxq;
-
- dev_info->vmdq_pool_base = 0;
- dev_info->vmdq_queue_base = 0;
-
return 0;
}
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v2 00/20] bnxt patches
2020-07-23 11:13 [dpdk-dev] [PATCH 00/20] bnxt patches Somnath Kotur
` (19 preceding siblings ...)
2020-07-23 11:13 ` [dpdk-dev] [PATCH 20/20] net/bnxt: cleanup of VF-representor dev ops Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 01/20] net/bnxt: add shadow tcam capability with search Somnath Kotur
` (20 more replies)
20 siblings, 21 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
Some fixes, cleanups and changes to augment the pre-existing
infrastructure support.
Please apply.
Kishore Padmanabha (7):
net/bnxt: nat global registers support
net/bnxt: parif for offload miss rules
net/bnxt: nat template changes
net/bnxt: configure parif for the egress rules
net/bnxt: ignore VLAN priority mask
net/bnxt: add egress template with VLAN tag match
net/bnxt: tcam table processing support for search and alloc
Mike Baucom (9):
net/bnxt: add shadow tcam capability with search
net/bnxt: ulp mapper changes to use tcam search
net/bnxt: add tf hash API
net/bnxt: modify tf shadow tcam to use common tf hash
net/bnxt: added shadow table capability with search
net/bnxt: ulp mapper changes to use tbl search
net/bnxt: shadow tcam and tbl reference count modification
net/bnxt: added templates for search before alloc
net/bnxt: enabled shadow tables during session open
Somnath Kotur (1):
net/bnxt: cleanup of VF-representor dev ops
Venkat Duvvuru (3):
net/bnxt: skip mark id injection into mbuf
net/bnxt: fix port default rule create and destroy
net/bnxt: delete VF FW rules when a representor is created
drivers/net/bnxt/bnxt.h | 6 +-
drivers/net/bnxt/bnxt_ethdev.c | 81 +--
drivers/net/bnxt/bnxt_hwrm.c | 49 ++
drivers/net/bnxt/bnxt_hwrm.h | 2 +
drivers/net/bnxt/bnxt_reps.c | 94 +--
drivers/net/bnxt/bnxt_rxr.c | 3 +
drivers/net/bnxt/bnxt_txq.h | 1 -
drivers/net/bnxt/bnxt_txr.c | 6 +-
drivers/net/bnxt/hsi_struct_def_dpdk.h | 138 ++++
drivers/net/bnxt/meson.build | 1 +
drivers/net/bnxt/tf_core/Makefile | 1 +
drivers/net/bnxt/tf_core/tf_core.c | 139 +++-
drivers/net/bnxt/tf_core/tf_core.h | 174 +++++
drivers/net/bnxt/tf_core/tf_device_p4.c | 4 +-
drivers/net/bnxt/tf_core/tf_hash.c | 106 +++
drivers/net/bnxt/tf_core/tf_hash.h | 27 +
drivers/net/bnxt/tf_core/tf_shadow_tbl.c | 766 +++++++++++++++++++++-
drivers/net/bnxt/tf_core/tf_shadow_tbl.h | 124 ++--
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 818 +++++++++++++++++++++++-
drivers/net/bnxt/tf_core/tf_shadow_tcam.h | 258 ++++----
drivers/net/bnxt/tf_core/tf_tbl.c | 246 ++++++-
drivers/net/bnxt/tf_core/tf_tbl.h | 22 +-
drivers/net/bnxt/tf_core/tf_tcam.c | 300 ++++++++-
drivers/net/bnxt/tf_core/tf_tcam.h | 31 +-
drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 97 ++-
drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 18 +-
drivers/net/bnxt/tf_ulp/ulp_def_rules.c | 127 +++-
drivers/net/bnxt/tf_ulp/ulp_flow_db.c | 2 +-
drivers/net/bnxt/tf_ulp/ulp_flow_db.h | 2 +-
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 505 ++++++++++-----
drivers/net/bnxt/tf_ulp/ulp_port_db.c | 2 +
drivers/net/bnxt/tf_ulp/ulp_port_db.h | 1 +
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 81 +++
drivers/net/bnxt/tf_ulp/ulp_template_db_act.c | 434 ++++++++-----
drivers/net/bnxt/tf_ulp/ulp_template_db_class.c | 556 ++++++++++++++--
drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h | 73 ++-
drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c | 4 +-
drivers/net/bnxt/tf_ulp/ulp_template_struct.h | 8 +-
38 files changed, 4457 insertions(+), 850 deletions(-)
create mode 100644 drivers/net/bnxt/tf_core/tf_hash.c
create mode 100644 drivers/net/bnxt/tf_core/tf_hash.h
--
v1->v2: Fixed some typos in patch 9/20
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v2 01/20] net/bnxt: add shadow tcam capability with search
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 02/20] net/bnxt: nat global registers support Somnath Kotur
` (19 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
- Add TCAM shadow tables for searching
- Add Search API to allow reuse of TCAM entries
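A hedged usage sketch of the new search API follows (buffer sizes and the
table type are illustrative, and tfp is assumed to be an already opened
TruFlow session handle; see struct tf_search_tcam_entry_parms below for
the full field list):

  uint8_t key[21], mask[21], result[8];
  struct tf_search_tcam_entry_parms sparms = { 0 };
  int rc;

  sparms.dir = TF_DIR_RX;
  sparms.tcam_tbl_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH;
  sparms.key = key;
  sparms.mask = mask;
  sparms.key_sz_in_bits = 167;    /* table specific, 21 bytes here */
  sparms.alloc = 1;               /* allocate an index on a miss */
  sparms.result = result;
  sparms.result_sz_in_bits = 64;

  rc = tf_search_tcam_entry(tfp, &sparms);
  if (!rc && sparms.search_status == HIT) {
          /* reuse sparms.idx; result holds the stored data, ref_cnt bumped */
  } else if (!rc && sparms.search_status == MISS) {
          /* build the result and program sparms.idx via tf_set_tcam_entry() */
  } else {
          /* REJECT (table full) or error */
  }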
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Randy Schacher <stuart.schacher@broadcom.com>
---
drivers/net/bnxt/tf_core/tf_core.c | 73 +++
drivers/net/bnxt/tf_core/tf_core.h | 101 ++++
drivers/net/bnxt/tf_core/tf_device_p4.c | 2 +-
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 885 +++++++++++++++++++++++++++++-
drivers/net/bnxt/tf_core/tf_shadow_tcam.h | 258 ++++-----
drivers/net/bnxt/tf_core/tf_tcam.c | 300 +++++++++-
drivers/net/bnxt/tf_core/tf_tcam.h | 31 +-
7 files changed, 1449 insertions(+), 201 deletions(-)
diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c
index 97e7952..ca3280b 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -608,6 +608,79 @@ tf_search_identifier(struct tf *tfp,
}
int
+tf_search_tcam_entry(struct tf *tfp,
+ struct tf_search_tcam_entry_parms *parms)
+{
+ int rc;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tf_tcam_alloc_search_parms sparms;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ memset(&sparms, 0, sizeof(struct tf_tcam_alloc_search_parms));
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ if (dev->ops->tf_dev_alloc_search_tcam == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ sparms.dir = parms->dir;
+ sparms.type = parms->tcam_tbl_type;
+ sparms.key = parms->key;
+ sparms.key_size = TF_BITS2BYTES_WORD_ALIGN(parms->key_sz_in_bits);
+ sparms.mask = parms->mask;
+ sparms.priority = parms->priority;
+ sparms.alloc = parms->alloc;
+
+ /* Result is an in/out and so no need to copy during outputs */
+ sparms.result = parms->result;
+ sparms.result_size =
+ TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits);
+
+ rc = dev->ops->tf_dev_alloc_search_tcam(tfp, &sparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: TCAM allocation failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Copy the outputs */
+ parms->hit = sparms.hit;
+ parms->search_status = sparms.search_status;
+ parms->ref_cnt = sparms.ref_cnt;
+ parms->idx = sparms.idx;
+
+ return 0;
+}
+
+int
tf_alloc_tcam_entry(struct tf *tfp,
struct tf_alloc_tcam_entry_parms *parms)
{
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index 67415ad..349a1f1 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -291,6 +291,18 @@ enum tf_tcam_tbl_type {
};
/**
+ * TCAM SEARCH STATUS
+ */
+enum tf_tcam_search_status {
+ /** The entry was not found, but an idx was allocated if requested. */
+ MISS,
+ /** The entry was found, and the result/idx are valid */
+ HIT,
+ /** The entry was not found and the table is full */
+ REJECT
+};
+
+/**
* EM Resources
* These defines are provisioned during
* tf_open_session()
@@ -949,6 +961,8 @@ int tf_free_tbl_scope(struct tf *tfp,
/**
* @page tcam TCAM Access
*
+ * @ref tf_search_tcam_entry
+ *
* @ref tf_alloc_tcam_entry
*
* @ref tf_set_tcam_entry
@@ -958,6 +972,93 @@ int tf_free_tbl_scope(struct tf *tfp,
* @ref tf_free_tcam_entry
*/
+/**
+ * tf_search_tcam_entry parameter definition (experimental)
+ */
+struct tf_search_tcam_entry_parms {
+ /**
+ * [in] receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] TCAM table type
+ */
+ enum tf_tcam_tbl_type tcam_tbl_type;
+ /**
+ * [in] Key data to match on
+ */
+ uint8_t *key;
+ /**
+ * [in] key size in bits
+ */
+ uint16_t key_sz_in_bits;
+ /**
+ * [in] Mask data to match on
+ */
+ uint8_t *mask;
+ /**
+ * [in] Priority of entry requested (definition TBD)
+ */
+ uint32_t priority;
+ /**
+ * [in] Allocate on miss.
+ */
+ uint8_t alloc;
+ /**
+ * [out] Set if matching entry found
+ */
+ uint8_t hit;
+ /**
+ * [out] Search result status (hit, miss, reject)
+ */
+ enum tf_tcam_search_status search_status;
+ /**
+ * [out] Current refcnt after allocation
+ */
+ uint16_t ref_cnt;
+ /**
+ * [in out] The result data from the search is copied here
+ */
+ uint8_t *result;
+ /**
+ * [in out] result size in bits for the result data
+ */
+ uint16_t result_sz_in_bits;
+ /**
+ * [out] Index found
+ */
+ uint16_t idx;
+};
+
+/**
+ * search TCAM entry (experimental)
+ *
+ * Search for a TCAM entry
+ *
+ * This function searches the shadow copy of the TCAM table for a matching
+ * entry. Key and mask must match for hit to be set. Only TruFlow core data
+ * is accessed. If shadow_copy is not enabled, an error is returned.
+ *
+ * Implementation:
+ *
+ * A hash is performed on the key/mask data and mapped to a shadow copy entry
+ * where the full key/mask is populated. If the full key/mask matches the
+ * entry, hit is set, ref_cnt is incremented, and search_status indicates what
+ * action the caller can take regarding setting the entry.
+ *
+ * search_status should be used as follows:
+ * - On Miss, the caller should create a result and call tf_set_tcam_entry with
+ * returned index.
+ *
+ * - On Reject, the hash table is full and the entry cannot be added.
+ *
+ * - On Hit, the result data is returned to the caller. Additionally, the
+ * ref_cnt is updated.
+ *
+ * Also returns success or failure code.
+ */
+int tf_search_tcam_entry(struct tf *tfp,
+ struct tf_search_tcam_entry_parms *parms);
/**
* tf_alloc_tcam_entry parameter definition
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.c b/drivers/net/bnxt/tf_core/tf_device_p4.c
index f38c38e..afb6098 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.c
@@ -133,7 +133,7 @@ const struct tf_dev_ops tf_dev_ops_p4 = {
.tf_dev_get_bulk_tbl = tf_tbl_bulk_get,
.tf_dev_alloc_tcam = tf_tcam_alloc,
.tf_dev_free_tcam = tf_tcam_free,
- .tf_dev_alloc_search_tcam = NULL,
+ .tf_dev_alloc_search_tcam = tf_tcam_alloc_search,
.tf_dev_set_tcam = tf_tcam_set,
.tf_dev_get_tcam = NULL,
.tf_dev_insert_int_em_entry = tf_em_insert_int_entry,
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
index c61b833..51aae4f 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
@@ -3,61 +3,902 @@
* All rights reserved.
*/
-#include <rte_common.h>
-
+#include "tf_common.h"
+#include "tf_util.h"
+#include "tfp.h"
#include "tf_shadow_tcam.h"
/**
- * Shadow tcam DB element
+ * The implementation includes 3 tables per tcam table type.
+ * - hash table
+ * - sized so that a minimum of 4 slots per shadow entry are available to
+ * minimize the likelihood of collisions.
+ * - shadow key table
+ * - sized to the number of entries requested and is directly indexed
+ * - the index is zero based and is the tcam index - the base address
+ * - the key and mask are stored in the key table.
+ * - The stored key is the AND of the key/mask in order to eliminate the need
+ * to compare both the key and mask.
+ * - shadow result table
+ * - the result table is stored separately since it only needs to be accessed
+ * when the key matches.
+ * - the result has a back pointer to the hash table via the hb handle. The
+ * hb handle is a 32 bit representation of the hash with a valid bit, bucket
+ * element index, and the hash index. It is necessary to store the hb handle
+ * with the result since subsequent removes only provide the tcam index.
+ *
+ * - Max entries is limited in the current implementation since bit 15 is the
+ * valid bit in the hash table.
+ * - A 16bit hash is calculated and masked based on the number of entries
+ * - 64b wide bucket is used and broken into 4x16bit elements.
+ * This decision is based on quicker bucket scanning to determine if any
+ * elements are in use.
+ * - bit 15 of each bucket element is the valid bit; this is done to prevent having
+ * to read the larger key/result data for determining VALID. It also aids
+ * in the more efficient scanning of the bucket for slot usage.
*/
-struct tf_shadow_tcam_element {
- /**
- * Hash table
- */
- void *hash;
- /**
- * Reference count, array of number of tcam entries
- */
- uint16_t *ref_count;
+/*
+ * The maximum number of shadow entries supported. The value also doubles as
+ * the maximum number of hash buckets. There are only 15 bits of data per
+ * bucket to point to the shadow tables.
+ */
+#define TF_SHADOW_TCAM_ENTRIES_MAX (1 << 15)
+
+/* The number of elements(BE) per hash bucket (HB) */
+#define TF_SHADOW_TCAM_HB_NUM_ELEM (4)
+#define TF_SHADOW_TCAM_BE_VALID (1 << 15)
+#define TF_SHADOW_TCAM_BE_IS_VALID(be) (((be) & TF_SHADOW_TCAM_BE_VALID) != 0)
+
+/**
+ * The hash bucket handle is 32b
+ * - bit 31, the Valid bit
+ * - bit 29-30, the element
+ * - bits 0-15, the hash idx (is masked based on the allocated size)
+ */
+#define TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
+#define TF_SHADOW_TCAM_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
+ ((be) << 29) | (idx))
+
+#define TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
+ (TF_SHADOW_TCAM_HB_NUM_ELEM - 1))
+
+#define TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hdl)((hdl) & \
+ (ctxt)->hash_ctxt.hid_mask)
+
+/**
+ * The idx provided by the caller is within a region, so currently the base is
+ * either added or subtracted from the idx to ensure it can be used as a
+ * compressed index
+ */
+
+/* Convert the tcam index to a shadow index */
+#define TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, idx) ((idx) - \
+ (ctxt)->shadow_ctxt.base_addr)
+
+/* Convert the shadow index to a tcam index */
+#define TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt, idx) ((idx) + \
+ (ctxt)->shadow_ctxt.base_addr)
+
+/* Simple helper masks for clearing an element from the bucket */
+#define TF_SHADOW_TCAM_BE0_MASK_CLEAR(hb) ((hb) & 0xffffffffffff0000ull)
+#define TF_SHADOW_TCAM_BE1_MASK_CLEAR(hb) ((hb) & 0xffffffff0000ffffull)
+#define TF_SHADOW_TCAM_BE2_MASK_CLEAR(hb) ((hb) & 0xffff0000ffffffffull)
+#define TF_SHADOW_TCAM_BE3_MASK_CLEAR(hb) ((hb) & 0x0000ffffffffffffull)
+
+/**
+ * This should come from an external definition, but for now it is assumed that no key
+ * is greater than 1K bits and no result is bigger than 128 bits. This makes
+ * allocation of the hash table easier without having to allocate on the fly.
+ */
+#define TF_SHADOW_TCAM_MAX_KEY_SZ 128
+#define TF_SHADOW_TCAM_MAX_RESULT_SZ 16
+
+/*
+ * Local only defines for the internal data.
+ */
+
+/**
+ * tf_shadow_tcam_shadow_key_entry is the key/mask entry of the key table.
+ * The key stored in the table is the masked version of the key. This is done
+ * to eliminate the need to compare both the key and mask.
+ */
+struct tf_shadow_tcam_shadow_key_entry {
+ uint8_t key[TF_SHADOW_TCAM_MAX_KEY_SZ];
+ uint8_t mask[TF_SHADOW_TCAM_MAX_KEY_SZ];
};
/**
- * Shadow tcam DB definition
+ * tf_shadow_tcam_shadow_result_entry is the result table entry.
+ * The result table writes are broken into two phases:
+ * - The search phase, which stores the hb_handle and key size and
+ * - The set phase, which writes the result, refcnt, and result size
+ */
+struct tf_shadow_tcam_shadow_result_entry {
+ uint8_t result[TF_SHADOW_TCAM_MAX_RESULT_SZ];
+ uint16_t result_size;
+ uint16_t key_size;
+ uint32_t refcnt;
+ uint32_t hb_handle;
+};
+
+/**
+ * tf_shadow_tcam_shadow_ctxt holds all information for accessing the key and
+ * result tables.
+ */
+struct tf_shadow_tcam_shadow_ctxt {
+ struct tf_shadow_tcam_shadow_key_entry *sh_key_tbl;
+ struct tf_shadow_tcam_shadow_result_entry *sh_res_tbl;
+ uint32_t base_addr;
+ uint16_t num_entries;
+ uint16_t alloc_idx;
+};
+
+/**
+ * tf_shadow_tcam_hash_ctxt holds all information related to accessing the hash
+ * table.
+ */
+struct tf_shadow_tcam_hash_ctxt {
+ uint64_t *hashtbl;
+ uint16_t hid_mask;
+ uint16_t hash_entries;
+};
+
+/**
+ * tf_shadow_tcam_ctxt holds the hash and shadow tables for the current shadow
+ * tcam db. This structure is per tcam table type as each tcam table has its
+ * own shadow and hash table.
+ */
+struct tf_shadow_tcam_ctxt {
+ struct tf_shadow_tcam_shadow_ctxt shadow_ctxt;
+ struct tf_shadow_tcam_hash_ctxt hash_ctxt;
+};
+
+/**
+ * tf_shadow_tcam_db is the allocated db structure returned as an opaque
+ * void * pointer to the caller during create db. It holds the pointers for
+ * each tcam associated with the db.
*/
struct tf_shadow_tcam_db {
- /**
- * The DB consists of an array of elements
- */
- struct tf_shadow_tcam_element *db;
+ /* Each context holds the shadow and hash table information */
+ struct tf_shadow_tcam_ctxt *ctxt[TF_TCAM_TBL_TYPE_MAX];
+};
+
+/* CRC polynomial 0xedb88320 */
+static const uint32_t tf_shadow_tcam_crc32tbl[] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};
+/**
+ * Returns the number of entries in the contexts shadow table.
+ */
+static inline uint16_t
+tf_shadow_tcam_sh_num_entries_get(struct tf_shadow_tcam_ctxt *ctxt)
+{
+ return ctxt->shadow_ctxt.num_entries;
+}
+
+/**
+ * Compare the give key with the key in the shadow table.
+ *
+ * Returns 0 if the keys match
+ */
+static int
+tf_shadow_tcam_key_cmp(struct tf_shadow_tcam_ctxt *ctxt,
+ uint8_t *key,
+ uint8_t *mask,
+ uint16_t sh_idx,
+ uint16_t size)
+{
+ if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
+ sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !key || !mask)
+ return -1;
+
+ return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
+}
+
+/**
+ * Copies the shadow result to the result.
+ *
+ * Returns 0 on failure
+ */
+static void *
+tf_shadow_tcam_res_cpy(struct tf_shadow_tcam_ctxt *ctxt,
+ uint8_t *result,
+ uint16_t sh_idx,
+ uint16_t size)
+{
+ if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !result)
+ return 0;
+
+ if (ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result_size != size)
+ return 0;
+
+ return memcpy(result,
+ ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result,
+ size);
+}
+
+/**
+ * Using a software based CRC function for now, but will look into using hw
+ * assisted in the future.
+ */
+static uint32_t
+tf_shadow_tcam_crc32_calc(uint8_t *key, uint32_t len)
+{
+ uint32_t crc = ~0U;
+
+ while (len--)
+ crc = tf_shadow_tcam_crc32tbl[(crc ^ key[len]) & 0xff] ^
+ (crc >> 8);
+
+ return ~crc;
+}
+
+/**
+ * Free the memory associated with the context.
+ */
+static void
+tf_shadow_tcam_ctxt_delete(struct tf_shadow_tcam_ctxt *ctxt)
+{
+ if (!ctxt)
+ return;
+
+ tfp_free(ctxt->hash_ctxt.hashtbl);
+ tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
+ tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
+}
+
+/**
+ * The TF Shadow TCAM context is per TCAM and holds all information relating to
+ * managing the shadow and search capability. This routine allocates data that
+ * needs to be deallocated by tf_shadow_tcam_ctxt_delete when deleting
+ * the shadow db.
+ */
+static int
+tf_shadow_tcam_ctxt_create(struct tf_shadow_tcam_ctxt *ctxt,
+ uint16_t num_entries,
+ uint16_t base_addr)
+{
+ struct tfp_calloc_parms cparms;
+ uint16_t hash_size = 1;
+ uint16_t hash_mask;
+ int rc;
+
+ /* Hash table is a power of two that holds the number of entries */
+ if (num_entries > TF_SHADOW_TCAM_ENTRIES_MAX) {
+ TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
+ num_entries,
+ TF_SHADOW_TCAM_ENTRIES_MAX);
+ return -ENOMEM;
+ }
+
+ while (hash_size < num_entries)
+ hash_size = hash_size << 1;
+
+ hash_mask = hash_size - 1;
+
+ /* Allocate the hash table */
+ cparms.nitems = hash_size;
+ cparms.size = sizeof(uint64_t);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->hash_ctxt.hashtbl = cparms.mem_va;
+ ctxt->hash_ctxt.hid_mask = hash_mask;
+ ctxt->hash_ctxt.hash_entries = hash_size;
+
+ /* allocate the shadow tables */
+ /* allocate the shadow key table */
+ cparms.nitems = num_entries;
+ cparms.size = sizeof(struct tf_shadow_tcam_shadow_key_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;
+
+ /* allocate the shadow result table */
+ cparms.nitems = num_entries;
+ cparms.size = sizeof(struct tf_shadow_tcam_shadow_result_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;
+
+ ctxt->shadow_ctxt.num_entries = num_entries;
+ ctxt->shadow_ctxt.base_addr = base_addr;
+
+ return 0;
+error:
+ tf_shadow_tcam_ctxt_delete(ctxt);
+
+ return -ENOMEM;
+}
+
+/**
+ * Get a shadow TCAM context given the db and the TCAM type
+ */
+static struct tf_shadow_tcam_ctxt *
+tf_shadow_tcam_ctxt_get(struct tf_shadow_tcam_db *shadow_db,
+ enum tf_tcam_tbl_type type)
+{
+ if (type >= TF_TCAM_TBL_TYPE_MAX ||
+ !shadow_db ||
+ !shadow_db->ctxt[type])
+ return NULL;
+
+ return shadow_db->ctxt[type];
+}
+
+/**
+ * Sets the hash entry into the table given the TCAM context, hash bucket
+ * handle, and shadow index.
+ */
+static inline int
+tf_shadow_tcam_set_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
+ uint32_t hb_handle,
+ uint16_t sh_idx)
+{
+ uint16_t hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ uint16_t be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
+ uint64_t entry = sh_idx | TF_SHADOW_TCAM_BE_VALID;
+
+ if (hid >= ctxt->hash_ctxt.hash_entries)
+ return -EINVAL;
+
+ ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
+ return 0;
+}
+
+/**
+ * Clears the hash entry given the TCAM context and hash bucket handle.
+ */
+static inline void
+tf_shadow_tcam_clear_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
+ uint32_t hb_handle)
+{
+ uint16_t hid, be;
+ uint64_t *bucket;
+
+ if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hb_handle))
+ return;
+
+ hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
+ bucket = &ctxt->hash_ctxt.hashtbl[hid];
+
+ switch (be) {
+ case 0:
+ *bucket = TF_SHADOW_TCAM_BE0_MASK_CLEAR(*bucket);
+ break;
+ case 1:
+ *bucket = TF_SHADOW_TCAM_BE1_MASK_CLEAR(*bucket);
+ break;
+ case 2:
+ *bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
+ break;
+ case 3:
+ *bucket = TF_SHADOW_TCAM_BE3_MASK_CLEAR(*bucket);
+ break;
+ }
+}
+
+/**
+ * Clears the shadow key and result entries given the TCAM context and
+ * shadow index.
+ */
+static void
+tf_shadow_tcam_clear_sh_entry(struct tf_shadow_tcam_ctxt *ctxt,
+ uint16_t sh_idx)
+{
+ struct tf_shadow_tcam_shadow_key_entry *sk_entry;
+ struct tf_shadow_tcam_shadow_result_entry *sr_entry;
+
+ if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt))
+ return;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];
+
+ /*
+ * memset key/result to zero for now, possibly leave the data alone
+ * in the future and rely on the valid bit in the hash table.
+ */
+ memset(sk_entry, 0, sizeof(struct tf_shadow_tcam_shadow_key_entry));
+ memset(sr_entry, 0, sizeof(struct tf_shadow_tcam_shadow_result_entry));
+}
+
+/**
+ * Binds the allocated tcam index with the hash and shadow tables.
+ * The entry will be incomplete until the set has happened with the result
+ * data.
+ */
int
-tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms __rte_unused)
+tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms)
{
+ int rc;
+ int i;
+ uint16_t idx, klen;
+ struct tf_shadow_tcam_ctxt *ctxt;
+ struct tf_shadow_tcam_db *shadow_db;
+ struct tf_shadow_tcam_shadow_key_entry *sk_entry;
+ struct tf_shadow_tcam_shadow_result_entry *sr_entry;
+ uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];
+
+ if (!parms || !TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(parms->hb_handle) ||
+ !parms->key || !parms->mask) {
+ TFP_DRV_LOG(ERR, "Invalid parms\n");
+ return -EINVAL;
+ }
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ ctxt = tf_shadow_tcam_ctxt_get(shadow_db, parms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
+ tf_tcam_tbl_2_str(parms->type));
+ return -EINVAL;
+ }
+
+ memset(tkey, 0, sizeof(tkey));
+ idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, parms->idx);
+ klen = parms->key_size;
+ if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) ||
+ klen > TF_SHADOW_TCAM_MAX_KEY_SZ) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid len (%d) > %d || oob idx %d\n",
+ tf_dir_2_str(parms->dir),
+ tf_tcam_tbl_2_str(parms->type),
+ klen,
+ TF_SHADOW_TCAM_MAX_KEY_SZ, idx);
+
+ return -EINVAL;
+ }
+
+ rc = tf_shadow_tcam_set_hash_entry(ctxt, parms->hb_handle, idx);
+ if (rc)
+ return -EINVAL;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+
+ /*
+ * Write the masked key to the table for more efficient comparisons
+ * later.
+ */
+ for (i = 0; i < klen; i++)
+ tkey[i] = parms->key[i] & parms->mask[i];
+
+ memcpy(sk_entry->key, tkey, klen);
+ memcpy(sk_entry->mask, parms->mask, klen);
+
+ /* Write the result table */
+ sr_entry->key_size = parms->key_size;
+ sr_entry->hb_handle = parms->hb_handle;
+
return 0;
}
+/**
+ * Deletes hash/shadow information if no more references.
+ *
+ * Returns 0 - The caller should delete the tcam entry in hardware.
+ * Returns non-zero - The number of references to the entry
+ */
int
-tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms __rte_unused)
+tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms)
{
+ uint16_t idx;
+ uint32_t hb_handle;
+ struct tf_shadow_tcam_ctxt *ctxt;
+ struct tf_shadow_tcam_db *shadow_db;
+ struct tf_tcam_free_parms *fparms;
+ struct tf_shadow_tcam_shadow_result_entry *sr_entry;
+
+ if (!parms || !parms->fparms) {
+ TFP_DRV_LOG(ERR, "Invalid parms\n");
+ return -EINVAL;
+ }
+
+ fparms = parms->fparms;
+
+ /*
+ * Initialize the reference count to zero. It will only be changed if
+ * non-zero.
+ */
+ fparms->ref_cnt = 0;
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ ctxt = tf_shadow_tcam_ctxt_get(shadow_db, fparms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
+ tf_tcam_tbl_2_str(fparms->type));
+ return 0;
+ }
+
+ idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, fparms->idx);
+ if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
+ TFP_DRV_LOG(DEBUG, "%s %d >= %d\n",
+ tf_tcam_tbl_2_str(fparms->type),
+ fparms->idx,
+ tf_shadow_tcam_sh_num_entries_get(ctxt));
+ return 0;
+ }
+
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+ if (sr_entry->refcnt <= 1) {
+ hb_handle = sr_entry->hb_handle;
+ tf_shadow_tcam_clear_hash_entry(ctxt, hb_handle);
+ tf_shadow_tcam_clear_sh_entry(ctxt, idx);
+ } else {
+ sr_entry->refcnt--;
+ fparms->ref_cnt = sr_entry->refcnt;
+ }
+
return 0;
}
int
-tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms __rte_unused)
+tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms)
{
+ uint16_t len;
+ uint8_t rcopy;
+ uint64_t bucket;
+ uint32_t i, hid32;
+ struct tf_shadow_tcam_ctxt *ctxt;
+ struct tf_shadow_tcam_db *shadow_db;
+ uint16_t hid16, hb_idx, hid_mask, shtbl_idx, shtbl_key, be_valid;
+ struct tf_tcam_alloc_search_parms *sparms;
+ uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];
+ uint32_t be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;
+
+ if (!parms || !parms->sparms) {
+ TFP_DRV_LOG(ERR, "tcam search with invalid parms\n");
+ return -EINVAL;
+ }
+
+ memset(tkey, 0, sizeof(tkey));
+ sparms = parms->sparms;
+
+ /* Initialize return values to invalid */
+ sparms->hit = 0;
+ sparms->search_status = REJECT;
+ parms->hb_handle = 0;
+ sparms->ref_cnt = 0;
+ /* see if caller wanted the result */
+ rcopy = sparms->result && sparms->result_size;
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(ERR, "%s Unable to get tcam mgr context\n",
+ tf_tcam_tbl_2_str(sparms->type));
+ return -EINVAL;
+ }
+
+ hid_mask = ctxt->hash_ctxt.hid_mask;
+
+ len = sparms->key_size;
+
+ if (len > TF_SHADOW_TCAM_MAX_KEY_SZ ||
+ !sparms->key || !sparms->mask || !len) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid parms %d : %p : %p\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tcam_tbl_2_str(sparms->type),
+ len,
+ sparms->key,
+ sparms->mask);
+ return -EINVAL;
+ }
+
+ /* Combine the key and mask */
+ for (i = 0; i < len; i++)
+ tkey[i] = sparms->key[i] & sparms->mask[i];
+
+ /*
+ * Calculate the crc32
+ * Fold it to create a 16b value
+ * Reduce it to fit the table
+ */
+ hid32 = tf_shadow_tcam_crc32_calc(tkey, len);
+ hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
+ hb_idx = hid16 & hid_mask;
+
+ bucket = ctxt->hash_ctxt.hashtbl[hb_idx];
+
+ if (!bucket) {
+ /* empty bucket means a miss and available entry */
+ sparms->search_status = MISS;
+ parms->hb_handle = TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, 0);
+ sparms->idx = 0;
+ return 0;
+ }
+
+ /* Set the avail to max so we can detect when there is an avail entry */
+ be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;
+ for (i = 0; i < TF_SHADOW_TCAM_HB_NUM_ELEM; i++) {
+ shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff);
+ be_valid = TF_SHADOW_TCAM_BE_IS_VALID(shtbl_idx);
+ if (!be_valid) {
+ /* The element is avail, keep going */
+ be_avail = i;
+ continue;
+ }
+ /* There is a valid entry, compare it */
+ shtbl_key = shtbl_idx & ~TF_SHADOW_TCAM_BE_VALID;
+ if (!tf_shadow_tcam_key_cmp(ctxt,
+ sparms->key,
+ sparms->mask,
+ shtbl_key,
+ sparms->key_size)) {
+ /*
+ * It matches, increment the ref count if the caller
+ * requested allocation and return the info
+ */
+ if (sparms->alloc)
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt =
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt + 1;
+
+ sparms->hit = 1;
+ sparms->search_status = HIT;
+ parms->hb_handle =
+ TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, i);
+ sparms->idx = TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt,
+ shtbl_key);
+ sparms->ref_cnt =
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt;
+
+ /* copy the result, if caller wanted it. */
+ if (rcopy &&
+ !tf_shadow_tcam_res_cpy(ctxt,
+ sparms->result,
+ shtbl_key,
+ sparms->result_size)) {
+ /*
+ * Should never get here, possible memory
+ * corruption or something unexpected.
+ */
+ TFP_DRV_LOG(ERR, "Error copying result\n");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+ }
+
+ /* No hits, return avail entry if exists */
+ if (be_avail < TF_SHADOW_TCAM_HB_NUM_ELEM) {
+ parms->hb_handle =
+ TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, be_avail);
+ sparms->search_status = MISS;
+ sparms->hit = 0;
+ sparms->idx = 0;
+ } else {
+ sparms->search_status = REJECT;
+ }
+
return 0;
}
int
-tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms __rte_unused)
+tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms)
{
+ uint16_t idx;
+ struct tf_shadow_tcam_ctxt *ctxt;
+ struct tf_tcam_set_parms *sparms;
+ struct tf_shadow_tcam_db *shadow_db;
+ struct tf_shadow_tcam_shadow_result_entry *sr_entry;
+
+ if (!parms || !parms->sparms) {
+ TFP_DRV_LOG(ERR, "Null parms\n");
+ return -EINVAL;
+ }
+
+ sparms = parms->sparms;
+ if (!sparms->result || !sparms->result_size) {
+ TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tcam_tbl_2_str(sparms->type));
+ return -EINVAL;
+ }
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
+ if (!ctxt) {
+ /* We aren't tracking this table, so return success */
+ TFP_DRV_LOG(DEBUG, "%s Unable to get tcam mgr context\n",
+ tf_tcam_tbl_2_str(sparms->type));
+ return 0;
+ }
+
+ idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, sparms->idx);
+ if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tcam_tbl_2_str(sparms->type),
+ sparms->idx);
+ return -EINVAL;
+ }
+
+ /* Write the result table, the key/hash has been written already */
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+
+ /*
+ * If the handle is not valid, the bind was never called. We aren't
+ * tracking this entry.
+ */
+ if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
+ return 0;
+
+ if (sparms->result_size > TF_SHADOW_TCAM_MAX_RESULT_SZ) {
+ TFP_DRV_LOG(ERR, "%s:%s Result length %d > %d\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tcam_tbl_2_str(sparms->type),
+ sparms->result_size,
+ TF_SHADOW_TCAM_MAX_RESULT_SZ);
+ return -EINVAL;
+ }
+
+ memcpy(sr_entry->result, sparms->result, sparms->result_size);
+ sr_entry->result_size = sparms->result_size;
+ sr_entry->refcnt = 1;
+
return 0;
}
int
-tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms __rte_unused)
+tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms)
+{
+ struct tf_shadow_tcam_db *shadow_db;
+ int i;
+
+ TF_CHECK_PARMS1(parms);
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ if (!shadow_db) {
+ TFP_DRV_LOG(DEBUG, "Shadow db is NULL cannot be freed\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
+ if (shadow_db->ctxt[i]) {
+ tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
+ tfp_free(shadow_db->ctxt[i]);
+ }
+ }
+
+ tfp_free(shadow_db);
+
+ return 0;
+}
+
+/**
+ * Create the shadow TCAM DB used for the search and allocate support
+ *
+ */
+int tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms)
{
+ int rc;
+ int i;
+ uint16_t base;
+ struct tfp_calloc_parms cparms;
+ struct tf_shadow_tcam_db *shadow_db = NULL;
+
+ TF_CHECK_PARMS1(parms);
+
+ /* Build the shadow DB per the request */
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_shadow_tcam_db);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ return rc;
+ shadow_db = (void *)cparms.mem_va;
+
+ for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
+ /* If the element didn't request an allocation no need
+ * to create a pool nor verify if we got a reservation.
+ */
+ if (!parms->cfg->alloc_cnt[i]) {
+ shadow_db->ctxt[i] = NULL;
+ continue;
+ }
+
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_shadow_tcam_ctxt);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+
+ shadow_db->ctxt[i] = cparms.mem_va;
+ base = parms->cfg->base_addr[i];
+ rc = tf_shadow_tcam_ctxt_create(shadow_db->ctxt[i],
+ parms->cfg->alloc_cnt[i],
+ base);
+ if (rc)
+ goto error;
+ }
+
+ *parms->shadow_db = (void *)shadow_db;
+
+ TFP_DRV_LOG(INFO,
+ "TF SHADOW TCAM - initialized\n");
+
return 0;
+error:
+ for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
+ if (shadow_db->ctxt[i]) {
+ tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
+ tfp_free(shadow_db->ctxt[i]);
+ }
+ }
+
+ tfp_free(shadow_db);
+
+ return -ENOMEM;
}
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.h b/drivers/net/bnxt/tf_core/tf_shadow_tcam.h
index e2c4e06..75c146a 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.h
@@ -8,232 +8,188 @@
#include "tf_core.h"
-struct tf;
-
-/**
- * The Shadow tcam module provides shadow DB handling for tcam based
- * TF types. A shadow DB provides the capability that allows for reuse
- * of TF resources.
- *
- * A Shadow tcam DB is intended to be used by the Tcam module only.
- */
-
/**
- * Shadow DB configuration information for a single tcam type.
- *
- * During Device initialization the HCAPI device specifics are learned
- * and as well as the RM DB creation. From that those initial steps
- * this structure can be populated.
+ * Shadow DB configuration information
*
- * NOTE:
- * If used in an array of tcam types then such array must be ordered
- * by the TF type is represents.
+ * The shadow configuration is for all tcam table types for a direction
*/
struct tf_shadow_tcam_cfg_parms {
/**
- * TF tcam type
+ * [in] The number of elements in the alloc_cnt and base_addr
+ * For now, it should always be equal to TF_TCAM_TBL_TYPE_MAX
*/
- enum tf_tcam_tbl_type type;
-
+ int num_entries;
/**
- * Number of entries the Shadow DB needs to hold
+ * [in] Resource allocation count array
+ * This array content originates from the tf_session_resources
+ * that is passed in on session open
+ * Array size is TF_TCAM_TBL_TYPE_MAX
*/
- int num_entries;
-
+ uint16_t *alloc_cnt;
/**
- * Element width for this table type
+ * [in] The base index for each tcam table
*/
- int element_width;
+ uint16_t base_addr[TF_TCAM_TBL_TYPE_MAX];
};
/**
- * Shadow tcam DB creation parameters
+ * Shadow TCAM DB creation parameters. The shadow db for this direction
+ * is returned
*/
struct tf_shadow_tcam_create_db_parms {
/**
- * [in] Configuration information for the shadow db
+ * [in] Receive or transmit direction
*/
- struct tf_shadow_tcam_cfg_parms *cfg;
+ enum tf_dir dir;
/**
- * [in] Number of elements in the parms structure
+ * [in] Configuration information for the shadow db
*/
- uint16_t num_elements;
+ struct tf_shadow_tcam_cfg_parms *cfg;
/**
* [out] Shadow tcam DB handle
*/
- void *tf_shadow_tcam_db;
+ void **shadow_db;
};
/**
- * Shadow tcam DB free parameters
+ * Create the shadow db for a single direction
+ *
+ * The returned shadow db must be freed using the free db API when no longer
+ * needed
*/
-struct tf_shadow_tcam_free_db_parms {
- /**
- * Shadow tcam DB handle
- */
- void *tf_shadow_tcam_db;
-};
+int
+tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms);
/**
- * Shadow tcam search parameters
+ * Shadow TCAM free parameters
*/
-struct tf_shadow_tcam_search_parms {
+struct tf_shadow_tcam_free_db_parms {
/**
* [in] Shadow tcam DB handle
*/
- void *tf_shadow_tcam_db;
- /**
- * [in] TCAM tbl type
- */
- enum tf_tcam_tbl_type type;
- /**
- * [in] Pointer to entry blob value in remap table to match
- */
- uint8_t *entry;
- /**
- * [in] Size of the entry blob passed in bytes
- */
- uint16_t entry_sz;
- /**
- * [out] Index of the found element returned if hit
- */
- uint16_t *index;
- /**
- * [out] Reference count incremented if hit
- */
- uint16_t *ref_cnt;
+ void *shadow_db;
};
/**
- * Shadow tcam insert parameters
+ * Free all resources associated with the shadow db
+ */
+int
+tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms);
+
+/**
+ * Shadow TCAM bind index parameters
*/
-struct tf_shadow_tcam_insert_parms {
+struct tf_shadow_tcam_bind_index_parms {
/**
* [in] Shadow tcam DB handle
*/
- void *tf_shadow_tcam_db;
+ void *shadow_db;
/**
- * [in] TCAM tbl type
+ * [in] receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] TCAM table type
*/
enum tf_tcam_tbl_type type;
/**
- * [in] Pointer to entry blob value in remap table to match
+ * [in] index of the entry to program
*/
- uint8_t *entry;
+ uint16_t idx;
/**
- * [in] Size of the entry blob passed in bytes
+ * [in] Pointer to the key to program
*/
- uint16_t entry_sz;
+ uint8_t *key;
/**
- * [in] Entry to update
+ * [in] Pointer to the mask for the key
*/
- uint16_t index;
+ uint8_t *mask;
/**
- * [out] Reference count after insert
+ * [in] Size of the key in bytes
*/
- uint16_t *ref_cnt;
+ uint16_t key_size;
+ /**
+ * [in] The hash bucket handle returned from the search
+ */
+ uint32_t hb_handle;
};
/**
- * Shadow tcam remove parameters
+ * Binds the allocated tcam index with the hash and shadow tables
*/
-struct tf_shadow_tcam_remove_parms {
+int
+tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms);
+
+/**
+ * Shadow TCAM insert parameters
+ */
+struct tf_shadow_tcam_insert_parms {
/**
* [in] Shadow tcam DB handle
*/
- void *tf_shadow_tcam_db;
- /**
- * [in] TCAM tbl type
- */
- enum tf_tcam_tbl_type type;
- /**
- * [in] Entry to update
- */
- uint16_t index;
+ void *shadow_db;
/**
- * [out] Reference count after removal
+ * [in] The set parms from tf core
*/
- uint16_t *ref_cnt;
+ struct tf_tcam_set_parms *sparms;
};
/**
- * @page shadow_tcam Shadow tcam DB
- *
- * @ref tf_shadow_tcam_create_db
- *
- * @ref tf_shadow_tcam_free_db
- *
- * @reg tf_shadow_tcam_search
- *
- * @reg tf_shadow_tcam_insert
- *
- * @reg tf_shadow_tcam_remove
- */
-
-/**
- * Creates and fills a Shadow tcam DB. The DB is indexed per the
- * parms structure.
- *
- * [in] parms
- * Pointer to create db parameters
+ * Set the entry into the tcam manager hash and shadow tables
*
- * Returns
- * - (0) if successful.
- * - (-EINVAL) on failure.
+ * The search must have been used prior to setting the entry so that the
+ * hash has been calculated and duplicate entries will not be added
*/
-int tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms);
+int
+tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms);
/**
- * Closes the Shadow tcam DB and frees all allocated
- * resources per the associated database.
- *
- * [in] parms
- * Pointer to the free DB parameters
- *
- * Returns
- * - (0) if successful.
- * - (-EINVAL) on failure.
+ * Shadow TCAM remove parameters
*/
-int tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms);
+struct tf_shadow_tcam_remove_parms {
+ /**
+ * [in] Shadow tcam DB handle
+ */
+ void *shadow_db;
+ /**
+ * [inout] The free parms from tf core
+ */
+ struct tf_tcam_free_parms *fparms;
+};
/**
- * Search Shadow tcam db for matching result
- *
- * [in] parms
- * Pointer to the search parameters
+ * Remove the entry from the tcam hash and shadow tables
*
- * Returns
- * - (0) if successful, element was found.
- * - (-EINVAL) on failure.
+ * The entry's reference count is decremented; once it reaches zero the
+ * hash and shadow table entries for the index are cleared
*/
-int tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms);
+int
+tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms);
/**
- * Inserts an element into the Shadow tcam DB. Will fail if the
- * elements ref_count is different from 0. Ref_count after insert will
- * be incremented.
- *
- * [in] parms
- * Pointer to insert parameters
- *
- * Returns
- * - (0) if successful.
- * - (-EINVAL) on failure.
+ * Shadow TCAM search parameters
*/
-int tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms);
+struct tf_shadow_tcam_search_parms {
+ /**
+ * [in] Shadow tcam DB handle
+ */
+ void *shadow_db;
+ /**
+ * [inout] The search parameters from tf core
+ */
+ struct tf_tcam_alloc_search_parms *sparms;
+ /**
+ * [out] The hash handle to use for the set
+ */
+ uint32_t hb_handle;
+};
/**
- * Removes an element from the Shadow tcam DB. Will fail if the
- * elements ref_count is 0. Ref_count after removal will be
- * decremented.
+ * Search for an entry in the tcam hash/shadow tables
*
- * [in] parms
- * Pointer to remove parameter
- *
- * Returns
- * - (0) if successful.
- * - (-EINVAL) on failure.
+ * If there is a miss, but there is room for insertion, the hb_handle returned
+ * is used for insertion during the bind index API
*/
-int tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms);
-
-#endif /* _TF_SHADOW_TCAM_H_ */
+int
+tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms);
+#endif
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.c b/drivers/net/bnxt/tf_core/tf_tcam.c
index cbfaa94..7679d09 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_tcam.c
@@ -14,6 +14,7 @@
#include "tfp.h"
#include "tf_session.h"
#include "tf_msg.h"
+#include "tf_shadow_tcam.h"
struct tf;
@@ -25,7 +26,7 @@ static void *tcam_db[TF_DIR_MAX];
/**
* TCAM Shadow DBs
*/
-/* static void *shadow_tcam_db[TF_DIR_MAX]; */
+static void *shadow_tcam_db[TF_DIR_MAX];
/**
* Init flag, set on bind and cleared on unbind
@@ -35,16 +36,22 @@ static uint8_t init;
/**
* Shadow init flag, set on bind and cleared on unbind
*/
-/* static uint8_t shadow_init; */
+static uint8_t shadow_init;
int
tf_tcam_bind(struct tf *tfp,
struct tf_tcam_cfg_parms *parms)
{
int rc;
- int i;
+ int i, d;
+ struct tf_rm_alloc_info info;
+ struct tf_rm_free_db_parms fparms;
+ struct tf_rm_create_db_parms db_cfg;
struct tf_tcam_resources *tcam_cnt;
- struct tf_rm_create_db_parms db_cfg = { 0 };
+ struct tf_shadow_tcam_free_db_parms fshadow;
+ struct tf_rm_get_alloc_info_parms ainfo;
+ struct tf_shadow_tcam_cfg_parms shadow_cfg;
+ struct tf_shadow_tcam_create_db_parms shadow_cdb;
TF_CHECK_PARMS2(tfp, parms);
@@ -62,29 +69,91 @@ tf_tcam_bind(struct tf *tfp,
return -EINVAL;
}
+ memset(&db_cfg, 0, sizeof(db_cfg));
+
db_cfg.type = TF_DEVICE_MODULE_TYPE_TCAM;
db_cfg.num_elements = parms->num_elements;
db_cfg.cfg = parms->cfg;
- for (i = 0; i < TF_DIR_MAX; i++) {
- db_cfg.dir = i;
- db_cfg.alloc_cnt = parms->resources->tcam_cnt[i].cnt;
- db_cfg.rm_db = &tcam_db[i];
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ db_cfg.dir = d;
+ db_cfg.alloc_cnt = parms->resources->tcam_cnt[d].cnt;
+ db_cfg.rm_db = &tcam_db[d];
rc = tf_rm_create_db(tfp, &db_cfg);
if (rc) {
TFP_DRV_LOG(ERR,
"%s: TCAM DB creation failed\n",
- tf_dir_2_str(i));
+ tf_dir_2_str(d));
return rc;
}
}
+ /* Initialize the TCAM manager. */
+ if (parms->shadow_copy) {
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ memset(&shadow_cfg, 0, sizeof(shadow_cfg));
+ memset(&shadow_cdb, 0, sizeof(shadow_cdb));
+ /* Get the base addresses of the tcams for tcam mgr */
+ for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
+ memset(&info, 0, sizeof(info));
+
+ if (!parms->resources->tcam_cnt[d].cnt[i])
+ continue;
+ ainfo.rm_db = tcam_db[d];
+ ainfo.db_index = i;
+ ainfo.info = &info;
+ rc = tf_rm_get_info(&ainfo);
+ if (rc)
+ goto error;
+
+ shadow_cfg.base_addr[i] = info.entry.start;
+ }
+
+ /* Create the shadow db */
+ shadow_cfg.alloc_cnt =
+ parms->resources->tcam_cnt[d].cnt;
+ shadow_cfg.num_entries = parms->num_elements;
+
+ shadow_cdb.shadow_db = &shadow_tcam_db[d];
+ shadow_cdb.cfg = &shadow_cfg;
+ rc = tf_shadow_tcam_create_db(&shadow_cdb);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "TCAM MGR DB creation failed "
+ "rc=%d\n", rc);
+ goto error;
+ }
+ }
+ shadow_init = 1;
+ }
+
init = 1;
TFP_DRV_LOG(INFO,
"TCAM - initialized\n");
return 0;
+error:
+ for (i = 0; i < TF_DIR_MAX; i++) {
+ memset(&fparms, 0, sizeof(fparms));
+ fparms.dir = i;
+ fparms.rm_db = tcam_db[i];
+ /* Ignoring return here since we are in the error case */
+ (void)tf_rm_free_db(tfp, &fparms);
+
+ if (parms->shadow_copy) {
+ fshadow.shadow_db = shadow_tcam_db[i];
+ tf_shadow_tcam_free_db(&fshadow);
+ shadow_tcam_db[i] = NULL;
+ }
+
+ tcam_db[i] = NULL;
+ }
+
+ shadow_init = 0;
+ init = 0;
+
+ return rc;
}
int
@@ -92,7 +161,8 @@ tf_tcam_unbind(struct tf *tfp)
{
int rc;
int i;
- struct tf_rm_free_db_parms fparms = { 0 };
+ struct tf_rm_free_db_parms fparms;
+ struct tf_shadow_tcam_free_db_parms fshadow;
TF_CHECK_PARMS1(tfp);
@@ -104,6 +174,7 @@ tf_tcam_unbind(struct tf *tfp)
}
for (i = 0; i < TF_DIR_MAX; i++) {
+ memset(&fparms, 0, sizeof(fparms));
fparms.dir = i;
fparms.rm_db = tcam_db[i];
rc = tf_rm_free_db(tfp, &fparms);
@@ -111,8 +182,17 @@ tf_tcam_unbind(struct tf *tfp)
return rc;
tcam_db[i] = NULL;
+
+ if (shadow_init) {
+ memset(&fshadow, 0, sizeof(fshadow));
+
+ fshadow.shadow_db = shadow_tcam_db[i];
+ tf_shadow_tcam_free_db(&fshadow);
+ shadow_tcam_db[i] = NULL;
+ }
}
+ shadow_init = 0;
init = 0;
return 0;
@@ -125,7 +205,7 @@ tf_tcam_alloc(struct tf *tfp,
int rc;
struct tf_session *tfs;
struct tf_dev_info *dev;
- struct tf_rm_allocate_parms aparms = { 0 };
+ struct tf_rm_allocate_parms aparms;
uint16_t num_slice_per_row = 1;
TF_CHECK_PARMS2(tfp, parms);
@@ -165,6 +245,8 @@ tf_tcam_alloc(struct tf *tfp,
return rc;
/* Allocate requested element */
+ memset(&aparms, 0, sizeof(aparms));
+
aparms.rm_db = tcam_db[parms->dir];
aparms.db_index = parms->type;
aparms.priority = parms->priority;
@@ -202,11 +284,12 @@ tf_tcam_free(struct tf *tfp,
int rc;
struct tf_session *tfs;
struct tf_dev_info *dev;
- struct tf_rm_is_allocated_parms aparms = { 0 };
- struct tf_rm_free_parms fparms = { 0 };
- struct tf_rm_get_hcapi_parms hparms = { 0 };
+ struct tf_rm_is_allocated_parms aparms;
+ struct tf_rm_free_parms fparms;
+ struct tf_rm_get_hcapi_parms hparms;
uint16_t num_slice_per_row = 1;
int allocated = 0;
+ struct tf_shadow_tcam_remove_parms shparms;
TF_CHECK_PARMS2(tfp, parms);
@@ -245,6 +328,8 @@ tf_tcam_free(struct tf *tfp,
return rc;
/* Check if element is in use */
+ memset(&aparms, 0, sizeof(aparms));
+
aparms.rm_db = tcam_db[parms->dir];
aparms.db_index = parms->type;
aparms.index = parms->idx / num_slice_per_row;
@@ -262,7 +347,37 @@ tf_tcam_free(struct tf *tfp,
return -EINVAL;
}
+ /*
+ * The Shadow mgmt, if enabled, determines if the entry needs
+ * to be deleted.
+ */
+ if (shadow_init) {
+ shparms.shadow_db = shadow_tcam_db[parms->dir];
+ shparms.fparms = parms;
+ rc = tf_shadow_tcam_remove(&shparms);
+ if (rc) {
+ /*
+ * Should not get here, log it and let the entry be
+ * deleted.
+ */
+ TFP_DRV_LOG(ERR, "%s: Shadow free fail, "
+ "type:%d index:%d deleting the entry.\n",
+ tf_dir_2_str(parms->dir),
+ parms->type,
+ parms->idx);
+ } else {
+ /*
+ * If the entry still has references, just return the
+ * ref count to the caller. No need to remove entry
+ * from rm or hw
+ */
+ if (parms->ref_cnt >= 1)
+ return rc;
+ }
+ }
+
/* Free requested element */
+ memset(&fparms, 0, sizeof(fparms));
fparms.rm_db = tcam_db[parms->dir];
fparms.db_index = parms->type;
fparms.index = parms->idx / num_slice_per_row;
@@ -291,7 +406,8 @@ tf_tcam_free(struct tf *tfp,
rc = tf_rm_free(&fparms);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s: Free failed, type:%d, index:%d\n",
+ "%s: Free failed, type:%d, "
+ "index:%d\n",
tf_dir_2_str(parms->dir),
parms->type,
fparms.index);
@@ -302,6 +418,8 @@ tf_tcam_free(struct tf *tfp,
}
/* Convert TF type to HCAPI RM type */
+ memset(&hparms, 0, sizeof(hparms));
+
hparms.rm_db = tcam_db[parms->dir];
hparms.db_index = parms->type;
hparms.hcapi_type = &parms->hcapi_type;
@@ -326,9 +444,131 @@ tf_tcam_free(struct tf *tfp,
}
int
-tf_tcam_alloc_search(struct tf *tfp __rte_unused,
- struct tf_tcam_alloc_search_parms *parms __rte_unused)
+tf_tcam_alloc_search(struct tf *tfp,
+ struct tf_tcam_alloc_search_parms *parms)
{
+ struct tf_shadow_tcam_search_parms sparms;
+ struct tf_shadow_tcam_bind_index_parms bparms;
+ struct tf_tcam_alloc_parms aparms;
+ struct tf_tcam_free_parms fparms;
+ uint16_t num_slice_per_row = 1;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ int rc;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ if (!init) {
+ TFP_DRV_LOG(ERR,
+ "%s: No TCAM DBs created\n",
+ tf_dir_2_str(parms->dir));
+ return -EINVAL;
+ }
+
+ if (!shadow_init || !shadow_tcam_db[parms->dir]) {
+ TFP_DRV_LOG(ERR, "%s: TCAM Shadow not initialized for %s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tcam_tbl_2_str(parms->type));
+ return -EINVAL;
+ }
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc)
+ return rc;
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc)
+ return rc;
+
+ if (dev->ops->tf_dev_get_tcam_slice_info == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Need to retrieve row size etc */
+ rc = dev->ops->tf_dev_get_tcam_slice_info(tfp,
+ parms->type,
+ parms->key_size,
+ &num_slice_per_row);
+ if (rc)
+ return rc;
+
+ /*
+ * Prep the shadow search, reusing the parms from original search
+ * instead of copying them. Shadow will update output in there.
+ */
+ memset(&sparms, 0, sizeof(sparms));
+ sparms.sparms = parms;
+ sparms.shadow_db = shadow_tcam_db[parms->dir];
+
+ rc = tf_shadow_tcam_search(&sparms);
+ if (rc)
+ return rc;
+
+ /*
+ * Return now if the caller did not request an allocation or the
+ * search was not a miss. The hit info has already been updated in
+ * the original search parms.
+ */
+ if (!parms->alloc || parms->search_status != MISS)
+ return rc;
+
+ /* Caller desires an allocate on miss */
+ if (dev->ops->tf_dev_alloc_tcam == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+ memset(&aparms, 0, sizeof(aparms));
+ aparms.dir = parms->dir;
+ aparms.type = parms->type;
+ aparms.key_size = parms->key_size;
+ aparms.priority = parms->priority;
+ rc = dev->ops->tf_dev_alloc_tcam(tfp, &aparms);
+ if (rc)
+ return rc;
+
+ /* Successful allocation, attempt to add it to the shadow */
+ memset(&bparms, 0, sizeof(bparms));
+ bparms.dir = parms->dir;
+ bparms.shadow_db = shadow_tcam_db[parms->dir];
+ bparms.type = parms->type;
+ bparms.key = parms->key;
+ bparms.mask = parms->mask;
+ bparms.key_size = parms->key_size;
+ bparms.idx = aparms.idx;
+ bparms.hb_handle = sparms.hb_handle;
+ rc = tf_shadow_tcam_bind_index(&bparms);
+ if (rc) {
+ /* Error binding entry, need to free the allocated idx */
+ if (dev->ops->tf_dev_free_tcam == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ fparms.dir = parms->dir;
+ fparms.type = parms->type;
+ fparms.idx = aparms.idx;
+ rc = dev->ops->tf_dev_free_tcam(tfp, &fparms);
+ if (rc)
+ return rc;
+ }
+
+ /* Add the allocated index to output and done */
+ parms->idx = aparms.idx;
+
return 0;
}
@@ -339,8 +579,9 @@ tf_tcam_set(struct tf *tfp __rte_unused,
int rc;
struct tf_session *tfs;
struct tf_dev_info *dev;
- struct tf_rm_is_allocated_parms aparms = { 0 };
- struct tf_rm_get_hcapi_parms hparms = { 0 };
+ struct tf_rm_is_allocated_parms aparms;
+ struct tf_rm_get_hcapi_parms hparms;
+ struct tf_shadow_tcam_insert_parms iparms;
uint16_t num_slice_per_row = 1;
int allocated = 0;
@@ -381,6 +622,8 @@ tf_tcam_set(struct tf *tfp __rte_unused,
return rc;
/* Check if element is in use */
+ memset(&aparms, 0, sizeof(aparms));
+
aparms.rm_db = tcam_db[parms->dir];
aparms.db_index = parms->type;
aparms.index = parms->idx / num_slice_per_row;
@@ -399,6 +642,8 @@ tf_tcam_set(struct tf *tfp __rte_unused,
}
/* Convert TF type to HCAPI RM type */
+ memset(&hparms, 0, sizeof(hparms));
+
hparms.rm_db = tcam_db[parms->dir];
hparms.db_index = parms->type;
hparms.hcapi_type = &parms->hcapi_type;
@@ -419,6 +664,23 @@ tf_tcam_set(struct tf *tfp __rte_unused,
return rc;
}
+ /* Successfully added to hw, now for shadow if enabled. */
+ if (!shadow_init || !shadow_tcam_db[parms->dir])
+ return 0;
+
+ iparms.shadow_db = shadow_tcam_db[parms->dir];
+ iparms.sparms = parms;
+ rc = tf_shadow_tcam_insert(&iparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: %s: Entry %d set failed, rc:%s",
+ tf_dir_2_str(parms->dir),
+ tf_tcam_tbl_2_str(parms->type),
+ parms->idx,
+ strerror(-rc));
+ return rc;
+ }
+
return 0;
}
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.h b/drivers/net/bnxt/tf_core/tf_tcam.h
index ee5bacc..4722ce0 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_tcam.h
@@ -104,19 +104,19 @@ struct tf_tcam_alloc_search_parms {
*/
enum tf_tcam_tbl_type type;
/**
- * [in] Enable search for matching entry
+ * [in] Type of HCAPI
*/
- uint8_t search_enable;
+ uint16_t hcapi_type;
/**
- * [in] Key data to match on (if search)
+ * [in] Key data to match on
*/
uint8_t *key;
/**
- * [in] key size (if search)
+ * [in] key size in bytes
*/
uint16_t key_size;
/**
- * [in] Mask data to match on (if search)
+ * [in] Mask data to match on
*/
uint8_t *mask;
/**
@@ -124,16 +124,31 @@ struct tf_tcam_alloc_search_parms {
*/
uint32_t priority;
/**
- * [out] If search, set if matching entry found
+ * [in] Allocate on miss.
+ */
+ uint8_t alloc;
+ /**
+ * [out] Set if matching entry found
*/
uint8_t hit;
/**
+ * [out] Search result status (hit, miss, reject)
+ */
+ enum tf_tcam_search_status search_status;
+ /**
* [out] Current refcnt after allocation
*/
uint16_t ref_cnt;
/**
- * [out] Idx allocated
- *
+ * [inout] The result data from the search is copied here
+ */
+ uint8_t *result;
+ /**
+ * [in] Size of the result buffer in bytes
+ */
+ uint16_t result_size;
+ /**
+ * [out] Index found
*/
uint16_t idx;
};
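For reference, a condensed sketch of how the shadow search above selects its
hash bucket; the names are the ones introduced in this patch, and the
surrounding declarations (ctxt, key, mask, key_size) are assumed to be in
scope as they are inside tf_shadow_tcam_search():

	/* Sketch only: bucket selection inside the shadow TCAM search */
	uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ] = { 0 };
	uint32_t i, hid32;
	uint16_t hid16, hb_idx;

	/* Only the bits the TCAM matches on contribute to the hash */
	for (i = 0; i < key_size; i++)
		tkey[i] = key[i] & mask[i];

	/* CRC32 the masked key, fold to 16 bits, then fit the hash table */
	hid32 = tf_shadow_tcam_crc32_calc(tkey, key_size);
	hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
	hb_idx = hid16 & ctxt->hash_ctxt.hid_mask;

	/* Each bucket holds TF_SHADOW_TCAM_HB_NUM_ELEM candidates that are
	 * then compared against the full key/mask pair for an exact hit.
	 */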
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v2 02/20] net/bnxt: nat global registers support
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 01/20] net/bnxt: add shadow tcam capability with search Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 03/20] net/bnxt: parif for offload miss rules Somnath Kotur
` (18 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Add support to enable or disable the NAT global registers.
The NAT feature is enabled in hardware during initialization
and disabled at deinitialization of the application.
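For clarity, a minimal sketch of how the new helper is paired at init and
deinit time; the wrapper name below is illustrative only, while the constants
and bnxt_ulp_global_cfg_update() itself come from this patch:

	/* Illustrative wrapper (hypothetical name): enable == 1 at init,
	 * enable == 0 at deinit.  bnxt_ulp_global_cfg_update() is the
	 * static helper added below in bnxt_ulp.c.
	 */
	static int32_t
	ulp_nat_global_cfg(struct bnxt *bp, uint32_t enable)
	{
		uint32_t nat_bits = BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
				    BNXT_ULP_NAT_INNER_L2_HEADER_DMAC;
		int32_t rc;

		rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
						TF_TUNNEL_ENCAP_NAT, nat_bits,
						enable);
		if (rc)
			return rc;

		return bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
						  TF_TUNNEL_ENCAP_NAT, nat_bits,
						  enable);
	}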
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 83 ++++++++++++++++++++++++++++++++++++++
drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 4 ++
2 files changed, 87 insertions(+)
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 0869231..7c65a4b 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -597,6 +597,52 @@ ulp_session_deinit(struct bnxt_ulp_session_state *session)
}
/*
+ * Internal API to enable or disable the NAT feature.
+ * Pass set_flag as 1 to set the value bits or 0 to clear them.
+ * Returns 0 on success.
+ */
+static int32_t
+bnxt_ulp_global_cfg_update(struct bnxt *bp,
+ enum tf_dir dir,
+ enum tf_global_config_type type,
+ uint32_t offset,
+ uint32_t value,
+ uint32_t set_flag)
+{
+ uint32_t global_cfg = 0;
+ int rc;
+ struct tf_global_cfg_parms parms;
+
+ /* Initialize the params */
+ parms.dir = dir,
+ parms.type = type,
+ parms.offset = offset,
+ parms.config = (uint8_t *)&global_cfg,
+ parms.config_sz_in_bytes = sizeof(global_cfg);
+
+ rc = tf_get_global_cfg(&bp->tfp, &parms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
+ type, rc);
+ return rc;
+ }
+
+ if (set_flag)
+ global_cfg |= value;
+ else
+ global_cfg &= ~value;
+
+ /* SET the register RE_CFA_REG_ACT_TECT */
+ rc = tf_set_global_cfg(&bp->tfp, &parms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
+ type, rc);
+ return rc;
+ }
+ return rc;
+}
+
+/*
* When a port is initialized by DPDK, this function is called to
* initialize the ULP context and the rest of the infrastructure
* associated with it.
@@ -732,6 +778,29 @@ bnxt_ulp_init(struct bnxt *bp)
goto jump_to_error;
}
+ /*
+ * Enable NAT feature. Set the global configuration register
+ * Tunnel encap to enable NAT with the reuse of existing inner
+ * L2 header smac and dmac
+ */
+ rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
+ TF_TUNNEL_ENCAP_NAT,
+ (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
+ BNXT_ULP_NAT_INNER_L2_HEADER_DMAC), 1);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
+ goto jump_to_error;
+ }
+
+ rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
+ TF_TUNNEL_ENCAP_NAT,
+ (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
+ BNXT_ULP_NAT_INNER_L2_HEADER_DMAC), 1);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
+ goto jump_to_error;
+ }
+
return rc;
jump_to_error:
@@ -785,6 +854,19 @@ bnxt_ulp_deinit(struct bnxt *bp)
/* Delete the Port database */
ulp_port_db_deinit(bp->ulp_ctx);
+ /* Disable NAT feature */
+ (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
+ TF_TUNNEL_ENCAP_NAT,
+ (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
+ BNXT_ULP_NAT_INNER_L2_HEADER_DMAC),
+ 0);
+
+ (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
+ TF_TUNNEL_ENCAP_NAT,
+ (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
+ BNXT_ULP_NAT_INNER_L2_HEADER_DMAC),
+ 0);
+
/* Delete the ulp context and tf session */
ulp_ctx_detach(bp, session);
@@ -942,6 +1024,7 @@ bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
struct bnxt_vf_representor *vfr = dev->data->dev_private;
+
bp = vfr->parent_dev->data->dev_private;
}
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
index f9e5e2b..7c95ead 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
@@ -14,6 +14,10 @@
#include "ulp_template_db_enum.h"
+/* NAT defines to reuse existing inner L2 SMAC and DMAC */
+#define BNXT_ULP_NAT_INNER_L2_HEADER_SMAC 0x2000
+#define BNXT_ULP_NAT_INNER_L2_HEADER_DMAC 0x100
+
/* defines for the ulp_flags */
#define BNXT_ULP_VF_REP_ENABLED 0x1
#define ULP_VF_REP_IS_ENABLED(flag) ((flag) & BNXT_ULP_VF_REP_ENABLED)
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v2 03/20] net/bnxt: parif for offload miss rules
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 01/20] net/bnxt: add shadow tcam capability with search Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 02/20] net/bnxt: nat global registers support Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 04/20] net/bnxt: ulp mapper changes to use tcam search Somnath Kotur
` (17 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
For the offload miss rules, the parif miss path needs to be
considered. The higher parifs are reserved for handling this, so the
physical port parif is remapped into that reserved range.
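A condensed sketch of the ingress remap added in this patch (assumes ifindex
was already resolved from the port id, as done in
bnxt_ulp_comp_fld_intf_update() below):

	uint16_t parif;

	/* Look up the physical port parif and shift it into the reserved
	 * (higher) range used by the miss path before recording it.
	 */
	if (!ulp_port_db_parif_get(params->ulp_ctx, ifindex,
				   BNXT_ULP_PHY_PORT_PARIF, &parif)) {
		parif += BNXT_ULP_FREE_PARIF_BASE;
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
	}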
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_def_rules.c | 4 ++--
drivers/net/bnxt/tf_ulp/ulp_port_db.h | 1 +
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 40 ++++++++++++++++++++++++++++++++
3 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
index 4d4f7c4..d86e4c9 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
@@ -12,8 +12,6 @@
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
-#define BNXT_ULP_FREE_PARIF_BASE 11
-
struct bnxt_ulp_def_param_handler {
int32_t (*vfr_func)(struct bnxt_ulp_context *ulp_ctx,
struct ulp_tlv_param *param,
@@ -85,6 +83,8 @@ ulp_set_parif_in_comp_fld(struct bnxt_ulp_context *ulp_ctx,
if (parif_type == BNXT_ULP_PHY_PORT_PARIF) {
idx = BNXT_ULP_CF_IDX_PHY_PORT_PARIF;
+ /* Parif needs to be reset to a free partition */
+ parif += BNXT_ULP_FREE_PARIF_BASE;
} else if (parif_type == BNXT_ULP_DRV_FUNC_PARIF) {
idx = BNXT_ULP_CF_IDX_DRV_FUNC_PARIF;
/* Parif needs to be reset to a free partition */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.h b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
index 393d01b..2b323d1 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
@@ -10,6 +10,7 @@
#define BNXT_PORT_DB_MAX_INTF_LIST 256
#define BNXT_PORT_DB_MAX_FUNC 2048
+#define BNXT_ULP_FREE_PARIF_BASE 11
enum bnxt_ulp_svif_type {
BNXT_ULP_DRV_FUNC_SVIF = 0,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index 3891bcd..39f801b 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -159,6 +159,43 @@ bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
}
/*
+ * Function to handle the post processing of the computed
+ * fields for the interface.
+ */
+static void
+bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
+{
+ uint32_t ifindex;
+ uint16_t port_id, parif;
+ enum bnxt_ulp_direction_type dir;
+
+ /* get the direction details */
+ dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
+
+ if (dir == BNXT_ULP_DIR_INGRESS) {
+ /* read the port id details */
+ port_id = ULP_COMP_FLD_IDX_RD(params,
+ BNXT_ULP_CF_IDX_INCOMING_IF);
+ if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
+ port_id,
+ &ifindex)) {
+ BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
+ return;
+ }
+ /* Set port PARIF */
+ if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
+ BNXT_ULP_PHY_PORT_PARIF, &parif)) {
+ BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
+ return;
+ }
+ /* Parif needs to be reset to a free partition */
+ parif += BNXT_ULP_FREE_PARIF_BASE;
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
+ parif);
+ }
+}
+
+/*
* Function to handle the post processing of the parsing details
*/
int32_t
@@ -213,6 +250,9 @@ bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
/* Merge the hdr_fp_bit into the proto header bit */
params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
+ /* Update the computed interface parameters */
+ bnxt_ulp_comp_fld_intf_update(params);
+
/* TBD: Handle the flow rejection scenarios */
return 0;
}
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v2 04/20] net/bnxt: ulp mapper changes to use tcam search
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (2 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 03/20] net/bnxt: parif for offload miss rules Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 05/20] net/bnxt: add tf hash API Somnath Kotur
` (16 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Modified the ulp mapper to use the new tf_search_tcam_entry API.
When search before allocation is requested, the mapper calls
tf_search_tcam_entry with the alloc flag.
- On HIT, the result and the tcam index are returned.
- On MISS, the tcam index is returned but the result is
created and the tcam entry is set.
- On REJECT, the flow request is rejected.
A caller-side sketch of this flow is shown after the list.
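The sketch below trims the error handling and uses the same field names as
the ulp_mapper change in this patch; key, mask and result are assumed to
point at the blobs already built by the mapper:

	struct tf_search_tcam_entry_parms sparms = { 0 };

	sparms.dir = tbl->direction;
	sparms.tcam_tbl_type = tbl->resource_type;
	sparms.key = key;
	sparms.key_sz_in_bits = tbl->key_bit_size;
	sparms.mask = mask;
	sparms.priority = tbl->priority;
	sparms.alloc = 1;		/* allocate the index on a MISS */
	sparms.result = result;
	sparms.result_sz_in_bits = tbl->result_bit_size;

	rc = tf_search_tcam_entry(tfp, &sparms);
	if (rc)
		return rc;
	if (sparms.search_status == REJECT)
		return -ENOMEM;		/* no index available, flow rejected */

	/* HIT:  sparms.idx is the existing entry, sparms.result holds its data.
	 * MISS: sparms.idx is the newly allocated entry; build the result and
	 *       program it with tf_set_tcam_entry().
	 */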
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 106 +++++++++++++++++++++++------------
1 file changed, 71 insertions(+), 35 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 6a727ed..2d3373d 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -690,7 +690,7 @@ ulp_mapper_ident_extract(struct bnxt_ulp_mapper_parms *parms,
{
struct ulp_flow_db_res_params fid_parms;
uint64_t id = 0;
- uint32_t idx;
+ uint32_t idx = 0;
struct tf_search_identifier_parms sparms = { 0 };
struct tf_free_identifier_parms free_parms = { 0 };
struct tf *tfp;
@@ -1292,12 +1292,13 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct tf *tfp;
int32_t rc, trc;
struct tf_alloc_tcam_entry_parms aparms = { 0 };
+ struct tf_search_tcam_entry_parms searchparms = { 0 };
struct tf_set_tcam_entry_parms sparms = { 0 };
struct ulp_flow_db_res_params fid_parms = { 0 };
struct tf_free_tcam_entry_parms free_parms = { 0 };
uint32_t hit = 0;
uint16_t tmplen = 0;
- struct ulp_blob res_blob;
+ uint16_t idx;
/* Skip this if was handled by the cache. */
if (parms->tcam_tbl_opc == BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_SKIP) {
@@ -1352,37 +1353,72 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
}
}
- aparms.dir = tbl->direction;
- aparms.tcam_tbl_type = tbl->resource_type;
- aparms.search_enable = tbl->srch_b4_alloc;
- aparms.key_sz_in_bits = tbl->key_bit_size;
- aparms.key = ulp_blob_data_get(&key, &tmplen);
- if (tbl->key_bit_size != tmplen) {
- BNXT_TF_DBG(ERR, "Key len (%d) != Expected (%d)\n",
- tmplen, tbl->key_bit_size);
- return -EINVAL;
- }
+ if (!tbl->srch_b4_alloc) {
+ /*
+ * No search for re-use is requested, so simply allocate the
+ * tcam index.
+ */
+ aparms.dir = tbl->direction;
+ aparms.tcam_tbl_type = tbl->resource_type;
+ aparms.search_enable = tbl->srch_b4_alloc;
+ aparms.key_sz_in_bits = tbl->key_bit_size;
+ aparms.key = ulp_blob_data_get(&key, &tmplen);
+ if (tbl->key_bit_size != tmplen) {
+ BNXT_TF_DBG(ERR, "Key len (%d) != Expected (%d)\n",
+ tmplen, tbl->key_bit_size);
+ return -EINVAL;
+ }
- aparms.mask = ulp_blob_data_get(&mask, &tmplen);
- if (tbl->key_bit_size != tmplen) {
- BNXT_TF_DBG(ERR, "Mask len (%d) != Expected (%d)\n",
- tmplen, tbl->key_bit_size);
- return -EINVAL;
- }
+ aparms.mask = ulp_blob_data_get(&mask, &tmplen);
+ if (tbl->key_bit_size != tmplen) {
+ BNXT_TF_DBG(ERR, "Mask len (%d) != Expected (%d)\n",
+ tmplen, tbl->key_bit_size);
+ return -EINVAL;
+ }
- aparms.priority = tbl->priority;
+ aparms.priority = tbl->priority;
- /*
- * All failures after this succeeds require the entry to be freed.
- * cannot return directly on failure, but needs to goto error
- */
- rc = tf_alloc_tcam_entry(tfp, &aparms);
- if (rc) {
- BNXT_TF_DBG(ERR, "tcam alloc failed rc=%d.\n", rc);
- return rc;
- }
+ /*
+ * All failures after this succeeds require the entry to be
+ * freed. cannot return directly on failure, but needs to goto
+ * error.
+ */
+ rc = tf_alloc_tcam_entry(tfp, &aparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "tcam alloc failed rc=%d.\n", rc);
+ return rc;
+ }
+ idx = aparms.idx;
+ hit = aparms.hit;
+ } else {
+ /*
+ * Searching before allocation to see if we already have an
+ * entry. This allows re-use of a constrained resource.
+ */
+ searchparms.dir = tbl->direction;
+ searchparms.tcam_tbl_type = tbl->resource_type;
+ searchparms.key = ulp_blob_data_get(&key, &tmplen);
+ searchparms.key_sz_in_bits = tbl->key_bit_size;
+ searchparms.mask = ulp_blob_data_get(&mask, &tmplen);
+ searchparms.priority = tbl->priority;
+ searchparms.alloc = 1;
+ searchparms.result = ulp_blob_data_get(&data, &tmplen);
+ searchparms.result_sz_in_bits = tbl->result_bit_size;
+
+ rc = tf_search_tcam_entry(tfp, &searchparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "tcam search failed rc=%d\n", rc);
+ return rc;
+ }
- hit = aparms.hit;
+ /* Successful search, check the result */
+ if (searchparms.search_status == REJECT) {
+ BNXT_TF_DBG(ERR, "tcam alloc rejected\n");
+ return -ENOMEM;
+ }
+ idx = searchparms.idx;
+ hit = searchparms.hit;
+ }
/* Build the result */
if (!tbl->srch_b4_alloc || !hit) {
@@ -1430,9 +1466,9 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
}
}
- sparms.dir = aparms.dir;
- sparms.tcam_tbl_type = aparms.tcam_tbl_type;
- sparms.idx = aparms.idx;
+ sparms.dir = tbl->direction;
+ sparms.tcam_tbl_type = tbl->resource_type;
+ sparms.idx = idx;
/* Already verified the key/mask lengths */
sparms.key = ulp_blob_data_get(&key, &tmplen);
sparms.mask = ulp_blob_data_get(&mask, &tmplen);
@@ -1464,7 +1500,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
rc = -EINVAL;
goto error;
}
- parms->cache_ptr->tcam_idx = aparms.idx;
+ parms->cache_ptr->tcam_idx = idx;
}
/* Mark action */
@@ -1483,7 +1519,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
for (i = 0; i < num_idents; i++) {
rc = ulp_mapper_ident_extract(parms, tbl,
- &idents[i], &res_blob);
+ &idents[i], &data);
if (rc) {
BNXT_TF_DBG(ERR,
"Error in ident extraction\n");
@@ -1501,7 +1537,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
fid_parms.resource_func = tbl->resource_func;
fid_parms.resource_type = tbl->resource_type;
fid_parms.critical_resource = tbl->critical_resource;
- fid_parms.resource_hndl = aparms.idx;
+ fid_parms.resource_hndl = idx;
rc = ulp_flow_db_resource_add(parms->ulp_ctx,
parms->tbl_idx,
parms->fid,
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v2 05/20] net/bnxt: add tf hash API
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (3 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 04/20] net/bnxt: ulp mapper changes to use tcam search Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 06/20] net/bnxt: skip mark id injection into mbuf Somnath Kotur
` (15 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Added tf_hash API for common hash uses across tf_core functions
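For illustration, a small usage sketch of the new helpers; the 16-bit fold at
the end is only an example of a typical consumer (it mirrors what the shadow
TCAM search does with its CRC) and is not part of tf_hash itself:

	#include "tf_hash.h"

	uint8_t key[16] = { 0 };	/* fill with the (masked) key bytes */
	uint32_t crc;
	uint16_t folded;

	/* Default seed, finalized CRC32 over the whole buffer */
	crc = tf_hash_calc_crc32(key, sizeof(key));

	/* Seeded form, e.g. for chaining over multiple buffers */
	crc = tf_hash_calc_crc32i(crc, key, sizeof(key));

	/* Example fold of the 32-bit CRC down to a 16-bit table index */
	folded = (uint16_t)(((crc >> 16) & 0xffff) ^ (crc & 0xffff));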
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Farah Smith <farah.smith@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/meson.build | 1 +
drivers/net/bnxt/tf_core/Makefile | 1 +
drivers/net/bnxt/tf_core/tf_hash.c | 106 +++++++++++++++++++++++++++++++++++++
drivers/net/bnxt/tf_core/tf_hash.h | 27 ++++++++++
4 files changed, 135 insertions(+)
create mode 100644 drivers/net/bnxt/tf_core/tf_hash.c
create mode 100644 drivers/net/bnxt/tf_core/tf_hash.h
diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build
index 8529b33..190469e 100644
--- a/drivers/net/bnxt/meson.build
+++ b/drivers/net/bnxt/meson.build
@@ -47,6 +47,7 @@ sources = files('bnxt_cpr.c',
'tf_core/tf_global_cfg.c',
'tf_core/tf_em_host.c',
'tf_core/tf_shadow_identifier.c',
+ 'tf_core/tf_hash.c',
'hcapi/hcapi_cfa_p4.c',
diff --git a/drivers/net/bnxt/tf_core/Makefile b/drivers/net/bnxt/tf_core/Makefile
index cca0e2e..cf6aaec 100644
--- a/drivers/net/bnxt/tf_core/Makefile
+++ b/drivers/net/bnxt/tf_core/Makefile
@@ -32,3 +32,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_util.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_if_tbl.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_global_cfg.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_shadow_identifier.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_hash.c
diff --git a/drivers/net/bnxt/tf_core/tf_hash.c b/drivers/net/bnxt/tf_core/tf_hash.c
new file mode 100644
index 0000000..68476cb
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_hash.c
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#include "tf_hash.h"
+
+/* CRC polynomial 0xedb88320 */
+static const uint32_t tf_hash_crc32tbl[] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+/**
+ * Calculate a crc32 on the buffer with an initial value and len
+ *
+ * Returns the crc32
+ */
+uint32_t
+tf_hash_calc_crc32i(uint32_t init, uint8_t *buf, uint32_t len)
+{
+ uint32_t crc = init;
+
+ while (len--)
+ crc = tf_hash_crc32tbl[(crc ^ buf[len]) & 0xff] ^
+ (crc >> 8);
+
+ return crc;
+}
+
+/**
+ * Calculate a crc32 on the buffer with a default initial value
+ *
+ * Returns the crc32
+ */
+uint32_t
+tf_hash_calc_crc32(uint8_t *buf, uint32_t len)
+{
+ uint32_t crc = ~0U;
+
+ crc = tf_hash_calc_crc32i(crc, buf, len);
+
+ return ~crc;
+}
diff --git a/drivers/net/bnxt/tf_core/tf_hash.h b/drivers/net/bnxt/tf_core/tf_hash.h
new file mode 100644
index 0000000..6b60aff
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_hash.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TF_HASH_H_
+#define _TF_HASH_H_
+
+#include "tf_core.h"
+
+/**
+ * Calculate a crc32 on the buffer with an initial value and len
+ *
+ * Returns the crc32
+ */
+uint32_t
+tf_hash_calc_crc32i(uint32_t init, uint8_t *buf, uint32_t len);
+
+/**
+ * Calculate a crc32 on the buffer with a default initial value
+ *
+ * Returns the crc32
+ */
+uint32_t
+tf_hash_calc_crc32(uint8_t *buf, uint32_t len);
+
+#endif
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v2 06/20] net/bnxt: skip mark id injection into mbuf
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (4 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 05/20] net/bnxt: add tf hash API Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 07/20] net/bnxt: nat template changes Somnath Kotur
` (14 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
When a packet is looped back from VF to VFR, it is marked to identify
the VFR interface. However, this mark_id should not be percolated up to
OVS as it is internal to the PMD.
This patch fixes it by skipping mark injection into the mbuf if the
packet is received on a VFR interface.
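The resulting control flow in bnxt_ulp_set_mark_in_mbuf(), condensed from the
change below:

	rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid, cfa_code, vfr_flag,
				  &mark_id);
	if (!rc) {
		/* VF to VFR Rx path: the mark is PMD internal, do not
		 * expose it to the application.
		 */
		if (vfr_flag && *vfr_flag)
			return mark_id;

		/* Normal path: hand the mark to the application via the mbuf */
		mbuf->hash.fdir.hi = mark_id;
		mbuf->udata64 = (cfa_code & 0xffffffffull) << 32;
	}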
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
---
drivers/net/bnxt/bnxt_rxr.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index baf73cb..43b1256 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -485,6 +485,9 @@ bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
cfa_code, vfr_flag, &mark_id);
if (!rc) {
+ /* VF to VFR Rx path. So, skip mark_id injection in mbuf */
+ if (vfr_flag && *vfr_flag)
+ return mark_id;
/* Got the mark, write it to the mbuf and return */
mbuf->hash.fdir.hi = mark_id;
mbuf->udata64 = (cfa_code & 0xffffffffull) << 32;
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v2 07/20] net/bnxt: nat template changes
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (5 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 06/20] net/bnxt: skip mark id injection into mbuf Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 08/20] net/bnxt: configure parif for the egress rules Somnath Kotur
` (13 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
The template is updated to support additional combinations
of NAT actions.
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_template_db_act.c | 412 +++++++++++++++---------
drivers/net/bnxt/tf_ulp/ulp_template_db_class.c | 16 +-
drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h | 26 +-
drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c | 4 +-
4 files changed, 285 insertions(+), 173 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
index 0f19e8e..31fe905 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
@@ -12,80 +12,88 @@ uint16_t ulp_act_sig_tbl[BNXT_ULP_ACT_SIG_TBL_MAX_SZ] = {
[BNXT_ULP_ACT_HID_015a] = 1,
[BNXT_ULP_ACT_HID_00eb] = 2,
[BNXT_ULP_ACT_HID_0043] = 3,
- [BNXT_ULP_ACT_HID_01d6] = 4,
- [BNXT_ULP_ACT_HID_015e] = 5,
- [BNXT_ULP_ACT_HID_00ef] = 6,
- [BNXT_ULP_ACT_HID_0047] = 7,
- [BNXT_ULP_ACT_HID_01da] = 8,
- [BNXT_ULP_ACT_HID_025b] = 9,
- [BNXT_ULP_ACT_HID_01ec] = 10,
- [BNXT_ULP_ACT_HID_0144] = 11,
- [BNXT_ULP_ACT_HID_02d7] = 12,
- [BNXT_ULP_ACT_HID_025f] = 13,
- [BNXT_ULP_ACT_HID_01f0] = 14,
- [BNXT_ULP_ACT_HID_0148] = 15,
- [BNXT_ULP_ACT_HID_02db] = 16,
- [BNXT_ULP_ACT_HID_0000] = 17,
- [BNXT_ULP_ACT_HID_0002] = 18,
- [BNXT_ULP_ACT_HID_0800] = 19,
- [BNXT_ULP_ACT_HID_0101] = 20,
- [BNXT_ULP_ACT_HID_0020] = 21,
- [BNXT_ULP_ACT_HID_0901] = 22,
- [BNXT_ULP_ACT_HID_0121] = 23,
- [BNXT_ULP_ACT_HID_0004] = 24,
- [BNXT_ULP_ACT_HID_0804] = 25,
- [BNXT_ULP_ACT_HID_0105] = 26,
- [BNXT_ULP_ACT_HID_0024] = 27,
- [BNXT_ULP_ACT_HID_0905] = 28,
- [BNXT_ULP_ACT_HID_0125] = 29,
- [BNXT_ULP_ACT_HID_0001] = 30,
- [BNXT_ULP_ACT_HID_0005] = 31,
- [BNXT_ULP_ACT_HID_0009] = 32,
- [BNXT_ULP_ACT_HID_000d] = 33,
- [BNXT_ULP_ACT_HID_0021] = 34,
- [BNXT_ULP_ACT_HID_0029] = 35,
- [BNXT_ULP_ACT_HID_0025] = 36,
- [BNXT_ULP_ACT_HID_002d] = 37,
- [BNXT_ULP_ACT_HID_0801] = 38,
- [BNXT_ULP_ACT_HID_0809] = 39,
- [BNXT_ULP_ACT_HID_0805] = 40,
- [BNXT_ULP_ACT_HID_080d] = 41,
- [BNXT_ULP_ACT_HID_0c15] = 42,
- [BNXT_ULP_ACT_HID_0c19] = 43,
- [BNXT_ULP_ACT_HID_02f6] = 44,
- [BNXT_ULP_ACT_HID_04f8] = 45,
- [BNXT_ULP_ACT_HID_01df] = 46,
- [BNXT_ULP_ACT_HID_05e3] = 47,
- [BNXT_ULP_ACT_HID_02fa] = 48,
- [BNXT_ULP_ACT_HID_04fc] = 49,
- [BNXT_ULP_ACT_HID_01e3] = 50,
- [BNXT_ULP_ACT_HID_05e7] = 51,
- [BNXT_ULP_ACT_HID_03f7] = 52,
- [BNXT_ULP_ACT_HID_05f9] = 53,
- [BNXT_ULP_ACT_HID_02e0] = 54,
- [BNXT_ULP_ACT_HID_06e4] = 55,
- [BNXT_ULP_ACT_HID_03fb] = 56,
- [BNXT_ULP_ACT_HID_05fd] = 57,
- [BNXT_ULP_ACT_HID_02e4] = 58,
- [BNXT_ULP_ACT_HID_06e8] = 59,
- [BNXT_ULP_ACT_HID_040d] = 60,
- [BNXT_ULP_ACT_HID_040f] = 61,
- [BNXT_ULP_ACT_HID_0413] = 62,
- [BNXT_ULP_ACT_HID_0c0d] = 63,
- [BNXT_ULP_ACT_HID_0567] = 64,
- [BNXT_ULP_ACT_HID_0a49] = 65,
- [BNXT_ULP_ACT_HID_050e] = 66,
- [BNXT_ULP_ACT_HID_0d0e] = 67,
- [BNXT_ULP_ACT_HID_0668] = 68,
- [BNXT_ULP_ACT_HID_0b4a] = 69,
- [BNXT_ULP_ACT_HID_0411] = 70,
- [BNXT_ULP_ACT_HID_056b] = 71,
- [BNXT_ULP_ACT_HID_0a4d] = 72,
- [BNXT_ULP_ACT_HID_0c11] = 73,
- [BNXT_ULP_ACT_HID_0512] = 74,
- [BNXT_ULP_ACT_HID_0d12] = 75,
- [BNXT_ULP_ACT_HID_066c] = 76,
- [BNXT_ULP_ACT_HID_0b4e] = 77
+ [BNXT_ULP_ACT_HID_03d8] = 4,
+ [BNXT_ULP_ACT_HID_02c1] = 5,
+ [BNXT_ULP_ACT_HID_015e] = 6,
+ [BNXT_ULP_ACT_HID_00ef] = 7,
+ [BNXT_ULP_ACT_HID_0047] = 8,
+ [BNXT_ULP_ACT_HID_03dc] = 9,
+ [BNXT_ULP_ACT_HID_02c5] = 10,
+ [BNXT_ULP_ACT_HID_025b] = 11,
+ [BNXT_ULP_ACT_HID_01ec] = 12,
+ [BNXT_ULP_ACT_HID_0144] = 13,
+ [BNXT_ULP_ACT_HID_04d9] = 14,
+ [BNXT_ULP_ACT_HID_03c2] = 15,
+ [BNXT_ULP_ACT_HID_025f] = 16,
+ [BNXT_ULP_ACT_HID_01f0] = 17,
+ [BNXT_ULP_ACT_HID_0148] = 18,
+ [BNXT_ULP_ACT_HID_04dd] = 19,
+ [BNXT_ULP_ACT_HID_03c6] = 20,
+ [BNXT_ULP_ACT_HID_0000] = 21,
+ [BNXT_ULP_ACT_HID_0002] = 22,
+ [BNXT_ULP_ACT_HID_0800] = 23,
+ [BNXT_ULP_ACT_HID_0101] = 24,
+ [BNXT_ULP_ACT_HID_0020] = 25,
+ [BNXT_ULP_ACT_HID_0901] = 26,
+ [BNXT_ULP_ACT_HID_0121] = 27,
+ [BNXT_ULP_ACT_HID_0004] = 28,
+ [BNXT_ULP_ACT_HID_0804] = 29,
+ [BNXT_ULP_ACT_HID_0105] = 30,
+ [BNXT_ULP_ACT_HID_0024] = 31,
+ [BNXT_ULP_ACT_HID_0905] = 32,
+ [BNXT_ULP_ACT_HID_0125] = 33,
+ [BNXT_ULP_ACT_HID_0001] = 34,
+ [BNXT_ULP_ACT_HID_0005] = 35,
+ [BNXT_ULP_ACT_HID_0009] = 36,
+ [BNXT_ULP_ACT_HID_000d] = 37,
+ [BNXT_ULP_ACT_HID_0021] = 38,
+ [BNXT_ULP_ACT_HID_0029] = 39,
+ [BNXT_ULP_ACT_HID_0025] = 40,
+ [BNXT_ULP_ACT_HID_002d] = 41,
+ [BNXT_ULP_ACT_HID_0801] = 42,
+ [BNXT_ULP_ACT_HID_0809] = 43,
+ [BNXT_ULP_ACT_HID_0805] = 44,
+ [BNXT_ULP_ACT_HID_080d] = 45,
+ [BNXT_ULP_ACT_HID_0c15] = 46,
+ [BNXT_ULP_ACT_HID_0c19] = 47,
+ [BNXT_ULP_ACT_HID_02f6] = 48,
+ [BNXT_ULP_ACT_HID_04f8] = 49,
+ [BNXT_ULP_ACT_HID_01df] = 50,
+ [BNXT_ULP_ACT_HID_07e5] = 51,
+ [BNXT_ULP_ACT_HID_06ce] = 52,
+ [BNXT_ULP_ACT_HID_02fa] = 53,
+ [BNXT_ULP_ACT_HID_04fc] = 54,
+ [BNXT_ULP_ACT_HID_01e3] = 55,
+ [BNXT_ULP_ACT_HID_07e9] = 56,
+ [BNXT_ULP_ACT_HID_06d2] = 57,
+ [BNXT_ULP_ACT_HID_03f7] = 58,
+ [BNXT_ULP_ACT_HID_05f9] = 59,
+ [BNXT_ULP_ACT_HID_02e0] = 60,
+ [BNXT_ULP_ACT_HID_08e6] = 61,
+ [BNXT_ULP_ACT_HID_07cf] = 62,
+ [BNXT_ULP_ACT_HID_03fb] = 63,
+ [BNXT_ULP_ACT_HID_05fd] = 64,
+ [BNXT_ULP_ACT_HID_02e4] = 65,
+ [BNXT_ULP_ACT_HID_08ea] = 66,
+ [BNXT_ULP_ACT_HID_07d3] = 67,
+ [BNXT_ULP_ACT_HID_040d] = 68,
+ [BNXT_ULP_ACT_HID_040f] = 69,
+ [BNXT_ULP_ACT_HID_0413] = 70,
+ [BNXT_ULP_ACT_HID_0c0d] = 71,
+ [BNXT_ULP_ACT_HID_0567] = 72,
+ [BNXT_ULP_ACT_HID_0a49] = 73,
+ [BNXT_ULP_ACT_HID_050e] = 74,
+ [BNXT_ULP_ACT_HID_0d0e] = 75,
+ [BNXT_ULP_ACT_HID_0668] = 76,
+ [BNXT_ULP_ACT_HID_0b4a] = 77,
+ [BNXT_ULP_ACT_HID_0411] = 78,
+ [BNXT_ULP_ACT_HID_056b] = 79,
+ [BNXT_ULP_ACT_HID_0a4d] = 80,
+ [BNXT_ULP_ACT_HID_0c11] = 81,
+ [BNXT_ULP_ACT_HID_0512] = 82,
+ [BNXT_ULP_ACT_HID_0d12] = 83,
+ [BNXT_ULP_ACT_HID_066c] = 84,
+ [BNXT_ULP_ACT_HID_0b4e] = 85
};
struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
@@ -112,14 +120,25 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
.act_tid = 1
},
[4] = {
- .act_hid = BNXT_ULP_ACT_HID_01d6,
+ .act_hid = BNXT_ULP_ACT_HID_03d8,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
[5] = {
+ .act_hid = BNXT_ULP_ACT_HID_02c1,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_ING },
+ .act_tid = 1
+ },
+ [6] = {
.act_hid = BNXT_ULP_ACT_HID_015e,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -127,7 +146,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [6] = {
+ [7] = {
.act_hid = BNXT_ULP_ACT_HID_00ef,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -136,7 +155,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [7] = {
+ [8] = {
.act_hid = BNXT_ULP_ACT_HID_0047,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -144,16 +163,28 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [8] = {
- .act_hid = BNXT_ULP_ACT_HID_01da,
+ [9] = {
+ .act_hid = BNXT_ULP_ACT_HID_03dc,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [9] = {
+ [10] = {
+ .act_hid = BNXT_ULP_ACT_HID_02c5,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_COUNT |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_ING },
+ .act_tid = 1
+ },
+ [11] = {
.act_hid = BNXT_ULP_ACT_HID_025b,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -161,7 +192,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [10] = {
+ [12] = {
.act_hid = BNXT_ULP_ACT_HID_01ec,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -170,7 +201,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [11] = {
+ [13] = {
.act_hid = BNXT_ULP_ACT_HID_0144,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -178,16 +209,28 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [12] = {
- .act_hid = BNXT_ULP_ACT_HID_02d7,
+ [14] = {
+ .act_hid = BNXT_ULP_ACT_HID_04d9,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [13] = {
+ [15] = {
+ .act_hid = BNXT_ULP_ACT_HID_03c2,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_DEC_TTL |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_ING },
+ .act_tid = 1
+ },
+ [16] = {
.act_hid = BNXT_ULP_ACT_HID_025f,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -196,7 +239,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [14] = {
+ [17] = {
.act_hid = BNXT_ULP_ACT_HID_01f0,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -206,7 +249,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [15] = {
+ [18] = {
.act_hid = BNXT_ULP_ACT_HID_0148,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -215,51 +258,64 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [16] = {
- .act_hid = BNXT_ULP_ACT_HID_02db,
+ [19] = {
+ .act_hid = BNXT_ULP_ACT_HID_04dd,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [17] = {
+ [20] = {
+ .act_hid = BNXT_ULP_ACT_HID_03c6,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_DEC_TTL |
+ BNXT_ULP_ACTION_BIT_COUNT |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_ING },
+ .act_tid = 1
+ },
+ [21] = {
.act_hid = BNXT_ULP_ACT_HID_0000,
.act_sig = { .bits =
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [18] = {
+ [22] = {
.act_hid = BNXT_ULP_ACT_HID_0002,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DROP |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [19] = {
+ [23] = {
.act_hid = BNXT_ULP_ACT_HID_0800,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_POP_VLAN |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [20] = {
+ [24] = {
.act_hid = BNXT_ULP_ACT_HID_0101,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [21] = {
+ [25] = {
.act_hid = BNXT_ULP_ACT_HID_0020,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_VXLAN_DECAP |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [22] = {
+ [26] = {
.act_hid = BNXT_ULP_ACT_HID_0901,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -267,7 +323,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [23] = {
+ [27] = {
.act_hid = BNXT_ULP_ACT_HID_0121,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_VXLAN_DECAP |
@@ -275,14 +331,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [24] = {
+ [28] = {
.act_hid = BNXT_ULP_ACT_HID_0004,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [25] = {
+ [29] = {
.act_hid = BNXT_ULP_ACT_HID_0804,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -290,7 +346,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [26] = {
+ [30] = {
.act_hid = BNXT_ULP_ACT_HID_0105,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -298,7 +354,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [27] = {
+ [31] = {
.act_hid = BNXT_ULP_ACT_HID_0024,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -306,7 +362,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [28] = {
+ [32] = {
.act_hid = BNXT_ULP_ACT_HID_0905,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -315,7 +371,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [29] = {
+ [33] = {
.act_hid = BNXT_ULP_ACT_HID_0125,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -324,14 +380,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [30] = {
+ [34] = {
.act_hid = BNXT_ULP_ACT_HID_0001,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [31] = {
+ [35] = {
.act_hid = BNXT_ULP_ACT_HID_0005,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -339,7 +395,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [32] = {
+ [36] = {
.act_hid = BNXT_ULP_ACT_HID_0009,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -347,7 +403,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [33] = {
+ [37] = {
.act_hid = BNXT_ULP_ACT_HID_000d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -356,7 +412,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [34] = {
+ [38] = {
.act_hid = BNXT_ULP_ACT_HID_0021,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -364,7 +420,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [35] = {
+ [39] = {
.act_hid = BNXT_ULP_ACT_HID_0029,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -373,7 +429,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [36] = {
+ [40] = {
.act_hid = BNXT_ULP_ACT_HID_0025,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -382,7 +438,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [37] = {
+ [41] = {
.act_hid = BNXT_ULP_ACT_HID_002d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -392,7 +448,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [38] = {
+ [42] = {
.act_hid = BNXT_ULP_ACT_HID_0801,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -400,7 +456,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [39] = {
+ [43] = {
.act_hid = BNXT_ULP_ACT_HID_0809,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -409,7 +465,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [40] = {
+ [44] = {
.act_hid = BNXT_ULP_ACT_HID_0805,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -418,7 +474,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [41] = {
+ [45] = {
.act_hid = BNXT_ULP_ACT_HID_080d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -428,14 +484,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [42] = {
+ [46] = {
.act_hid = BNXT_ULP_ACT_HID_0c15,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_VXLAN_ENCAP |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 4
},
- [43] = {
+ [47] = {
.act_hid = BNXT_ULP_ACT_HID_0c19,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_VXLAN_ENCAP |
@@ -443,14 +499,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 4
},
- [44] = {
+ [48] = {
.act_hid = BNXT_ULP_ACT_HID_02f6,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [45] = {
+ [49] = {
.act_hid = BNXT_ULP_ACT_HID_04f8,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
@@ -458,22 +514,33 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [46] = {
+ [50] = {
.act_hid = BNXT_ULP_ACT_HID_01df,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [47] = {
- .act_hid = BNXT_ULP_ACT_HID_05e3,
+ [51] = {
+ .act_hid = BNXT_ULP_ACT_HID_07e5,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [48] = {
+ [52] = {
+ .act_hid = BNXT_ULP_ACT_HID_06ce,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .act_tid = 5
+ },
+ [53] = {
.act_hid = BNXT_ULP_ACT_HID_02fa,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -481,7 +548,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [49] = {
+ [54] = {
.act_hid = BNXT_ULP_ACT_HID_04fc,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -490,7 +557,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [50] = {
+ [55] = {
.act_hid = BNXT_ULP_ACT_HID_01e3,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -498,16 +565,28 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [51] = {
- .act_hid = BNXT_ULP_ACT_HID_05e7,
+ [56] = {
+ .act_hid = BNXT_ULP_ACT_HID_07e9,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [52] = {
+ [57] = {
+ .act_hid = BNXT_ULP_ACT_HID_06d2,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_COUNT |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .act_tid = 5
+ },
+ [58] = {
.act_hid = BNXT_ULP_ACT_HID_03f7,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -515,7 +594,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [53] = {
+ [59] = {
.act_hid = BNXT_ULP_ACT_HID_05f9,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -524,7 +603,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [54] = {
+ [60] = {
.act_hid = BNXT_ULP_ACT_HID_02e0,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -532,16 +611,28 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [55] = {
- .act_hid = BNXT_ULP_ACT_HID_06e4,
+ [61] = {
+ .act_hid = BNXT_ULP_ACT_HID_08e6,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [56] = {
+ [62] = {
+ .act_hid = BNXT_ULP_ACT_HID_07cf,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_DEC_TTL |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .act_tid = 5
+ },
+ [63] = {
.act_hid = BNXT_ULP_ACT_HID_03fb,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -550,7 +641,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [57] = {
+ [64] = {
.act_hid = BNXT_ULP_ACT_HID_05fd,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -560,7 +651,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [58] = {
+ [65] = {
.act_hid = BNXT_ULP_ACT_HID_02e4,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -569,30 +660,43 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [59] = {
- .act_hid = BNXT_ULP_ACT_HID_06e8,
+ [66] = {
+ .act_hid = BNXT_ULP_ACT_HID_08ea,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [60] = {
+ [67] = {
+ .act_hid = BNXT_ULP_ACT_HID_07d3,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_DEC_TTL |
+ BNXT_ULP_ACTION_BIT_COUNT |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .act_tid = 5
+ },
+ [68] = {
.act_hid = BNXT_ULP_ACT_HID_040d,
.act_sig = { .bits =
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [61] = {
+ [69] = {
.act_hid = BNXT_ULP_ACT_HID_040f,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DROP |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [62] = {
+ [70] = {
.act_hid = BNXT_ULP_ACT_HID_0413,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DROP |
@@ -600,14 +704,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [63] = {
+ [71] = {
.act_hid = BNXT_ULP_ACT_HID_0c0d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_POP_VLAN |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [64] = {
+ [72] = {
.act_hid = BNXT_ULP_ACT_HID_0567,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_VLAN_PCP |
@@ -616,7 +720,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [65] = {
+ [73] = {
.act_hid = BNXT_ULP_ACT_HID_0a49,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_VLAN_VID |
@@ -624,14 +728,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [66] = {
+ [74] = {
.act_hid = BNXT_ULP_ACT_HID_050e,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [67] = {
+ [75] = {
.act_hid = BNXT_ULP_ACT_HID_0d0e,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -639,7 +743,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [68] = {
+ [76] = {
.act_hid = BNXT_ULP_ACT_HID_0668,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -649,7 +753,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [69] = {
+ [77] = {
.act_hid = BNXT_ULP_ACT_HID_0b4a,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -658,14 +762,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [70] = {
+ [78] = {
.act_hid = BNXT_ULP_ACT_HID_0411,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [71] = {
+ [79] = {
.act_hid = BNXT_ULP_ACT_HID_056b,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -675,7 +779,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [72] = {
+ [80] = {
.act_hid = BNXT_ULP_ACT_HID_0a4d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -684,7 +788,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [73] = {
+ [81] = {
.act_hid = BNXT_ULP_ACT_HID_0c11,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -692,7 +796,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [74] = {
+ [82] = {
.act_hid = BNXT_ULP_ACT_HID_0512,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -700,7 +804,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [75] = {
+ [83] = {
.act_hid = BNXT_ULP_ACT_HID_0d12,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -709,7 +813,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [76] = {
+ [84] = {
.act_hid = BNXT_ULP_ACT_HID_066c,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -720,7 +824,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [77] = {
+ [85] = {
.act_hid = BNXT_ULP_ACT_HID_0b4e,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
index 200a5a6..9de45cd 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
@@ -17241,7 +17241,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17311,7 +17311,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17325,7 +17325,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17339,7 +17339,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17353,7 +17353,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17367,7 +17367,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17381,7 +17381,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17451,7 +17451,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index b5deaf6..c9fe1bc 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -18,7 +18,7 @@
#define BNXT_ULP_CLASS_HID_SHFTL 31
#define BNXT_ULP_CLASS_HID_MASK 2047
#define BNXT_ULP_ACT_SIG_TBL_MAX_SZ 4096
-#define BNXT_ULP_ACT_MATCH_LIST_MAX_SZ 78
+#define BNXT_ULP_ACT_MATCH_LIST_MAX_SZ 86
#define BNXT_ULP_ACT_HID_LOW_PRIME 7919
#define BNXT_ULP_ACT_HID_HIGH_PRIME 4721
#define BNXT_ULP_ACT_HID_SHFTR 23
@@ -786,19 +786,23 @@ enum bnxt_ulp_act_hid {
BNXT_ULP_ACT_HID_015a = 0x015a,
BNXT_ULP_ACT_HID_00eb = 0x00eb,
BNXT_ULP_ACT_HID_0043 = 0x0043,
- BNXT_ULP_ACT_HID_01d6 = 0x01d6,
+ BNXT_ULP_ACT_HID_03d8 = 0x03d8,
+ BNXT_ULP_ACT_HID_02c1 = 0x02c1,
BNXT_ULP_ACT_HID_015e = 0x015e,
BNXT_ULP_ACT_HID_00ef = 0x00ef,
BNXT_ULP_ACT_HID_0047 = 0x0047,
- BNXT_ULP_ACT_HID_01da = 0x01da,
+ BNXT_ULP_ACT_HID_03dc = 0x03dc,
+ BNXT_ULP_ACT_HID_02c5 = 0x02c5,
BNXT_ULP_ACT_HID_025b = 0x025b,
BNXT_ULP_ACT_HID_01ec = 0x01ec,
BNXT_ULP_ACT_HID_0144 = 0x0144,
- BNXT_ULP_ACT_HID_02d7 = 0x02d7,
+ BNXT_ULP_ACT_HID_04d9 = 0x04d9,
+ BNXT_ULP_ACT_HID_03c2 = 0x03c2,
BNXT_ULP_ACT_HID_025f = 0x025f,
BNXT_ULP_ACT_HID_01f0 = 0x01f0,
BNXT_ULP_ACT_HID_0148 = 0x0148,
- BNXT_ULP_ACT_HID_02db = 0x02db,
+ BNXT_ULP_ACT_HID_04dd = 0x04dd,
+ BNXT_ULP_ACT_HID_03c6 = 0x03c6,
BNXT_ULP_ACT_HID_0000 = 0x0000,
BNXT_ULP_ACT_HID_0002 = 0x0002,
BNXT_ULP_ACT_HID_0800 = 0x0800,
@@ -829,19 +833,23 @@ enum bnxt_ulp_act_hid {
BNXT_ULP_ACT_HID_02f6 = 0x02f6,
BNXT_ULP_ACT_HID_04f8 = 0x04f8,
BNXT_ULP_ACT_HID_01df = 0x01df,
- BNXT_ULP_ACT_HID_05e3 = 0x05e3,
+ BNXT_ULP_ACT_HID_07e5 = 0x07e5,
+ BNXT_ULP_ACT_HID_06ce = 0x06ce,
BNXT_ULP_ACT_HID_02fa = 0x02fa,
BNXT_ULP_ACT_HID_04fc = 0x04fc,
BNXT_ULP_ACT_HID_01e3 = 0x01e3,
- BNXT_ULP_ACT_HID_05e7 = 0x05e7,
+ BNXT_ULP_ACT_HID_07e9 = 0x07e9,
+ BNXT_ULP_ACT_HID_06d2 = 0x06d2,
BNXT_ULP_ACT_HID_03f7 = 0x03f7,
BNXT_ULP_ACT_HID_05f9 = 0x05f9,
BNXT_ULP_ACT_HID_02e0 = 0x02e0,
- BNXT_ULP_ACT_HID_06e4 = 0x06e4,
+ BNXT_ULP_ACT_HID_08e6 = 0x08e6,
+ BNXT_ULP_ACT_HID_07cf = 0x07cf,
BNXT_ULP_ACT_HID_03fb = 0x03fb,
BNXT_ULP_ACT_HID_05fd = 0x05fd,
BNXT_ULP_ACT_HID_02e4 = 0x02e4,
- BNXT_ULP_ACT_HID_06e8 = 0x06e8,
+ BNXT_ULP_ACT_HID_08ea = 0x08ea,
+ BNXT_ULP_ACT_HID_07d3 = 0x07d3,
BNXT_ULP_ACT_HID_040d = 0x040d,
BNXT_ULP_ACT_HID_040f = 0x040f,
BNXT_ULP_ACT_HID_0413 = 0x0413,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c
index 4388a0a..f2e2a59 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c
@@ -259,8 +259,8 @@ struct bnxt_ulp_rte_act_info ulp_act_info[] = {
.proto_act_func = NULL
},
[RTE_FLOW_ACTION_TYPE_DEC_TTL] = {
- .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
- .proto_act_func = ulp_rte_dec_ttl_act_handler
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_dec_ttl_act_handler
},
[RTE_FLOW_ACTION_TYPE_SET_TTL] = {
.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
--
2.7.4
* [dpdk-dev] [PATCH v2 08/20] net/bnxt: configure parif for the egress rules
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (6 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 07/20] net/bnxt: nat template changes Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 09/20] net/bnxt: ignore VLAN priority mask Somnath Kotur
` (12 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
The parif for the egress rules needs to be dynamically
configured based on the port type.
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
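For context, a minimal sketch of the selection that the new
BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF opcode performs: if the
"match port is a VF representor" computed field is set, the VF function
parif is used, otherwise the driver function parif. The helper below is
illustrative only (it does not exist in the driver) and assumes the
driver's internal headers for the computed-field macro and indices.

static uint16_t
ulp_egress_parif_select(struct bnxt_ulp_mapper_parms *parms)
{
	/* VF-representor ports take the VF function parif ... */
	if (ULP_COMP_FLD_IDX_RD(parms, BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP))
		return ULP_COMP_FLD_IDX_RD(parms,
					   BNXT_ULP_CF_IDX_VF_FUNC_PARIF);
	/* ... all other ports take the driver function parif. */
	return ULP_COMP_FLD_IDX_RD(parms, BNXT_ULP_CF_IDX_DRV_FUNC_PARIF);
}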
drivers/net/bnxt/tf_ulp/ulp_def_rules.c | 11 ++---
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 35 ++++++++++++++++
drivers/net/bnxt/tf_ulp/ulp_port_db.c | 2 +
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 54 ++++++++++++++++++++-----
drivers/net/bnxt/tf_ulp/ulp_template_db_act.c | 16 ++++++--
drivers/net/bnxt/tf_ulp/ulp_template_db_class.c | 25 +++++++++---
drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h | 14 ++++---
7 files changed, 123 insertions(+), 34 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
index d86e4c9..ddc6da8 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
@@ -81,17 +81,12 @@ ulp_set_parif_in_comp_fld(struct bnxt_ulp_context *ulp_ctx,
if (rc)
return rc;
- if (parif_type == BNXT_ULP_PHY_PORT_PARIF) {
+ if (parif_type == BNXT_ULP_PHY_PORT_PARIF)
idx = BNXT_ULP_CF_IDX_PHY_PORT_PARIF;
- /* Parif needs to be reset to a free partition */
- parif += BNXT_ULP_FREE_PARIF_BASE;
- } else if (parif_type == BNXT_ULP_DRV_FUNC_PARIF) {
+ else if (parif_type == BNXT_ULP_DRV_FUNC_PARIF)
idx = BNXT_ULP_CF_IDX_DRV_FUNC_PARIF;
- /* Parif needs to be reset to a free partition */
- parif += BNXT_ULP_FREE_PARIF_BASE;
- } else {
+ else
idx = BNXT_ULP_CF_IDX_VF_FUNC_PARIF;
- }
ULP_COMP_FLD_IDX_WR(mapper_params, idx, parif);
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 2d3373d..a071c07 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -998,6 +998,41 @@ ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms,
return -EINVAL;
}
break;
+ case BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF:
+ if (!ulp_operand_read(fld->result_operand,
+ (uint8_t *)&idx,
+ sizeof(uint16_t))) {
+ BNXT_TF_DBG(ERR, "%s key operand read failed.\n", name);
+ return -EINVAL;
+ }
+ idx = tfp_be_to_cpu_16(idx);
+ if (idx >= BNXT_ULP_CF_IDX_LAST) {
+ BNXT_TF_DBG(ERR, "%s invalid index %u\n", name, idx);
+ return -EINVAL;
+ }
+ /* check if the computed field is set */
+ if (ULP_COMP_FLD_IDX_RD(parms, idx))
+ val = fld->result_operand_true;
+ else
+ val = fld->result_operand_false;
+
+ /* read the appropriate computed field */
+ if (!ulp_operand_read(val, (uint8_t *)&idx, sizeof(uint16_t))) {
+ BNXT_TF_DBG(ERR, "%s val operand read failed\n", name);
+ return -EINVAL;
+ }
+ idx = tfp_be_to_cpu_16(idx);
+ if (idx >= BNXT_ULP_CF_IDX_LAST) {
+ BNXT_TF_DBG(ERR, "%s invalid index %u\n", name, idx);
+ return -EINVAL;
+ }
+ val = ulp_blob_push_32(blob, &parms->comp_fld[idx],
+ fld->field_bit_size);
+ if (!val) {
+ BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name);
+ return -EINVAL;
+ }
+ break;
default:
BNXT_TF_DBG(ERR, "invalid result mapper opcode 0x%x\n",
fld->result_opcode);
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.c b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
index 0fc7c0a..3087647 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
@@ -372,6 +372,8 @@ ulp_port_db_parif_get(struct bnxt_ulp_context *ulp_ctxt,
phy_port_id = port_db->ulp_func_id_tbl[func_id].phy_port_id;
*parif = port_db->phy_port_list[phy_port_id].port_parif;
}
+ /* Parif needs to be reset to a free partition */
+ *parif += BNXT_ULP_FREE_PARIF_BASE;
return 0;
}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index 39f801b..67f9319 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -167,31 +167,63 @@ bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
uint32_t ifindex;
uint16_t port_id, parif;
+ uint32_t mtype;
enum bnxt_ulp_direction_type dir;
/* get the direction details */
dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
+ /* read the port id details */
+ port_id = ULP_COMP_FLD_IDX_RD(params,
+ BNXT_ULP_CF_IDX_INCOMING_IF);
+ if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
+ port_id,
+ &ifindex)) {
+ BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
+ return;
+ }
+
if (dir == BNXT_ULP_DIR_INGRESS) {
- /* read the port id details */
- port_id = ULP_COMP_FLD_IDX_RD(params,
- BNXT_ULP_CF_IDX_INCOMING_IF);
- if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
- port_id,
- &ifindex)) {
- BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
- return;
- }
/* Set port PARIF */
if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
BNXT_ULP_PHY_PORT_PARIF, &parif)) {
BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
return;
}
- /* Parif needs to be reset to a free partition */
- parif += BNXT_ULP_FREE_PARIF_BASE;
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
parif);
+ } else {
+ /* Get the match port type */
+ mtype = ULP_COMP_FLD_IDX_RD(params,
+ BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
+ if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
+ 1);
+ /* Set VF func PARIF */
+ if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
+ BNXT_ULP_VF_FUNC_PARIF,
+ &parif)) {
+ BNXT_TF_DBG(ERR,
+ "ParseErr:ifindex is not valid\n");
+ return;
+ }
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
+ parif);
+ } else {
+ /* Set DRV func PARIF */
+ if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
+ BNXT_ULP_DRV_FUNC_PARIF,
+ &parif)) {
+ BNXT_TF_DBG(ERR,
+ "ParseErr:ifindex is not valid\n");
+ return;
+ }
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
+ parif);
+ }
}
}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
index 31fe905..58b581c 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
@@ -1808,11 +1808,19 @@ struct bnxt_ulp_mapper_result_field_info ulp_act_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_ACT_BIT_THEN_CONST_ELSE_CONST,
.result_operand = {
- BNXT_ULP_SYM_DECAP_FUNC_THRU_TUN,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 56) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 48) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 40) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 32) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 24) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 16) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 8) & 0xff,
+ (uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 12,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
index 9de45cd..330c5ec 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
@@ -5058,7 +5058,9 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = {
},
{
.field_bit_size = 2,
- .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO,
+ .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
.spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
.spec_operand = {0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
@@ -5149,7 +5151,9 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = {
},
{
.field_bit_size = 2,
- .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO,
+ .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
.spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
.spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
@@ -17054,11 +17058,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
.result_operand = {
- BNXT_ULP_SYM_VF_FUNC_PARIF,
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index c9fe1bc..f08065b 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -127,11 +127,12 @@ enum bnxt_ulp_cf_idx {
BNXT_ULP_CF_IDX_ACT_PORT_IS_SET = 35,
BNXT_ULP_CF_IDX_ACT_PORT_TYPE = 36,
BNXT_ULP_CF_IDX_MATCH_PORT_TYPE = 37,
- BNXT_ULP_CF_IDX_VF_TO_VF = 38,
- BNXT_ULP_CF_IDX_L3_HDR_CNT = 39,
- BNXT_ULP_CF_IDX_L4_HDR_CNT = 40,
- BNXT_ULP_CF_IDX_VFR_MODE = 41,
- BNXT_ULP_CF_IDX_LAST = 42
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP = 38,
+ BNXT_ULP_CF_IDX_VF_TO_VF = 39,
+ BNXT_ULP_CF_IDX_L3_HDR_CNT = 40,
+ BNXT_ULP_CF_IDX_L4_HDR_CNT = 41,
+ BNXT_ULP_CF_IDX_VFR_MODE = 42,
+ BNXT_ULP_CF_IDX_LAST = 43
};
enum bnxt_ulp_cond_opcode {
@@ -215,7 +216,8 @@ enum bnxt_ulp_mapper_opc {
BNXT_ULP_MAPPER_OPC_SET_TO_ENCAP_ACT_PROP_SZ = 8,
BNXT_ULP_MAPPER_OPC_IF_ACT_BIT_THEN_ACT_PROP_ELSE_CONST = 9,
BNXT_ULP_MAPPER_OPC_IF_ACT_BIT_THEN_CONST_ELSE_CONST = 10,
- BNXT_ULP_MAPPER_OPC_LAST = 11
+ BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF = 11,
+ BNXT_ULP_MAPPER_OPC_LAST = 12
};
enum bnxt_ulp_mark_db_opcode {
--
2.7.4
* [dpdk-dev] [PATCH v2 09/20] net/bnxt: ignore VLAN priority mask
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (7 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 08/20] net/bnxt: configure parif for the egress rules Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 10/20] net/bnxt: add egress template with VLAN tag match Somnath Kotur
` (11 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
This is a workaround for OVS setting offload rules that pass
the VLAN priority mask as a wildcard, which is not currently
supported.
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
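To illustrate the case being worked around: OVS-generated rules carry a
VLAN item whose TCI mask covers only the 12-bit VLAN id and leaves the
3-bit priority (PCP) as a wildcard. A hedged example of such an item,
built with the public rte_flow API (the values are arbitrary and not
taken from this patch):

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Match VLAN id 100, priority left as a don't-care. */
static const struct rte_flow_item_vlan vlan_spec = {
	.tci = RTE_BE16(100),
};
static const struct rte_flow_item_vlan vlan_mask = {
	.tci = RTE_BE16(0x0fff),	/* id bits only, PCP wildcarded */
};
static const struct rte_flow_item vlan_item = {
	.type = RTE_FLOW_ITEM_TYPE_VLAN,
	.spec = &vlan_spec,
	.mask = &vlan_mask,
};

With this change the parser skips the priority portion of the mask
instead of failing the template lookup for such rules.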
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index 67f9319..665f5d3 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -709,8 +709,17 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
vlan_tag |= ~ULP_VLAN_TAG_MASK;
vlan_tag = htons(vlan_tag);
+#ifdef ULP_DONT_IGNORE_TOS
ulp_rte_prsr_mask_copy(params, &idx, &priority,
sizeof(priority));
+#else
+ /*
+ * The priority field is ignored since OVS is setting it as
+ * wild card match and it is not supported. This is a work
+ * around and shall be addressed in the future.
+ */
+ idx += 1;
+#endif
ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
sizeof(vlan_tag));
ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
--
2.7.4
* [dpdk-dev] [PATCH v2 10/20] net/bnxt: add egress template with VLAN tag match
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (8 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 09/20] net/bnxt: ignore VLAN priority mask Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 11/20] net/bnxt: modify tf shadow tcam to use common tf hash Somnath Kotur
` (10 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Added an egress template with VLAN tag match.
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Shahaji Bhosle <shahaji.bhosle@broadcom.com>
---
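The flows targeted by the new templates follow the pattern sketched
below (illustrative only; specs, masks, attributes and actions are
omitted). Matching on SMAC/DMAC, the ethertype and optionally the outer
VLAN id selects one of the class signature entries added here; IPV4 may
be replaced by IPV6, and a UDP or TCP item may follow.

#include <rte_flow.h>

/* Egress pattern served by the new templates: ETH / VLAN / IPV4. */
static const struct rte_flow_item egress_vlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};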
drivers/net/bnxt/tf_ulp/ulp_template_db_class.c | 501 +++++++++++++++++++++++-
drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h | 28 +-
2 files changed, 509 insertions(+), 20 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
index 330c5ec..41d1d87 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
@@ -162,7 +162,31 @@ uint16_t ulp_class_sig_tbl[BNXT_ULP_CLASS_SIG_TBL_MAX_SZ] = {
[BNXT_ULP_CLASS_HID_01d1] = 151,
[BNXT_ULP_CLASS_HID_0319] = 152,
[BNXT_ULP_CLASS_HID_01cd] = 153,
- [BNXT_ULP_CLASS_HID_0305] = 154
+ [BNXT_ULP_CLASS_HID_0305] = 154,
+ [BNXT_ULP_CLASS_HID_01e2] = 155,
+ [BNXT_ULP_CLASS_HID_032a] = 156,
+ [BNXT_ULP_CLASS_HID_0650] = 157,
+ [BNXT_ULP_CLASS_HID_0198] = 158,
+ [BNXT_ULP_CLASS_HID_01de] = 159,
+ [BNXT_ULP_CLASS_HID_0316] = 160,
+ [BNXT_ULP_CLASS_HID_066c] = 161,
+ [BNXT_ULP_CLASS_HID_01a4] = 162,
+ [BNXT_ULP_CLASS_HID_01c2] = 163,
+ [BNXT_ULP_CLASS_HID_030a] = 164,
+ [BNXT_ULP_CLASS_HID_0670] = 165,
+ [BNXT_ULP_CLASS_HID_01b8] = 166,
+ [BNXT_ULP_CLASS_HID_003e] = 167,
+ [BNXT_ULP_CLASS_HID_02f6] = 168,
+ [BNXT_ULP_CLASS_HID_078c] = 169,
+ [BNXT_ULP_CLASS_HID_0044] = 170,
+ [BNXT_ULP_CLASS_HID_01d2] = 171,
+ [BNXT_ULP_CLASS_HID_031a] = 172,
+ [BNXT_ULP_CLASS_HID_0660] = 173,
+ [BNXT_ULP_CLASS_HID_01a8] = 174,
+ [BNXT_ULP_CLASS_HID_01ce] = 175,
+ [BNXT_ULP_CLASS_HID_0306] = 176,
+ [BNXT_ULP_CLASS_HID_067c] = 177,
+ [BNXT_ULP_CLASS_HID_01b4] = 178
};
struct bnxt_ulp_class_match_info ulp_class_match_list[] = {
@@ -2833,6 +2857,382 @@ struct bnxt_ulp_class_match_info ulp_class_match_list[] = {
BNXT_ULP_MATCH_TYPE_BITMASK_EM },
.class_tid = 21,
.wc_pri = 11
+ },
+ [155] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01e2,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 12
+ },
+ [156] = {
+ .class_hid = BNXT_ULP_CLASS_HID_032a,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 13
+ },
+ [157] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0650,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 14
+ },
+ [158] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0198,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 15
+ },
+ [159] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01de,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 16
+ },
+ [160] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0316,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 17
+ },
+ [161] = {
+ .class_hid = BNXT_ULP_CLASS_HID_066c,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 18
+ },
+ [162] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01a4,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 19
+ },
+ [163] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01c2,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 20
+ },
+ [164] = {
+ .class_hid = BNXT_ULP_CLASS_HID_030a,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 21
+ },
+ [165] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0670,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 22
+ },
+ [166] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01b8,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 23
+ },
+ [167] = {
+ .class_hid = BNXT_ULP_CLASS_HID_003e,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 24
+ },
+ [168] = {
+ .class_hid = BNXT_ULP_CLASS_HID_02f6,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 25
+ },
+ [169] = {
+ .class_hid = BNXT_ULP_CLASS_HID_078c,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 26
+ },
+ [170] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0044,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 27
+ },
+ [171] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01d2,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 28
+ },
+ [172] = {
+ .class_hid = BNXT_ULP_CLASS_HID_031a,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 29
+ },
+ [173] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0660,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 30
+ },
+ [174] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01a8,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 31
+ },
+ [175] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01ce,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 32
+ },
+ [176] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0306,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 33
+ },
+ [177] = {
+ .class_hid = BNXT_ULP_CLASS_HID_067c,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 34
+ },
+ [178] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01b4,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 35
}
};
@@ -3236,7 +3636,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE,
- .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW,
+ .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
.srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
@@ -3255,7 +3655,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE,
- .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW,
+ .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
.srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
@@ -3346,7 +3746,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE,
- .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW,
+ .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
.srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
@@ -12534,8 +12934,18 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = {
},
{
.field_bit_size = 12,
- .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO,
- .spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO
+ .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_HDR_FIELD,
+ .mask_operand = {
+ (BNXT_ULP_HF21_IDX_OO_VLAN_VID >> 8) & 0xff,
+ BNXT_ULP_HF21_IDX_OO_VLAN_VID & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_HDR_FIELD,
+ .spec_operand = {
+ (BNXT_ULP_HF21_IDX_OO_VLAN_VID >> 8) & 0xff,
+ BNXT_ULP_HF21_IDX_OO_VLAN_VID & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 12,
@@ -12594,8 +13004,15 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = {
},
{
.field_bit_size = 2,
- .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO,
- .spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO
+ .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ .spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_COMP_FIELD,
+ .spec_operand = {
+ (BNXT_ULP_CF_IDX_O_VTAG_NUM >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_O_VTAG_NUM & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 2,
@@ -16307,11 +16724,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
.result_operand = {
- BNXT_ULP_SYM_VF_FUNC_PARIF,
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
@@ -16498,11 +16926,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
.result_operand = {
- BNXT_ULP_SYM_VF_FUNC_PARIF,
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
@@ -16689,7 +17128,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
+ .result_operand = {
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
@@ -16876,11 +17330,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
.result_operand = {
- BNXT_ULP_SYM_VF_FUNC_PARIF,
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index f08065b..ac651f6 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -11,7 +11,7 @@
#define BNXT_ULP_LOG2_MAX_NUM_DEV 2
#define BNXT_ULP_CACHE_TBL_MAX_SZ 4
#define BNXT_ULP_CLASS_SIG_TBL_MAX_SZ 2048
-#define BNXT_ULP_CLASS_MATCH_LIST_MAX_SZ 155
+#define BNXT_ULP_CLASS_MATCH_LIST_MAX_SZ 179
#define BNXT_ULP_CLASS_HID_LOW_PRIME 7919
#define BNXT_ULP_CLASS_HID_HIGH_PRIME 7907
#define BNXT_ULP_CLASS_HID_SHFTR 32
@@ -781,7 +781,31 @@ enum bnxt_ulp_class_hid {
BNXT_ULP_CLASS_HID_01d1 = 0x01d1,
BNXT_ULP_CLASS_HID_0319 = 0x0319,
BNXT_ULP_CLASS_HID_01cd = 0x01cd,
- BNXT_ULP_CLASS_HID_0305 = 0x0305
+ BNXT_ULP_CLASS_HID_0305 = 0x0305,
+ BNXT_ULP_CLASS_HID_01e2 = 0x01e2,
+ BNXT_ULP_CLASS_HID_032a = 0x032a,
+ BNXT_ULP_CLASS_HID_0650 = 0x0650,
+ BNXT_ULP_CLASS_HID_0198 = 0x0198,
+ BNXT_ULP_CLASS_HID_01de = 0x01de,
+ BNXT_ULP_CLASS_HID_0316 = 0x0316,
+ BNXT_ULP_CLASS_HID_066c = 0x066c,
+ BNXT_ULP_CLASS_HID_01a4 = 0x01a4,
+ BNXT_ULP_CLASS_HID_01c2 = 0x01c2,
+ BNXT_ULP_CLASS_HID_030a = 0x030a,
+ BNXT_ULP_CLASS_HID_0670 = 0x0670,
+ BNXT_ULP_CLASS_HID_01b8 = 0x01b8,
+ BNXT_ULP_CLASS_HID_003e = 0x003e,
+ BNXT_ULP_CLASS_HID_02f6 = 0x02f6,
+ BNXT_ULP_CLASS_HID_078c = 0x078c,
+ BNXT_ULP_CLASS_HID_0044 = 0x0044,
+ BNXT_ULP_CLASS_HID_01d2 = 0x01d2,
+ BNXT_ULP_CLASS_HID_031a = 0x031a,
+ BNXT_ULP_CLASS_HID_0660 = 0x0660,
+ BNXT_ULP_CLASS_HID_01a8 = 0x01a8,
+ BNXT_ULP_CLASS_HID_01ce = 0x01ce,
+ BNXT_ULP_CLASS_HID_0306 = 0x0306,
+ BNXT_ULP_CLASS_HID_067c = 0x067c,
+ BNXT_ULP_CLASS_HID_01b4 = 0x01b4
};
enum bnxt_ulp_act_hid {
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v2 11/20] net/bnxt: modify tf shadow tcam to use common tf hash
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (9 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 10/20] net/bnxt: add egress template with VLAN tag match Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 12/20] net/bnxt: added shadow table capability with search Somnath Kotur
` (9 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Removed the hash calculation from tf_shadow_tcam in favor of using a new
common implementation.
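For reference, the removed per-module hash was a table-driven, reflected CRC-32
(polynomial 0xedb88320) computed over the key from the last byte to the first.
A minimal sketch of that computation follows, in bit-serial form instead of the
removed 256-entry table; the common tf_hash_calc_crc32() helper is assumed to
provide an equivalent CRC-32 (the sketch itself is illustrative only):

/*
 * Sketch only: reflected CRC-32, poly 0xedb88320, equivalent to the
 * table-driven loop removed by this patch. Bytes are processed in the
 * same reverse order as the removed code.
 */
#include <stdint.h>

static uint32_t
example_crc32(const uint8_t *key, uint32_t len)
{
	uint32_t crc = ~0U;
	int bit;

	while (len--) {
		crc ^= key[len];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return ~crc;
}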
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Farah Smith <farah.smith@broadcom.com>
Reviewed-by: Ajit Kumar Khaparde <ajit.khaparde@broadcom.com>
---
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 77 +------------------------------
1 file changed, 2 insertions(+), 75 deletions(-)
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
index 51aae4f..beaea03 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
@@ -7,6 +7,7 @@
#include "tf_util.h"
#include "tfp.h"
#include "tf_shadow_tcam.h"
+#include "tf_hash.h"
/**
* The implementation includes 3 tables per tcam table type.
@@ -164,74 +165,6 @@ struct tf_shadow_tcam_db {
struct tf_shadow_tcam_ctxt *ctxt[TF_TCAM_TBL_TYPE_MAX];
};
-/* CRC polynomial 0xedb88320 */
-static const uint32_t tf_shadow_tcam_crc32tbl[] = {
- 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
- 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
- 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
- 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
- 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
- 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
- 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
- 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
- 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
- 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
- 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
- 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
- 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
- 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
- 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
- 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
- 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
- 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
- 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
- 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
- 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
- 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
- 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
- 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
- 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
- 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
- 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
- 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
- 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
- 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
- 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
- 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
- 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
- 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
- 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
- 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
- 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
- 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
- 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
- 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
- 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
- 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
- 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
- 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
- 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
- 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
- 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
- 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
- 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
- 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
- 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
- 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
- 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
- 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
- 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
- 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
- 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
- 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
- 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
- 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
- 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
- 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
- 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
- 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
-};
-
/**
* Returns the number of entries in the contexts shadow table.
*/
@@ -289,13 +222,7 @@ tf_shadow_tcam_res_cpy(struct tf_shadow_tcam_ctxt *ctxt,
static uint32_t
tf_shadow_tcam_crc32_calc(uint8_t *key, uint32_t len)
{
- uint32_t crc = ~0U;
-
- while (len--)
- crc = tf_shadow_tcam_crc32tbl[(crc ^ key[len]) & 0xff] ^
- (crc >> 8);
-
- return ~crc;
+ return tf_hash_calc_crc32(key, len);
}
/**
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v2 12/20] net/bnxt: added shadow table capability with search
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (10 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 11/20] net/bnxt: modify tf shadow tcam to use common tf hash Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 13/20] net/bnxt: ulp mapper changes to use tbl search Somnath Kotur
` (8 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
- Added Index Table shadow tables for searching
- Added Search API to allow reuse of Table entries
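A minimal caller-side sketch of the new API is shown below. It uses only the
tf_search_tbl_entry_parms fields and tf_search_tbl_entry()/tf_set_tbl_entry()
calls introduced or referenced by this series; the wrapper function, encap_rec
and encap_len are hypothetical:

/*
 * Sketch only: search for an existing 16B encap record and allocate an
 * index on MISS. Assumes the tf_core.h declarations added in this patch.
 */
static int
example_encap_search(struct tf *tfp, uint8_t *encap_rec, uint16_t encap_len)
{
	struct tf_search_tbl_entry_parms sp = { 0 };
	int rc;

	sp.dir = TF_DIR_TX;
	sp.type = TF_TBL_TYPE_ACT_ENCAP_16B;
	sp.result = encap_rec;             /* result data to search for */
	sp.result_sz_in_bytes = encap_len;
	sp.alloc = 1;                      /* allocate an index on MISS */

	rc = tf_search_tbl_entry(tfp, &sp);
	if (rc)
		return rc;

	if (sp.search_status == REJECT)
		return -ENOMEM;            /* no shadow/hash space left */

	if (sp.search_status == MISS) {
		/*
		 * sp.idx was allocated for this result; the caller must
		 * program it with tf_set_tbl_entry() before use.
		 */
	} else {
		/*
		 * HIT: sp.idx refers to an existing identical entry and
		 * sp.ref_cnt reflects the incremented reference count.
		 */
	}
	return 0;
}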
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Farah Smith <farah.smith@broadcom.com>
---
drivers/net/bnxt/tf_core/tf_core.c | 66 ++-
drivers/net/bnxt/tf_core/tf_core.h | 79 ++-
drivers/net/bnxt/tf_core/tf_device_p4.c | 2 +-
drivers/net/bnxt/tf_core/tf_shadow_tbl.c | 768 +++++++++++++++++++++++++++++-
drivers/net/bnxt/tf_core/tf_shadow_tbl.h | 124 ++---
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 6 +
drivers/net/bnxt/tf_core/tf_tbl.c | 246 +++++++++-
drivers/net/bnxt/tf_core/tf_tbl.h | 22 +-
drivers/net/bnxt/tf_core/tf_tcam.h | 2 +-
9 files changed, 1211 insertions(+), 104 deletions(-)
diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c
index ca3280b..0dbde1d 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -75,7 +75,6 @@ tf_open_session(struct tf *tfp,
/* Session vs session client is decided in
* tf_session_open_session()
*/
- printf("TF_OPEN, %s\n", parms->ctrl_chan_name);
rc = tf_session_open_session(tfp, &oparms);
/* Logging handled by tf_session_open_session */
if (rc)
@@ -954,6 +953,71 @@ tf_alloc_tbl_entry(struct tf *tfp,
}
int
+tf_search_tbl_entry(struct tf *tfp,
+ struct tf_search_tbl_entry_parms *parms)
+{
+ int rc;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tf_tbl_alloc_search_parms sparms;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ if (dev->ops->tf_dev_alloc_search_tbl == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ memset(&sparms, 0, sizeof(struct tf_tbl_alloc_search_parms));
+ sparms.dir = parms->dir;
+ sparms.type = parms->type;
+ sparms.result = parms->result;
+ sparms.result_sz_in_bytes = parms->result_sz_in_bytes;
+ sparms.alloc = parms->alloc;
+ sparms.tbl_scope_id = parms->tbl_scope_id;
+ rc = dev->ops->tf_dev_alloc_search_tbl(tfp, &sparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: TBL allocation failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Return the outputs from the search */
+ parms->hit = sparms.hit;
+ parms->search_status = sparms.search_status;
+ parms->ref_cnt = sparms.ref_cnt;
+ parms->idx = sparms.idx;
+
+ return 0;
+}
+
+int
tf_free_tbl_entry(struct tf *tfp,
struct tf_free_tbl_entry_parms *parms)
{
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index 349a1f1..db10935 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -291,9 +291,9 @@ enum tf_tcam_tbl_type {
};
/**
- * TCAM SEARCH STATUS
+ * SEARCH STATUS
*/
-enum tf_tcam_search_status {
+enum tf_search_status {
/** The entry was not found, but an idx was allocated if requested. */
MISS,
/** The entry was found, and the result/idx are valid */
@@ -1011,7 +1011,7 @@ struct tf_search_tcam_entry_parms {
/**
* [out] Search result status (hit, miss, reject)
*/
- enum tf_tcam_search_status search_status;
+ enum tf_search_status search_status;
/**
* [out] Current refcnt after allocation
*/
@@ -1288,6 +1288,79 @@ int tf_free_tcam_entry(struct tf *tfp,
/**
* tf_alloc_tbl_entry parameter definition
*/
+struct tf_search_tbl_entry_parms {
+ /**
+ * [in] Receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] Type of the allocation
+ */
+ enum tf_tbl_type type;
+ /**
+ * [in] Table scope identifier (ignored unless TF_TBL_TYPE_EXT)
+ */
+ uint32_t tbl_scope_id;
+ /**
+ * [in] Result data to search for
+ */
+ uint8_t *result;
+ /**
+ * [in] Result data size in bytes
+ */
+ uint16_t result_sz_in_bytes;
+ /**
+ * [in] Allocate on miss.
+ */
+ uint8_t alloc;
+ /**
+ * [out] Set if matching entry found
+ */
+ uint8_t hit;
+ /**
+ * [out] Search result status (hit, miss, reject)
+ */
+ enum tf_search_status search_status;
+ /**
+ * [out] Current ref count after allocation
+ */
+ uint16_t ref_cnt;
+ /**
+ * [out] Idx of allocated entry or found entry
+ */
+ uint32_t idx;
+};
+
+/**
+ * search Table Entry (experimental)
+ *
+ * This function searches the shadow copy of an index table for a matching
+ * entry. The result data must match for hit to be set. Only TruFlow core
+ * data is accessed. If shadow_copy is not enabled, an error is returned.
+ *
+ * Implementation:
+ *
+ * A hash is performed on the result data and mapped to a shadow copy entry
+ * where the result is populated. If the result matches the entry, hit is set,
+ * ref_cnt is incremented (if alloc), and the search status indicates what
+ * action the caller can take regarding setting the entry.
+ *
+ * search status should be used as follows:
+ * - On MISS, the caller should set the result into the returned index.
+ *
+ * - On REJECT, the caller should reject the flow since there are no resources.
+ *
+ * - On HIT, the matching index is returned to the caller. Additionally, the
+ * ref_cnt is updated.
+ *
+ * Also returns success or failure code.
+ */
+int tf_search_tbl_entry(struct tf *tfp,
+ struct tf_search_tbl_entry_parms *parms);
+
+/**
+ * tf_alloc_tbl_entry parameter definition
+ */
struct tf_alloc_tbl_entry_parms {
/**
* [in] Receive or transmit direction
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.c b/drivers/net/bnxt/tf_core/tf_device_p4.c
index afb6098..fe8dec3 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.c
@@ -126,7 +126,7 @@ const struct tf_dev_ops tf_dev_ops_p4 = {
.tf_dev_alloc_ext_tbl = tf_tbl_ext_alloc,
.tf_dev_free_tbl = tf_tbl_free,
.tf_dev_free_ext_tbl = tf_tbl_ext_free,
- .tf_dev_alloc_search_tbl = NULL,
+ .tf_dev_alloc_search_tbl = tf_tbl_alloc_search,
.tf_dev_set_tbl = tf_tbl_set,
.tf_dev_set_ext_tbl = tf_tbl_ext_common_set,
.tf_dev_get_tbl = tf_tbl_get,
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
index 8f2b6de..019a26e 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
@@ -3,61 +3,785 @@
* All rights reserved.
*/
-#include <rte_common.h>
-
+#include "tf_common.h"
+#include "tf_util.h"
+#include "tfp.h"
+#include "tf_core.h"
#include "tf_shadow_tbl.h"
+#include "tf_hash.h"
/**
- * Shadow table DB element
+ * The implementation includes 3 tables per table type.
+ * - hash table
+ * - sized so that a minimum of 4 slots per shadow entry are available to
+ * minimize the likelihood of collisions.
+ * - shadow key table
+ * - sized to the number of entries requested and is directly indexed
+ * - the index is zero based and is the table index - the base address
+ * - the data associated with the entry is stored in the key table.
+ * - The stored key is actually the data associated with the entry.
+ * - shadow result table
+ * - the result table is stored separately since it only needs to be accessed
+ * when the key matches.
+ * - the result has a back pointer to the hash table via the hb handle. The
+ * hb handle is a 32 bit representation of the hash with a valid bit, bucket
+ * element index, and the hash index. It is necessary to store the hb handle
+ * with the result since subsequent removes only provide the table index.
+ *
+ * - Max entries is limited in the current implementation since bit 15 is the
+ * valid bit in the hash table.
+ * - A 16bit hash is calculated and masked based on the number of entries
+ * - 64b wide bucket is used and broken into 4x16bit elements.
+ * This decision is based on quicker bucket scanning to determine if any
+ * elements are in use.
+ * - bit 15 of each bucket element is the valid bit; this is done to prevent having
+ * to read the larger key/result data for determining VALID. It also aids
+ * in the more efficient scanning of the bucket for slot usage.
*/
-struct tf_shadow_tbl_element {
- /**
- * Hash table
- */
- void *hash;
- /**
- * Reference count, array of number of table type entries
- */
- uint16_t *ref_count;
+/*
+ * The maximum number of shadow entries supported. The value also doubles as
+ * the maximum number of hash buckets. There are only 15 bits of data per
+ * bucket to point to the shadow tables.
+ */
+#define TF_SHADOW_ENTRIES_MAX (1 << 15)
+
+/* The number of elements(BE) per hash bucket (HB) */
+#define TF_SHADOW_HB_NUM_ELEM (4)
+#define TF_SHADOW_BE_VALID (1 << 15)
+#define TF_SHADOW_BE_IS_VALID(be) (((be) & TF_SHADOW_BE_VALID) != 0)
+
+/**
+ * The hash bucket handle is 32b
+ * - bit 31, the Valid bit
+ * - bit 29-30, the element
+ * - bits 0-15, the hash idx (is masked based on the allocated size)
+ */
+#define TF_SHADOW_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
+#define TF_SHADOW_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
+ ((be) << 29) | (idx))
+
+#define TF_SHADOW_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
+ (TF_SHADOW_HB_NUM_ELEM - 1))
+
+#define TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hdl)((hdl) & \
+ (ctxt)->hash_ctxt.hid_mask)
+
+/**
+ * The idx provided by the caller is within a region, so currently the base is
+ * either added or subtracted from the idx to ensure it can be used as a
+ * compressed index
+ */
+
+/* Convert the table index to a shadow index */
+#define TF_SHADOW_IDX_TO_SHIDX(ctxt, idx) ((idx) - \
+ (ctxt)->shadow_ctxt.base_addr)
+
+/* Convert the shadow index to a tbl index */
+#define TF_SHADOW_SHIDX_TO_IDX(ctxt, idx) ((idx) + \
+ (ctxt)->shadow_ctxt.base_addr)
+
+/* Simple helper masks for clearing an element from the bucket */
+#define TF_SHADOW_BE0_MASK_CLEAR(hb) ((hb) & 0xffffffffffff0000ull)
+#define TF_SHADOW_BE1_MASK_CLEAR(hb) ((hb) & 0xffffffff0000ffffull)
+#define TF_SHADOW_BE2_MASK_CLEAR(hb) ((hb) & 0xffff0000ffffffffull)
+#define TF_SHADOW_BE3_MASK_CLEAR(hb) ((hb) & 0x0000ffffffffffffull)
+
+/**
+ * This should be coming from external, but for now it is assumed that no key
+ * is greater than 512 bits (64B). This makes allocation of the key table
+ * easier without having to allocate on the fly.
+ */
+#define TF_SHADOW_MAX_KEY_SZ 64
+
+/*
+ * Local only defines for the internal data.
+ */
+
+/**
+ * tf_shadow_tbl_shadow_key_entry is the key entry of the key table.
+ * The key stored in the table is the result data of the index table.
+ */
+struct tf_shadow_tbl_shadow_key_entry {
+ uint8_t key[TF_SHADOW_MAX_KEY_SZ];
+};
+
+/**
+ * tf_shadow_tbl_shadow_result_entry is the result table entry.
+ * The result table writes are broken into two phases:
+ * - The search phase, which stores the hb_handle and key size and
+ * - The set phase, which writes the refcnt
+ */
+struct tf_shadow_tbl_shadow_result_entry {
+ uint16_t key_size;
+ uint32_t refcnt;
+ uint32_t hb_handle;
+};
+
+/**
+ * tf_shadow_tbl_shadow_ctxt holds all information for accessing the key and
+ * result tables.
+ */
+struct tf_shadow_tbl_shadow_ctxt {
+ struct tf_shadow_tbl_shadow_key_entry *sh_key_tbl;
+ struct tf_shadow_tbl_shadow_result_entry *sh_res_tbl;
+ uint32_t base_addr;
+ uint16_t num_entries;
+ uint16_t alloc_idx;
+};
+
+/**
+ * tf_shadow_tbl_hash_ctxt holds all information related to accessing the hash
+ * table.
+ */
+struct tf_shadow_tbl_hash_ctxt {
+ uint64_t *hashtbl;
+ uint16_t hid_mask;
+ uint16_t hash_entries;
};
/**
- * Shadow table DB definition
+ * tf_shadow_tbl_ctxt holds the hash and shadow tables for the current shadow
+ * table db. This structure is per table type as each table type has
+ * its own shadow and hash table.
+ */
+struct tf_shadow_tbl_ctxt {
+ struct tf_shadow_tbl_shadow_ctxt shadow_ctxt;
+ struct tf_shadow_tbl_hash_ctxt hash_ctxt;
+};
+
+/**
+ * tf_shadow_tbl_db is the allocated db structure returned as an opaque
+ * void * pointer to the caller during create db. It holds the pointers for
+ * each table associated with the db.
*/
struct tf_shadow_tbl_db {
- /**
- * The DB consists of an array of elements
- */
- struct tf_shadow_tbl_element *db;
+ /* Each context holds the shadow and hash table information */
+ struct tf_shadow_tbl_ctxt *ctxt[TF_TBL_TYPE_MAX];
};
+/**
+ * Simple routine that decides which table types are searchable.
+ *
+ */
+static int tf_shadow_tbl_is_searchable(enum tf_tbl_type type)
+{
+ int rc = 0;
+
+ switch (type) {
+ case TF_TBL_TYPE_ACT_ENCAP_8B:
+ case TF_TBL_TYPE_ACT_ENCAP_16B:
+ case TF_TBL_TYPE_ACT_ENCAP_32B:
+ case TF_TBL_TYPE_ACT_ENCAP_64B:
+ case TF_TBL_TYPE_ACT_SP_SMAC:
+ case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
+ case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
+ case TF_TBL_TYPE_ACT_MODIFY_IPV4:
+ case TF_TBL_TYPE_ACT_MODIFY_SPORT:
+ case TF_TBL_TYPE_ACT_MODIFY_DPORT:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ };
+
+ return rc;
+}
+
+/**
+ * Returns the number of entries in the contexts shadow table.
+ */
+static inline uint16_t
+tf_shadow_tbl_sh_num_entries_get(struct tf_shadow_tbl_ctxt *ctxt)
+{
+ return ctxt->shadow_ctxt.num_entries;
+}
+
+/**
+ * Compare the give key with the key in the shadow table.
+ *
+ * Returns 0 if the keys match
+ */
+static int
+tf_shadow_tbl_key_cmp(struct tf_shadow_tbl_ctxt *ctxt,
+ uint8_t *key,
+ uint16_t sh_idx,
+ uint16_t size)
+{
+ if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
+ sh_idx >= tf_shadow_tbl_sh_num_entries_get(ctxt) || !key)
+ return -1;
+
+ return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
+}
+
+/**
+ * Free the memory associated with the context.
+ */
+static void
+tf_shadow_tbl_ctxt_delete(struct tf_shadow_tbl_ctxt *ctxt)
+{
+ if (!ctxt)
+ return;
+
+ tfp_free(ctxt->hash_ctxt.hashtbl);
+ tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
+ tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
+}
+
+/**
+ * The TF Shadow TBL context is per TBL and holds all information relating to
+ * managing the shadow and search capability. This routine allocates data
+ * that must be freed by tf_shadow_tbl_ctxt_delete when deleting the
+ * shadow db.
+ */
+static int
+tf_shadow_tbl_ctxt_create(struct tf_shadow_tbl_ctxt *ctxt,
+ uint16_t num_entries,
+ uint16_t base_addr)
+{
+ struct tfp_calloc_parms cparms;
+ uint16_t hash_size = 1;
+ uint16_t hash_mask;
+ int rc;
+
+ /* Hash table is a power of two that holds the number of entries */
+ if (num_entries > TF_SHADOW_ENTRIES_MAX) {
+ TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
+ num_entries,
+ TF_SHADOW_ENTRIES_MAX);
+ return -ENOMEM;
+ }
+
+ while (hash_size < num_entries)
+ hash_size = hash_size << 1;
+
+ hash_mask = hash_size - 1;
+
+ /* Allocate the hash table */
+ cparms.nitems = hash_size;
+ cparms.size = sizeof(uint64_t);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->hash_ctxt.hashtbl = cparms.mem_va;
+ ctxt->hash_ctxt.hid_mask = hash_mask;
+ ctxt->hash_ctxt.hash_entries = hash_size;
+
+ /* allocate the shadow tables */
+ /* allocate the shadow key table */
+ cparms.nitems = num_entries;
+ cparms.size = sizeof(struct tf_shadow_tbl_shadow_key_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;
+
+ /* allocate the shadow result table */
+ cparms.nitems = num_entries;
+ cparms.size = sizeof(struct tf_shadow_tbl_shadow_result_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;
+
+ ctxt->shadow_ctxt.num_entries = num_entries;
+ ctxt->shadow_ctxt.base_addr = base_addr;
+
+ return 0;
+error:
+ tf_shadow_tbl_ctxt_delete(ctxt);
+
+ return -ENOMEM;
+}
+
+/**
+ * Get a shadow table context given the db and the table type
+ */
+static struct tf_shadow_tbl_ctxt *
+tf_shadow_tbl_ctxt_get(struct tf_shadow_tbl_db *shadow_db,
+ enum tf_tbl_type type)
+{
+ if (type >= TF_TBL_TYPE_MAX ||
+ !shadow_db ||
+ !shadow_db->ctxt[type])
+ return NULL;
+
+ return shadow_db->ctxt[type];
+}
+
+/**
+ * Sets the hash entry into the table given the table context, hash bucket
+ * handle, and shadow index.
+ */
+static inline int
+tf_shadow_tbl_set_hash_entry(struct tf_shadow_tbl_ctxt *ctxt,
+ uint32_t hb_handle,
+ uint16_t sh_idx)
+{
+ uint16_t hid = TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ uint16_t be = TF_SHADOW_HB_HANDLE_BE_GET(hb_handle);
+ uint64_t entry = sh_idx | TF_SHADOW_BE_VALID;
+
+ if (hid >= ctxt->hash_ctxt.hash_entries)
+ return -EINVAL;
+
+ ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
+ return 0;
+}
+
+/**
+ * Clears the hash entry given the TBL context and hash bucket handle.
+ */
+static inline void
+tf_shadow_tbl_clear_hash_entry(struct tf_shadow_tbl_ctxt *ctxt,
+ uint32_t hb_handle)
+{
+ uint16_t hid, be;
+ uint64_t *bucket;
+
+ if (!TF_SHADOW_HB_HANDLE_IS_VALID(hb_handle))
+ return;
+
+ hid = TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ be = TF_SHADOW_HB_HANDLE_BE_GET(hb_handle);
+ bucket = &ctxt->hash_ctxt.hashtbl[hid];
+
+ switch (be) {
+ case 0:
+ *bucket = TF_SHADOW_BE0_MASK_CLEAR(*bucket);
+ break;
+ case 1:
+ *bucket = TF_SHADOW_BE1_MASK_CLEAR(*bucket);
+ break;
+ case 2:
+ *bucket = TF_SHADOW_BE2_MASK_CLEAR(*bucket);
+ break;
+ case 3:
+ *bucket = TF_SHADOW_BE3_MASK_CLEAR(*bucket);
+ break;
+ default:
+ /*
+ * Since the BE_GET masks non-inclusive bits, this will not
+ * happen.
+ */
+ break;
+ }
+}
+
+/**
+ * Clears the shadow key and result entries given the table context and
+ * shadow index.
+ */
+static void
+tf_shadow_tbl_clear_sh_entry(struct tf_shadow_tbl_ctxt *ctxt,
+ uint16_t sh_idx)
+{
+ struct tf_shadow_tbl_shadow_key_entry *sk_entry;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (sh_idx >= tf_shadow_tbl_sh_num_entries_get(ctxt))
+ return;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];
+
+ /*
+ * memset key/result to zero for now, possibly leave the data alone
+ * in the future and rely on the valid bit in the hash table.
+ */
+ memset(sk_entry, 0, sizeof(struct tf_shadow_tbl_shadow_key_entry));
+ memset(sr_entry, 0, sizeof(struct tf_shadow_tbl_shadow_result_entry));
+}
+
+/**
+ * Binds the allocated tbl index with the hash and shadow tables.
+ * The entry will be incomplete until the set has happened with the result
+ * data.
+ */
int
-tf_shadow_tbl_create_db(struct tf_shadow_tbl_create_db_parms *parms __rte_unused)
+tf_shadow_tbl_bind_index(struct tf_shadow_tbl_bind_index_parms *parms)
{
+ int rc;
+ uint16_t idx, len;
+ struct tf_shadow_tbl_ctxt *ctxt;
+ struct tf_shadow_tbl_db *shadow_db;
+ struct tf_shadow_tbl_shadow_key_entry *sk_entry;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (!parms || !TF_SHADOW_HB_HANDLE_IS_VALID(parms->hb_handle) ||
+ !parms->data) {
+ TFP_DRV_LOG(ERR, "Invalid parms\n");
+ return -EINVAL;
+ }
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ ctxt = tf_shadow_tbl_ctxt_get(shadow_db, parms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
+ tf_tbl_type_2_str(parms->type));
+ return -EINVAL;
+ }
+
+ idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, parms->idx);
+ len = parms->data_sz_in_bytes;
+ if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt) ||
+ len > TF_SHADOW_MAX_KEY_SZ) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid len (%d) > %d || oob idx %d\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ len,
+ TF_SHADOW_MAX_KEY_SZ, idx);
+
+ return -EINVAL;
+ }
+
+ rc = tf_shadow_tbl_set_hash_entry(ctxt, parms->hb_handle, idx);
+ if (rc)
+ return -EINVAL;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+
+ /* For tables, the data is the key */
+ memcpy(sk_entry->key, parms->data, len);
+
+ /* Write the result table */
+ sr_entry->key_size = len;
+ sr_entry->hb_handle = parms->hb_handle;
+ sr_entry->refcnt = 1;
+
return 0;
}
+/**
+ * Deletes hash/shadow information if no more references.
+ *
+ * Returns 0 - The caller should delete the table entry in hardware.
+ * Returns non-zero - The number of references to the entry
+ */
int
-tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms __rte_unused)
+tf_shadow_tbl_remove(struct tf_shadow_tbl_remove_parms *parms)
{
+ uint16_t idx;
+ uint32_t hb_handle;
+ struct tf_shadow_tbl_ctxt *ctxt;
+ struct tf_shadow_tbl_db *shadow_db;
+ struct tf_tbl_free_parms *fparms;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (!parms || !parms->fparms) {
+ TFP_DRV_LOG(ERR, "Invalid parms\n");
+ return -EINVAL;
+ }
+
+ fparms = parms->fparms;
+ if (!tf_shadow_tbl_is_searchable(fparms->type))
+ return 0;
+ /*
+ * Initialize the ref count to zero. The default would be to remove
+ * the entry.
+ */
+ fparms->ref_cnt = 0;
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ ctxt = tf_shadow_tbl_ctxt_get(shadow_db, fparms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
+ tf_tbl_type_2_str(fparms->type));
+ return 0;
+ }
+
+ idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, fparms->idx);
+ if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt)) {
+ TFP_DRV_LOG(DEBUG, "%s %d >= %d\n",
+ tf_tbl_type_2_str(fparms->type),
+ fparms->idx,
+ tf_shadow_tbl_sh_num_entries_get(ctxt));
+ return 0;
+ }
+
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+ if (sr_entry->refcnt <= 1) {
+ hb_handle = sr_entry->hb_handle;
+ tf_shadow_tbl_clear_hash_entry(ctxt, hb_handle);
+ tf_shadow_tbl_clear_sh_entry(ctxt, idx);
+ } else {
+ sr_entry->refcnt--;
+ fparms->ref_cnt = sr_entry->refcnt;
+ }
+
return 0;
}
int
-tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms __rte_unused)
+tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms)
{
+ uint16_t len;
+ uint64_t bucket;
+ uint32_t i, hid32;
+ struct tf_shadow_tbl_ctxt *ctxt;
+ struct tf_shadow_tbl_db *shadow_db;
+ uint16_t hid16, hb_idx, hid_mask, shtbl_idx, shtbl_key, be_valid;
+ struct tf_tbl_alloc_search_parms *sparms;
+ uint32_t be_avail = TF_SHADOW_HB_NUM_ELEM;
+
+ if (!parms || !parms->sparms) {
+ TFP_DRV_LOG(ERR, "tbl search with invalid parms\n");
+ return -EINVAL;
+ }
+
+ sparms = parms->sparms;
+ /* Check that caller was supposed to call search */
+ if (!tf_shadow_tbl_is_searchable(sparms->type))
+ return -EINVAL;
+
+ /* Initialize return values to invalid */
+ sparms->hit = 0;
+ sparms->search_status = REJECT;
+ parms->hb_handle = 0;
+ sparms->ref_cnt = 0;
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ ctxt = tf_shadow_tbl_ctxt_get(shadow_db, sparms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(ERR, "%s Unable to get tbl mgr context\n",
+ tf_tbl_type_2_str(sparms->type));
+ return -EINVAL;
+ }
+
+ len = sparms->result_sz_in_bytes;
+ if (len > TF_SHADOW_MAX_KEY_SZ || !sparms->result || !len) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid parms %d : %p\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tbl_type_2_str(sparms->type),
+ len,
+ sparms->result);
+ return -EINVAL;
+ }
+
+ /*
+ * Calculate the crc32
+ * Fold it to create a 16b value
+ * Reduce it to fit the table
+ */
+ hid32 = tf_hash_calc_crc32(sparms->result, len);
+ hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
+ hid_mask = ctxt->hash_ctxt.hid_mask;
+ hb_idx = hid16 & hid_mask;
+
+ bucket = ctxt->hash_ctxt.hashtbl[hb_idx];
+ if (!bucket) {
+ /* empty bucket means a miss and available entry */
+ sparms->search_status = MISS;
+ parms->hb_handle = TF_SHADOW_HB_HANDLE_CREATE(hb_idx, 0);
+ sparms->idx = 0;
+ return 0;
+ }
+
+ /* Set the avail to max so we can detect when there is an avail entry */
+ be_avail = TF_SHADOW_HB_NUM_ELEM;
+ for (i = 0; i < TF_SHADOW_HB_NUM_ELEM; i++) {
+ shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff);
+ be_valid = TF_SHADOW_BE_IS_VALID(shtbl_idx);
+ if (!be_valid) {
+ /* The element is avail, keep going */
+ be_avail = i;
+ continue;
+ }
+ /* There is a valid entry, compare it */
+ shtbl_key = shtbl_idx & ~TF_SHADOW_BE_VALID;
+ if (!tf_shadow_tbl_key_cmp(ctxt,
+ sparms->result,
+ shtbl_key,
+ len)) {
+ /*
+ * It matches, increment the ref count if the caller
+ * requested allocation and return the info
+ */
+ if (sparms->alloc)
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt =
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt + 1;
+
+ sparms->hit = 1;
+ sparms->search_status = HIT;
+ parms->hb_handle =
+ TF_SHADOW_HB_HANDLE_CREATE(hb_idx, i);
+ sparms->idx = TF_SHADOW_SHIDX_TO_IDX(ctxt, shtbl_key);
+ sparms->ref_cnt =
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt;
+
+ return 0;
+ }
+ }
+
+ /* No hits, return avail entry if exists */
+ if (be_avail < TF_SHADOW_HB_NUM_ELEM) {
+ /*
+ * There is an available hash entry, so return MISS and the
+ * hash handle for the subsequent bind.
+ */
+ parms->hb_handle = TF_SHADOW_HB_HANDLE_CREATE(hb_idx, be_avail);
+ sparms->search_status = MISS;
+ sparms->hit = 0;
+ sparms->idx = 0;
+ } else {
+ /* No room for the entry in the hash table, must REJECT */
+ sparms->search_status = REJECT;
+ }
+
return 0;
}
int
-tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms __rte_unused)
+tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms)
{
+ uint16_t idx;
+ struct tf_shadow_tbl_ctxt *ctxt;
+ struct tf_tbl_set_parms *sparms;
+ struct tf_shadow_tbl_db *shadow_db;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (!parms || !parms->sparms) {
+ TFP_DRV_LOG(ERR, "Null parms\n");
+ return -EINVAL;
+ }
+
+ sparms = parms->sparms;
+ if (!sparms->data || !sparms->data_sz_in_bytes) {
+ TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tbl_type_2_str(sparms->type));
+ return -EINVAL;
+ }
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ ctxt = tf_shadow_tbl_ctxt_get(shadow_db, sparms->type);
+ if (!ctxt) {
+ /* We aren't tracking this table, so return success */
+ TFP_DRV_LOG(DEBUG, "%s Unable to get tbl mgr context\n",
+ tf_tbl_type_2_str(sparms->type));
+ return 0;
+ }
+
+ idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, sparms->idx);
+ if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt)) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tbl_type_2_str(sparms->type),
+ sparms->idx);
+ return -EINVAL;
+ }
+
+ /* Write the result table, the key/hash has been written already */
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+
+ /*
+ * If the handle is not valid, the bind was never called. We aren't
+ * tracking this entry.
+ */
+ if (!TF_SHADOW_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
+ return 0;
+
+ sr_entry->refcnt = 1;
+
return 0;
}
int
-tf_shadow_tbl_remove(struct tf_shadow_tbl_remove_parms *parms __rte_unused)
+tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms)
{
+ struct tf_shadow_tbl_db *shadow_db;
+ int i;
+
+ TF_CHECK_PARMS1(parms);
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ if (!shadow_db) {
+ TFP_DRV_LOG(DEBUG, "Shadow db is NULL cannot be freed\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
+ if (shadow_db->ctxt[i]) {
+ tf_shadow_tbl_ctxt_delete(shadow_db->ctxt[i]);
+ tfp_free(shadow_db->ctxt[i]);
+ }
+ }
+
+ tfp_free(shadow_db);
+
return 0;
}
+
+/**
+ * Allocate the shadow table DB resources for the search and allocate capability
+ *
+ */
+int tf_shadow_tbl_create_db(struct tf_shadow_tbl_create_db_parms *parms)
+{
+ int rc;
+ int i;
+ uint16_t base;
+ struct tfp_calloc_parms cparms;
+ struct tf_shadow_tbl_db *shadow_db = NULL;
+
+ TF_CHECK_PARMS1(parms);
+
+ /* Build the shadow DB per the request */
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_shadow_tbl_db);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ return rc;
+ shadow_db = (void *)cparms.mem_va;
+
+ for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
+ /* If the element didn't request an allocation no need
+ * to create a pool nor verify if we got a reservation.
+ */
+ if (!parms->cfg->alloc_cnt[i] ||
+ !tf_shadow_tbl_is_searchable(i)) {
+ shadow_db->ctxt[i] = NULL;
+ continue;
+ }
+
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_shadow_tbl_ctxt);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+
+ shadow_db->ctxt[i] = cparms.mem_va;
+ base = parms->cfg->base_addr[i];
+ rc = tf_shadow_tbl_ctxt_create(shadow_db->ctxt[i],
+ parms->cfg->alloc_cnt[i],
+ base);
+ if (rc)
+ goto error;
+ }
+
+ *parms->shadow_db = (void *)shadow_db;
+
+ TFP_DRV_LOG(INFO,
+ "TF SHADOW TABLE - initialized\n");
+
+ return 0;
+error:
+ for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
+ if (shadow_db->ctxt[i]) {
+ tf_shadow_tbl_ctxt_delete(shadow_db->ctxt[i]);
+ tfp_free(shadow_db->ctxt[i]);
+ }
+ }
+
+ tfp_free(shadow_db);
+
+ return -ENOMEM;
+}
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.h b/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
index dfd336e..e73381f 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
@@ -8,8 +8,6 @@
#include "tf_core.h"
-struct tf;
-
/**
* The Shadow Table module provides shadow DB handling for table based
* TF types. A shadow DB provides the capability that allows for reuse
@@ -32,19 +30,22 @@ struct tf;
*/
struct tf_shadow_tbl_cfg_parms {
/**
- * TF Table type
+ * [in] The number of elements in the alloc_cnt and base_addr
+ * For now, it should always be equal to TF_TBL_TYPE_MAX
*/
- enum tf_tbl_type type;
+ int num_entries;
/**
- * Number of entries the Shadow DB needs to hold
+ * [in] Resource allocation count array
+ * This array content originates from the tf_session_resources
+ * that is passed in on session open
+ * Array size is TF_TBL_TYPE_MAX
*/
- int num_entries;
-
+ uint16_t *alloc_cnt;
/**
- * Element width for this table type
+ * [in] The base index for each table
*/
- int element_width;
+ uint16_t base_addr[TF_TBL_TYPE_MAX];
};
/**
@@ -52,17 +53,17 @@ struct tf_shadow_tbl_cfg_parms {
*/
struct tf_shadow_tbl_create_db_parms {
/**
- * [in] Configuration information for the shadow db
+ * [in] Receive or transmit direction
*/
- struct tf_shadow_tbl_cfg_parms *cfg;
+ enum tf_dir dir;
/**
- * [in] Number of elements in the parms structure
+ * [in] Configuration information for the shadow db
*/
- uint16_t num_elements;
+ struct tf_shadow_tbl_cfg_parms *cfg;
/**
* [out] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void **shadow_db;
};
/**
@@ -70,9 +71,9 @@ struct tf_shadow_tbl_create_db_parms {
*/
struct tf_shadow_tbl_free_db_parms {
/**
- * Shadow table DB handle
+ * [in] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void *shadow_db;
};
/**
@@ -82,79 +83,77 @@ struct tf_shadow_tbl_search_parms {
/**
* [in] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void *shadow_db;
/**
- * [in] Table type
+ * [inout] The search parms from tf core
*/
- enum tf_tbl_type type;
- /**
- * [in] Pointer to entry blob value in remap table to match
- */
- uint8_t *entry;
- /**
- * [in] Size of the entry blob passed in bytes
- */
- uint16_t entry_sz;
- /**
- * [out] Index of the found element returned if hit
- */
- uint16_t *index;
+ struct tf_tbl_alloc_search_parms *sparms;
/**
* [out] Reference count incremented if hit
*/
- uint16_t *ref_cnt;
+ uint32_t hb_handle;
};
/**
- * Shadow table insert parameters
+ * Shadow Table bind index parameters
*/
-struct tf_shadow_tbl_insert_parms {
+struct tf_shadow_tbl_bind_index_parms {
/**
- * [in] Shadow table DB handle
+ * [in] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void *shadow_db;
/**
- * [in] Tbl type
+ * [in] receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] Table type
*/
enum tf_tbl_type type;
/**
- * [in] Pointer to entry blob value in remap table to match
+ * [in] index of the entry to program
*/
- uint8_t *entry;
+ uint16_t idx;
/**
- * [in] Size of the entry blob passed in bytes
+ * [in] Data to bind; for tables, the result data acts as the key
*/
- uint16_t entry_sz;
+ uint8_t *data;
/**
- * [in] Entry to update
+ * [in] data size in bytes
*/
- uint16_t index;
+ uint16_t data_sz_in_bytes;
/**
- * [out] Reference count after insert
+ * [in] The hash bucket handle returned from the search
*/
- uint16_t *ref_cnt;
+ uint32_t hb_handle;
};
/**
- * Shadow table remove parameters
+ * Shadow table insert parameters
*/
-struct tf_shadow_tbl_remove_parms {
+struct tf_shadow_tbl_insert_parms {
/**
* [in] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void *shadow_db;
/**
- * [in] Tbl type
+ * [in] The insert parms from tf core
*/
- enum tf_tbl_type type;
+ struct tf_tbl_set_parms *sparms;
+};
+
+/**
+ * Shadow table remove parameters
+ */
+struct tf_shadow_tbl_remove_parms {
/**
- * [in] Entry to update
+ * [in] Shadow table DB handle
*/
- uint16_t index;
+ void *shadow_db;
/**
- * [out] Reference count after removal
+ * [in] The free parms from tf core
*/
- uint16_t *ref_cnt;
+ struct tf_tbl_free_parms *fparms;
};
/**
@@ -206,10 +205,27 @@ int tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms);
* Returns
* - (0) if successful, element was found.
* - (-EINVAL) on failure.
+ *
+ * If there is a miss, but there is room for insertion, the hb_handle returned
+ * is used for insertion during the bind index API
*/
int tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms);
/**
+ * Bind Shadow table db hash and result tables with result from search/alloc
+ *
+ * [in] parms
+ * Pointer to the bind index parameters
+ *
+ * Returns
+ * - (0) if successful
+ * - (-EINVAL) on failure.
+ *
+ * This is only called after a MISS in the search returns a hb_handle
+ */
+int tf_shadow_tbl_bind_index(struct tf_shadow_tbl_bind_index_parms *parms);
+
+/**
* Inserts an element into the Shadow table DB. Will fail if the
* elements ref_count is different from 0. Ref_count after insert will
* be incremented.
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
index beaea03..a0130d6 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
@@ -373,6 +373,12 @@ tf_shadow_tcam_clear_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
case 3:
*bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
break;
+ default:
+ /*
+ * Since the BE_GET masks non-inclusive bits, this will not
+ * happen.
+ */
+ break;
}
}
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.c b/drivers/net/bnxt/tf_core/tf_tbl.c
index 9ebaa34..bec5210 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_tbl.c
@@ -13,6 +13,9 @@
#include "tf_util.h"
#include "tf_msg.h"
#include "tfp.h"
+#include "tf_shadow_tbl.h"
+#include "tf_session.h"
+#include "tf_device.h"
struct tf;
@@ -25,7 +28,7 @@ static void *tbl_db[TF_DIR_MAX];
/**
* Table Shadow DBs
*/
-/* static void *shadow_tbl_db[TF_DIR_MAX]; */
+static void *shadow_tbl_db[TF_DIR_MAX];
/**
* Init flag, set on bind and cleared on unbind
@@ -35,14 +38,19 @@ static uint8_t init;
/**
* Shadow init flag, set on bind and cleared on unbind
*/
-/* static uint8_t shadow_init; */
+static uint8_t shadow_init;
int
tf_tbl_bind(struct tf *tfp,
struct tf_tbl_cfg_parms *parms)
{
- int rc;
- int i;
+ int rc, d, i;
+ struct tf_rm_alloc_info info;
+ struct tf_rm_free_db_parms fparms;
+ struct tf_shadow_tbl_free_db_parms fshadow;
+ struct tf_rm_get_alloc_info_parms ainfo;
+ struct tf_shadow_tbl_cfg_parms shadow_cfg;
+ struct tf_shadow_tbl_create_db_parms shadow_cdb;
struct tf_rm_create_db_parms db_cfg = { 0 };
TF_CHECK_PARMS2(tfp, parms);
@@ -58,26 +66,86 @@ tf_tbl_bind(struct tf *tfp,
db_cfg.num_elements = parms->num_elements;
db_cfg.cfg = parms->cfg;
- for (i = 0; i < TF_DIR_MAX; i++) {
- db_cfg.dir = i;
- db_cfg.alloc_cnt = parms->resources->tbl_cnt[i].cnt;
- db_cfg.rm_db = &tbl_db[i];
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ db_cfg.dir = d;
+ db_cfg.alloc_cnt = parms->resources->tbl_cnt[d].cnt;
+ db_cfg.rm_db = &tbl_db[d];
rc = tf_rm_create_db(tfp, &db_cfg);
if (rc) {
TFP_DRV_LOG(ERR,
"%s: Table DB creation failed\n",
- tf_dir_2_str(i));
+ tf_dir_2_str(d));
return rc;
}
}
+ /* Initialize the Shadow Table. */
+ if (parms->shadow_copy) {
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ memset(&shadow_cfg, 0, sizeof(shadow_cfg));
+ memset(&shadow_cdb, 0, sizeof(shadow_cdb));
+ /* Get the base addresses of the tables */
+ for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
+ memset(&info, 0, sizeof(info));
+
+ if (!parms->resources->tbl_cnt[d].cnt[i])
+ continue;
+ ainfo.rm_db = tbl_db[d];
+ ainfo.db_index = i;
+ ainfo.info = &info;
+ rc = tf_rm_get_info(&ainfo);
+ if (rc)
+ goto error;
+
+ shadow_cfg.base_addr[i] = info.entry.start;
+ }
+
+ /* Create the shadow db */
+ shadow_cfg.alloc_cnt =
+ parms->resources->tbl_cnt[d].cnt;
+ shadow_cfg.num_entries = parms->num_elements;
+
+ shadow_cdb.shadow_db = &shadow_tbl_db[d];
+ shadow_cdb.cfg = &shadow_cfg;
+ rc = tf_shadow_tbl_create_db(&shadow_cdb);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Shadow TBL DB creation failed "
+ "rc=%d\n", rc);
+ goto error;
+ }
+ }
+ shadow_init = 1;
+ }
+
init = 1;
TFP_DRV_LOG(INFO,
"Table Type - initialized\n");
return 0;
+error:
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ memset(&fparms, 0, sizeof(fparms));
+ fparms.dir = d;
+ fparms.rm_db = tbl_db[d];
+ /* Ignoring return here since we are in the error case */
+ (void)tf_rm_free_db(tfp, &fparms);
+
+ if (parms->shadow_copy) {
+ fshadow.shadow_db = shadow_tbl_db[d];
+ tf_shadow_tbl_free_db(&fshadow);
+ shadow_tbl_db[d] = NULL;
+ }
+
+ tbl_db[d] = NULL;
+ }
+
+ shadow_init = 0;
+ init = 0;
+
+ return rc;
}
int
@@ -86,6 +154,7 @@ tf_tbl_unbind(struct tf *tfp)
int rc;
int i;
struct tf_rm_free_db_parms fparms = { 0 };
+ struct tf_shadow_tbl_free_db_parms fshadow;
TF_CHECK_PARMS1(tfp);
@@ -104,9 +173,17 @@ tf_tbl_unbind(struct tf *tfp)
return rc;
tbl_db[i] = NULL;
+
+ if (shadow_init) {
+ memset(&fshadow, 0, sizeof(fshadow));
+ fshadow.shadow_db = shadow_tbl_db[i];
+ tf_shadow_tbl_free_db(&fshadow);
+ shadow_tbl_db[i] = NULL;
+ }
}
init = 0;
+ shadow_init = 0;
return 0;
}
@@ -153,6 +230,7 @@ tf_tbl_free(struct tf *tfp __rte_unused,
int rc;
struct tf_rm_is_allocated_parms aparms = { 0 };
struct tf_rm_free_parms fparms = { 0 };
+ struct tf_shadow_tbl_remove_parms shparms;
int allocated = 0;
TF_CHECK_PARMS2(tfp, parms);
@@ -182,6 +260,36 @@ tf_tbl_free(struct tf *tfp __rte_unused,
return -EINVAL;
}
+ /*
+ * The Shadow mgmt, if enabled, determines if the entry needs
+ * to be deleted.
+ */
+ if (shadow_init) {
+ memset(&shparms, 0, sizeof(shparms));
+ shparms.shadow_db = shadow_tbl_db[parms->dir];
+ shparms.fparms = parms;
+ rc = tf_shadow_tbl_remove(&shparms);
+ if (rc) {
+ /*
+ * Should not get here, log it and let the entry be
+ * deleted.
+ */
+ TFP_DRV_LOG(ERR, "%s: Shadow free fail, "
+ "type:%d index:%d deleting the entry.\n",
+ tf_dir_2_str(parms->dir),
+ parms->type,
+ parms->idx);
+ } else {
+ /*
+ * If the entry still has references, just return the
+ * ref count to the caller. No need to remove entry
+ * from rm.
+ */
+ if (parms->ref_cnt >= 1)
+ return rc;
+ }
+ }
+
/* Free requested element */
fparms.rm_db = tbl_db[parms->dir];
fparms.db_index = parms->type;
@@ -200,10 +308,124 @@ tf_tbl_free(struct tf *tfp __rte_unused,
}
int
-tf_tbl_alloc_search(struct tf *tfp __rte_unused,
- struct tf_tbl_alloc_search_parms *parms __rte_unused)
+tf_tbl_alloc_search(struct tf *tfp,
+ struct tf_tbl_alloc_search_parms *parms)
{
- return 0;
+ int rc, frc;
+ uint32_t idx;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tf_tbl_alloc_parms aparms;
+ struct tf_shadow_tbl_search_parms sparms;
+ struct tf_shadow_tbl_bind_index_parms bparms;
+ struct tf_tbl_free_parms fparms;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ if (!shadow_init || !shadow_tbl_db[parms->dir]) {
+ TFP_DRV_LOG(ERR, "%s: Shadow TBL not initialized.\n",
+ tf_dir_2_str(parms->dir));
+ return -EINVAL;
+ }
+
+ memset(&sparms, 0, sizeof(sparms));
+ sparms.sparms = parms;
+ sparms.shadow_db = shadow_tbl_db[parms->dir];
+ rc = tf_shadow_tbl_search(&sparms);
+ if (rc)
+ return rc;
+
+ /*
+ * The app didn't request us to alloc the entry, so return now.
+ * The hit should have been updated in the original search parm.
+ */
+ if (!parms->alloc || parms->search_status != MISS)
+ return rc;
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Allocate the index */
+ if (dev->ops->tf_dev_alloc_tbl == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return -EOPNOTSUPP;
+ }
+
+ memset(&aparms, 0, sizeof(aparms));
+ aparms.dir = parms->dir;
+ aparms.type = parms->type;
+ aparms.tbl_scope_id = parms->tbl_scope_id;
+ aparms.idx = &idx;
+ rc = dev->ops->tf_dev_alloc_tbl(tfp, &aparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Table allocation failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Bind the allocated index to the data */
+ memset(&bparms, 0, sizeof(bparms));
+ bparms.shadow_db = shadow_tbl_db[parms->dir];
+ bparms.dir = parms->dir;
+ bparms.type = parms->type;
+ bparms.idx = idx;
+ bparms.data = parms->result;
+ bparms.data_sz_in_bytes = parms->result_sz_in_bytes;
+ bparms.hb_handle = sparms.hb_handle;
+ rc = tf_shadow_tbl_bind_index(&bparms);
+ if (rc) {
+ /* Error binding entry, need to free the allocated idx */
+ if (dev->ops->tf_dev_free_tbl == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ memset(&fparms, 0, sizeof(fparms));
+ fparms.dir = parms->dir;
+ fparms.type = parms->type;
+ fparms.idx = idx;
+ frc = dev->ops->tf_dev_free_tbl(tfp, &fparms);
+ if (frc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed free index allocated during "
+ "search. rc=%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-frc));
+ /* return the original failure. */
+ return rc;
+ }
+ }
+
+ parms->idx = idx;
+
+ return rc;
}
int
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.h b/drivers/net/bnxt/tf_core/tf_tbl.h
index f20e8d7..930fcc3 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl.h
+++ b/drivers/net/bnxt/tf_core/tf_tbl.h
@@ -144,29 +144,31 @@ struct tf_tbl_alloc_search_parms {
*/
uint32_t tbl_scope_id;
/**
- * [in] Enable search for matching entry. If the table type is
- * internal the shadow copy will be searched before
- * alloc. Session must be configured with shadow copy enabled.
- */
- uint8_t search_enable;
- /**
- * [in] Result data to search for (if search_enable)
+ * [in] Result data to search for
*/
uint8_t *result;
/**
- * [in] Result data size in bytes (if search_enable)
+ * [in] Result data size in bytes
*/
uint16_t result_sz_in_bytes;
/**
+ * [in] Whether or not to allocate on MISS, 1 is allocate.
+ */
+ uint8_t alloc;
+ /**
* [out] If search_enable, set if matching entry found
*/
uint8_t hit;
/**
- * [out] Current ref count after allocation (if search_enable)
+ * [out] The status of the search (REJECT, MISS, HIT)
+ */
+ enum tf_search_status search_status;
+ /**
+ * [out] Current ref count after allocation
*/
uint16_t ref_cnt;
/**
- * [out] Idx of allocated entry or found entry (if search_enable)
+ * [out] Idx of allocated entry or found entry
*/
uint32_t idx;
};
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.h b/drivers/net/bnxt/tf_core/tf_tcam.h
index 4722ce0..ffa0a94 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_tcam.h
@@ -134,7 +134,7 @@ struct tf_tcam_alloc_search_parms {
/**
* [out] Search result status (hit, miss, reject)
*/
- enum tf_tcam_search_status search_status;
+ enum tf_search_status search_status;
/**
* [out] Current refcnt after allocation
*/
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v2 13/20] net/bnxt: ulp mapper changes to use tbl search
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (11 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 12/20] net/bnxt: added shadow table capability with search Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 14/20] net/bnxt: fix port default rule create and destroy Somnath Kotur
` (7 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Modified the ulp mapper to use the new tf_search_tbl_entry API.
When search before allocation is requested, the mapper calls
tf_search_tbl_entry with the alloc flag, as sketched after the
summary below.
- On HIT, the result and table index is returned.
- On MISS, the table index is returned but the result is
created and the table entry is set.
- On REJECT, the flow request is rejected.
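Condensed sketch of that flow (the parameter names follow the ulp_mapper.c
and tf_core.h changes in this series; the wrapper function, its arguments,
and the omission of the blob handling and error unwinding are illustrative
only):

/*
 * Sketch only: search-before-alloc path, condensed from the diff below.
 */
static int
example_search_b4_alloc(struct tf *tfp, enum tf_dir dir,
			enum tf_tbl_type type, uint8_t *result,
			uint16_t result_sz, uint32_t tbl_scope_id)
{
	struct tf_search_tbl_entry_parms srchparms = { 0 };
	struct tf_set_tbl_entry_parms sparms = { 0 };
	int rc;

	srchparms.dir = dir;
	srchparms.type = type;
	srchparms.alloc = 1;
	srchparms.result = result;
	srchparms.result_sz_in_bytes = result_sz;
	srchparms.tbl_scope_id = tbl_scope_id;

	rc = tf_search_tbl_entry(tfp, &srchparms);
	if (rc)
		return rc;
	if (srchparms.search_status == REJECT)
		return -ENOMEM;            /* reject the flow */

	if (!srchparms.hit) {
		/* MISS: index was allocated, program the result record */
		sparms.dir = dir;
		sparms.type = type;
		sparms.data = result;
		sparms.data_sz_in_bytes = result_sz;
		sparms.idx = srchparms.idx;
		sparms.tbl_scope_id = tbl_scope_id;
		rc = tf_set_tbl_entry(tfp, &sparms);
	}
	/* On HIT, srchparms.idx references the existing entry */
	return rc;
}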
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 75 ++++++++++++++++++++++++------------
1 file changed, 51 insertions(+), 24 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index a071c07..4dee659 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -1764,9 +1764,10 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct ulp_blob data;
uint64_t idx = 0;
uint16_t tmplen;
- uint32_t i, num_flds;
+ uint32_t i, num_flds, index, hit;
int32_t rc = 0, trc = 0;
struct tf_alloc_tbl_entry_parms aparms = { 0 };
+ struct tf_search_tbl_entry_parms srchparms = { 0 };
struct tf_set_tbl_entry_parms sparms = { 0 };
struct tf_free_tbl_entry_parms free_parms = { 0 };
uint32_t tbl_scope_id;
@@ -1868,33 +1869,59 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
return 0; /* success */
}
+ index = 0;
+ hit = 0;
/* Perform the tf table allocation by filling the alloc params */
- aparms.dir = tbl->direction;
- aparms.type = tbl->resource_type;
- aparms.search_enable = tbl->srch_b4_alloc;
- aparms.result = ulp_blob_data_get(&data, &tmplen);
- aparms.result_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
- aparms.tbl_scope_id = tbl_scope_id;
-
- /* All failures after the alloc succeeds require a free */
- rc = tf_alloc_tbl_entry(tfp, &aparms);
- if (rc) {
- BNXT_TF_DBG(ERR, "Alloc table[%d][%s] failed rc=%d\n",
- aparms.type,
- (aparms.dir == TF_DIR_RX) ? "RX" : "TX",
- rc);
- return rc;
- }
+ if (tbl->srch_b4_alloc) {
+ memset(&srchparms, 0, sizeof(srchparms));
+ srchparms.dir = tbl->direction;
+ srchparms.type = tbl->resource_type;
+ srchparms.alloc = 1;
+ srchparms.result = ulp_blob_data_get(&data, &tmplen);
+ srchparms.result_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
+ srchparms.tbl_scope_id = tbl_scope_id;
+ rc = tf_search_tbl_entry(tfp, &srchparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Alloc table[%s][%s] failed rc=%d\n",
+ tf_tbl_type_2_str(tbl->resource_type),
+ tf_dir_2_str(tbl->direction), rc);
+ return rc;
+ }
+ if (srchparms.search_status == REJECT) {
+ BNXT_TF_DBG(ERR, "Alloc table[%s][%s] rejected.\n",
+ tf_tbl_type_2_str(tbl->resource_type),
+ tf_dir_2_str(tbl->direction));
+ return -ENOMEM;
+ }
+ index = srchparms.idx;
+ hit = srchparms.hit;
+ } else {
+ aparms.dir = tbl->direction;
+ aparms.type = tbl->resource_type;
+ aparms.search_enable = tbl->srch_b4_alloc;
+ aparms.result = ulp_blob_data_get(&data, &tmplen);
+ aparms.result_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
+ aparms.tbl_scope_id = tbl_scope_id;
+ /* All failures after the alloc succeeds require a free */
+ rc = tf_alloc_tbl_entry(tfp, &aparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Alloc table[%s][%s] failed rc=%d\n",
+ tf_tbl_type_2_str(tbl->resource_type),
+ tf_dir_2_str(tbl->direction), rc);
+ return rc;
+ }
+ index = aparms.idx;
+ }
/*
* calculate the idx for the result record, for external EM the offset
* needs to be shifted accordingly. If external non-inline table types
* are used then need to revisit this logic.
*/
- if (aparms.type == TF_TBL_TYPE_EXT)
- idx = TF_ACT_REC_OFFSET_2_PTR(aparms.idx);
+ if (tbl->resource_type == TF_TBL_TYPE_EXT)
+ idx = TF_ACT_REC_OFFSET_2_PTR(index);
else
- idx = aparms.idx;
+ idx = index;
/* Always storing values in Regfile in BE */
idx = tfp_cpu_to_be_64(idx);
@@ -1908,12 +1935,12 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
}
/* Perform the tf table set by filling the set params */
- if (!tbl->srch_b4_alloc || !aparms.hit) {
+ if (!tbl->srch_b4_alloc || !hit) {
sparms.dir = tbl->direction;
sparms.type = tbl->resource_type;
sparms.data = ulp_blob_data_get(&data, &tmplen);
sparms.data_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
- sparms.idx = aparms.idx;
+ sparms.idx = index;
sparms.tbl_scope_id = tbl_scope_id;
rc = tf_set_tbl_entry(tfp, &sparms);
@@ -1933,7 +1960,7 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
fid_parms.resource_func = tbl->resource_func;
fid_parms.resource_type = tbl->resource_type;
fid_parms.resource_sub_type = tbl->resource_sub_type;
- fid_parms.resource_hndl = aparms.idx;
+ fid_parms.resource_hndl = index;
fid_parms.critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO;
rc = ulp_flow_db_resource_add(parms->ulp_ctx,
@@ -1960,7 +1987,7 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
*/
free_parms.dir = tbl->direction;
free_parms.type = tbl->resource_type;
- free_parms.idx = aparms.idx;
+ free_parms.idx = index;
free_parms.tbl_scope_id = tbl_scope_id;
trc = tf_free_tbl_entry(tfp, &free_parms);
--
2.7.4
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v2 14/20] net/bnxt: fix port default rule create and destroy
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (12 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 13/20] net/bnxt: ulp mapper changes to use tbl search Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 15/20] net/bnxt: delete VF FW rules when a representor is created Somnath Kotur
` (6 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Currently, the flow_ids of port_to_app/app_to_port & tx_cfa_action
for the first port are overwritten by the second port because
these fields are stored in the ulp context, which is shared across
the ports.
This patch fixes the problem by using a per-port structure to store
these fields.
Fixes: 9f702636d7ba ("net/bnxt: add port default rules for ingress and egress")
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/bnxt.h | 5 +-
drivers/net/bnxt/bnxt_ethdev.c | 81 ++--------------------
drivers/net/bnxt/bnxt_txq.h | 1 -
drivers/net/bnxt/bnxt_txr.c | 6 +-
drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 12 +++-
drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 14 +++-
drivers/net/bnxt/tf_ulp/ulp_def_rules.c | 116 ++++++++++++++++++++++++++++++++
drivers/net/bnxt/tf_ulp/ulp_flow_db.c | 2 +-
drivers/net/bnxt/tf_ulp/ulp_flow_db.h | 2 +-
9 files changed, 151 insertions(+), 88 deletions(-)
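As an illustration, a minimal sketch of the per-port lookup this patch
introduces; the helper name below is hypothetical, while the struct,
the df_rule_info[] array and the port_id indexing are taken from the
diff that follows:

static struct bnxt_ulp_df_rule_info *
bnxt_ulp_df_rule_info_get(struct bnxt *bp)
{
	uint8_t port_id = bp->eth_dev->data->port_id;

	/* Each port owns its own slot, so creating the default rules
	 * for a second port no longer overwrites the first port's
	 * flow ids. tx_cfa_action is likewise moved out of the shared
	 * ulp context and into the per-port struct bnxt.
	 */
	return &bp->ulp_ctx->cfg_data->df_rule_info[port_id];
}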
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 50f93ff..41e7ae5 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -784,6 +784,7 @@ struct bnxt {
struct bnxt_flow_stat_info *flow_stat;
uint8_t flow_xstat;
uint16_t max_num_kflows;
+ uint16_t tx_cfa_action;
};
#define BNXT_FC_TIMER 1 /* Timer freq in Sec Flow Counters */
@@ -797,7 +798,7 @@ struct bnxt_vf_representor {
uint16_t fw_fid;
uint16_t dflt_vnic_id;
uint16_t svif;
- uint32_t vfr_tx_cfa_action;
+ uint16_t vfr_tx_cfa_action;
uint16_t rx_cfa_code;
uint32_t rep2vf_flow_id;
uint32_t vf2rep_flow_id;
@@ -872,6 +873,8 @@ extern int bnxt_logtype_driver;
extern const struct rte_flow_ops bnxt_ulp_rte_flow_ops;
int32_t bnxt_ulp_init(struct bnxt *bp);
void bnxt_ulp_deinit(struct bnxt *bp);
+int32_t bnxt_ulp_create_df_rules(struct bnxt *bp);
+void bnxt_ulp_destroy_df_rules(struct bnxt *bp, bool global);
uint16_t bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type);
uint16_t bnxt_get_svif(uint16_t port_id, bool func_svif,
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 0829493..dfc4b41 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1168,73 +1168,6 @@ static int bnxt_handle_if_change_status(struct bnxt *bp)
return rc;
}
-static int32_t
-bnxt_create_port_app_df_rule(struct bnxt *bp, uint8_t flow_type,
- uint32_t *flow_id)
-{
- uint16_t port_id = bp->eth_dev->data->port_id;
- struct ulp_tlv_param param_list[] = {
- {
- .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
- .length = 2,
- .value = {(port_id >> 8) & 0xff, port_id & 0xff}
- },
- {
- .type = BNXT_ULP_DF_PARAM_TYPE_LAST,
- .length = 0,
- .value = {0}
- }
- };
-
- return ulp_default_flow_create(bp->eth_dev, param_list, flow_type,
- flow_id);
-}
-
-static int32_t
-bnxt_create_df_rules(struct bnxt *bp)
-{
- struct bnxt_ulp_data *cfg_data;
- int rc;
-
- cfg_data = bp->ulp_ctx->cfg_data;
- rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_PORT_TO_VS,
- &cfg_data->port_to_app_flow_id);
- if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to create port to app default rule\n");
- return rc;
- }
-
- BNXT_TF_DBG(DEBUG, "***** created port to app default rule ******\n");
- rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_VS_TO_PORT,
- &cfg_data->app_to_port_flow_id);
- if (!rc) {
- rc = ulp_default_flow_db_cfa_action_get(bp->ulp_ctx,
- cfg_data->app_to_port_flow_id,
- &cfg_data->tx_cfa_action);
- if (rc)
- goto err;
-
- BNXT_TF_DBG(DEBUG,
- "***** created app to port default rule *****\n");
- return 0;
- }
-
-err:
- BNXT_TF_DBG(DEBUG, "Failed to create app to port default rule\n");
- return rc;
-}
-
-static void
-bnxt_destroy_df_rules(struct bnxt *bp)
-{
- struct bnxt_ulp_data *cfg_data;
-
- cfg_data = bp->ulp_ctx->cfg_data;
- ulp_default_flow_destroy(bp->eth_dev, cfg_data->port_to_app_flow_id);
- ulp_default_flow_destroy(bp->eth_dev, cfg_data->app_to_port_flow_id);
-}
-
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
@@ -1296,8 +1229,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
bnxt_schedule_fw_health_check(bp);
pthread_mutex_unlock(&bp->def_cp_lock);
- if (BNXT_TRUFLOW_EN(bp))
- bnxt_ulp_init(bp);
+ bnxt_ulp_init(bp);
return 0;
@@ -1358,6 +1290,9 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
+ bnxt_ulp_destroy_df_rules(bp, false);
+ bnxt_ulp_deinit(bp);
+
bnxt_cancel_fw_health_check(bp);
bnxt_dev_set_link_down_op(eth_dev);
@@ -1403,11 +1338,6 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
bnxt_cancel_fc_thread(bp);
- if (BNXT_TRUFLOW_EN(bp)) {
- bnxt_destroy_df_rules(bp);
- bnxt_ulp_deinit(bp);
- }
-
if (eth_dev->data->dev_started)
bnxt_dev_stop_op(eth_dev);
@@ -1656,8 +1586,7 @@ static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
if (rc != 0)
vnic->flags = old_flags;
- if (BNXT_TRUFLOW_EN(bp))
- bnxt_create_df_rules(bp);
+ bnxt_ulp_create_df_rules(bp);
return rc;
}
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index a1ab3f3..83a9853 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -29,7 +29,6 @@ struct bnxt_tx_queue {
struct bnxt *bp;
int index;
int tx_wake_thresh;
- uint32_t tx_cfa_action;
uint32_t vfr_tx_cfa_action;
struct bnxt_tx_ring_info *tx_ring;
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index c741c71..1113aca 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -133,8 +133,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST |
PKT_TX_QINQ_PKT) ||
(BNXT_TRUFLOW_EN(txq->bp) &&
- (txq->bp->ulp_ctx->cfg_data->tx_cfa_action ||
- txq->vfr_tx_cfa_action)))
+ (txq->bp->tx_cfa_action || txq->vfr_tx_cfa_action)))
long_bd = true;
nr_bds = long_bd + tx_pkt->nb_segs;
@@ -192,8 +191,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
if (txq->vfr_tx_cfa_action)
cfa_action = txq->vfr_tx_cfa_action;
else
- cfa_action =
- txq->bp->ulp_ctx->cfg_data->tx_cfa_action;
+ cfa_action = txq->bp->tx_cfa_action;
}
/* HW can accelerate only outer vlan in QinQ mode */
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 7c65a4b..2febd58 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -9,9 +9,9 @@
#include <rte_flow_driver.h>
#include <rte_tailq.h>
+#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
-#include "bnxt.h"
#include "tf_core.h"
#include "tf_ext_flow_handle.h"
@@ -381,6 +381,7 @@ ulp_ctx_init(struct bnxt *bp,
(void)ulp_ctx_deinit(bp, session);
return rc;
}
+
bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, session->g_tfp);
return rc;
}
@@ -654,6 +655,9 @@ bnxt_ulp_init(struct bnxt *bp)
bool init;
int rc;
+ if (!BNXT_TRUFLOW_EN(bp))
+ return 0;
+
if (bp->ulp_ctx) {
BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
return -EINVAL;
@@ -822,6 +826,9 @@ bnxt_ulp_deinit(struct bnxt *bp)
struct rte_pci_device *pci_dev;
struct rte_pci_addr *pci_addr;
+ if (!BNXT_TRUFLOW_EN(bp))
+ return;
+
/* Get the session first */
pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
pci_addr = &pci_dev->addr;
@@ -833,6 +840,9 @@ bnxt_ulp_deinit(struct bnxt *bp)
if (!session)
return;
+ /* clean up default flows */
+ bnxt_ulp_destroy_df_rules(bp, true);
+
/* clean up regular flows */
ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE);
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
index 7c95ead..d532452 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
@@ -22,6 +22,12 @@
#define BNXT_ULP_VF_REP_ENABLED 0x1
#define ULP_VF_REP_IS_ENABLED(flag) ((flag) & BNXT_ULP_VF_REP_ENABLED)
+struct bnxt_ulp_df_rule_info {
+ uint32_t port_to_app_flow_id;
+ uint32_t app_to_port_flow_id;
+ uint8_t valid;
+};
+
struct bnxt_ulp_data {
uint32_t tbl_scope_id;
struct bnxt_ulp_mark_tbl *mark_tbl;
@@ -32,9 +38,7 @@ struct bnxt_ulp_data {
struct bnxt_ulp_port_db *port_db;
struct bnxt_ulp_fc_info *fc_info;
uint32_t ulp_flags;
- uint32_t port_to_app_flow_id;
- uint32_t app_to_port_flow_id;
- uint32_t tx_cfa_action;
+ struct bnxt_ulp_df_rule_info df_rule_info[RTE_MAX_ETHPORTS];
};
struct bnxt_ulp_context {
@@ -175,4 +179,8 @@ int32_t
bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
uint32_t *flags);
+int32_t
+bnxt_ulp_get_df_rule_info(uint8_t port_id, struct bnxt_ulp_context *ulp_ctx,
+ struct bnxt_ulp_df_rule_info *info);
+
#endif /* _BNXT_ULP_H_ */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
index ddc6da8..9fb1a02 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
@@ -392,3 +392,119 @@ ulp_default_flow_destroy(struct rte_eth_dev *eth_dev, uint32_t flow_id)
return rc;
}
+
+void
+bnxt_ulp_destroy_df_rules(struct bnxt *bp, bool global)
+{
+ struct bnxt_ulp_df_rule_info *info;
+ uint8_t port_id;
+
+ if (!BNXT_TRUFLOW_EN(bp) ||
+ BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
+ return;
+
+ if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
+ return;
+
+ /* Delete default rules per port */
+ if (!global) {
+ port_id = bp->eth_dev->data->port_id;
+ info = &bp->ulp_ctx->cfg_data->df_rule_info[port_id];
+ if (!info->valid)
+ return;
+
+ ulp_default_flow_destroy(bp->eth_dev,
+ info->port_to_app_flow_id);
+ ulp_default_flow_destroy(bp->eth_dev,
+ info->app_to_port_flow_id);
+ info->valid = false;
+ return;
+ }
+
+ /* Delete default rules for all ports */
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ info = &bp->ulp_ctx->cfg_data->df_rule_info[port_id];
+ if (!info->valid)
+ continue;
+
+ ulp_default_flow_destroy(bp->eth_dev,
+ info->port_to_app_flow_id);
+ ulp_default_flow_destroy(bp->eth_dev,
+ info->app_to_port_flow_id);
+ info->valid = false;
+ }
+}
+
+static int32_t
+bnxt_create_port_app_df_rule(struct bnxt *bp, uint8_t flow_type,
+ uint32_t *flow_id)
+{
+ uint16_t port_id = bp->eth_dev->data->port_id;
+ struct ulp_tlv_param param_list[] = {
+ {
+ .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
+ .length = 2,
+ .value = {(port_id >> 8) & 0xff, port_id & 0xff}
+ },
+ {
+ .type = BNXT_ULP_DF_PARAM_TYPE_LAST,
+ .length = 0,
+ .value = {0}
+ }
+ };
+
+ return ulp_default_flow_create(bp->eth_dev, param_list, flow_type,
+ flow_id);
+}
+
+int32_t
+bnxt_ulp_create_df_rules(struct bnxt *bp)
+{
+ struct bnxt_ulp_df_rule_info *info;
+ uint8_t port_id;
+ int rc;
+
+ if (!BNXT_TRUFLOW_EN(bp) ||
+ BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
+ return 0;
+
+ port_id = bp->eth_dev->data->port_id;
+ info = &bp->ulp_ctx->cfg_data->df_rule_info[port_id];
+ BNXT_TF_DBG(INFO, "*** creating port to app default rule ***\n");
+ rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_PORT_TO_VS,
+ &info->port_to_app_flow_id);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to create port to app default rule\n");
+ return rc;
+ }
+ BNXT_TF_DBG(INFO, "*** created port to app default rule ***\n");
+
+ bp->tx_cfa_action = 0;
+ BNXT_TF_DBG(INFO, "*** creating app to port default rule ***\n");
+ rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_VS_TO_PORT,
+ &info->app_to_port_flow_id);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to create app to port default rule\n");
+ goto port_to_app_free;
+ }
+
+ rc = ulp_default_flow_db_cfa_action_get(bp->ulp_ctx,
+ info->app_to_port_flow_id,
+ &bp->tx_cfa_action);
+ if (rc)
+ goto app_to_port_free;
+
+ info->valid = true;
+ BNXT_TF_DBG(INFO, "*** created app to port default rule ***\n");
+ return 0;
+
+app_to_port_free:
+ ulp_default_flow_destroy(bp->eth_dev, info->app_to_port_flow_id);
+port_to_app_free:
+ ulp_default_flow_destroy(bp->eth_dev, info->port_to_app_flow_id);
+ info->valid = false;
+
+ return rc;
+}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
index a3cfe54..7144517 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
@@ -962,7 +962,7 @@ ulp_flow_db_resource_hndl_get(struct bnxt_ulp_context *ulp_ctx,
int32_t
ulp_default_flow_db_cfa_action_get(struct bnxt_ulp_context *ulp_ctx,
uint32_t flow_id,
- uint32_t *cfa_action)
+ uint16_t *cfa_action)
{
uint8_t sub_type = BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_VFR_CFA_ACTION;
uint64_t hndl;
diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
index 1fc0601..117e250 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
@@ -234,7 +234,7 @@ ulp_flow_db_validate_flow_func(struct bnxt_ulp_context *ulp_ctx,
int32_t
ulp_default_flow_db_cfa_action_get(struct bnxt_ulp_context *ulp_ctx,
uint32_t flow_id,
- uint32_t *cfa_action);
+ uint16_t *cfa_action);
#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
/*
--
2.7.4
* [dpdk-dev] [PATCH v2 15/20] net/bnxt: delete VF FW rules when a representor is created
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (13 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 14/20] net/bnxt: fix port default rule create and destroy Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 16/20] net/bnxt: shadow tcam and tbl reference count modification Somnath Kotur
` (5 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
The Truflow stack adds VFR-to-VF and VF-to-VFR conduits when a VF
representor is created. However, in the ingress direction the
VF's FW rules conflict with the Truflow rules, so the Truflow VFR
rules are never hit. To fix this, FW removes its VF rules when a
VF representor is created in Truflow mode and restores the removed
rules when the VF representor is destroyed.
This patch invokes the VF representor alloc and free HWRM commands,
as part of which FW performs the above mentioned actions.
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Shahaji Bhosle <shahaji.bhosle@broadcom.com>
---
drivers/net/bnxt/bnxt_hwrm.c | 49 ++++++++++++
drivers/net/bnxt/bnxt_hwrm.h | 2 +
drivers/net/bnxt/bnxt_reps.c | 19 ++++-
drivers/net/bnxt/hsi_struct_def_dpdk.h | 138 +++++++++++++++++++++++++++++++++
4 files changed, 205 insertions(+), 3 deletions(-)
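For context, a small sketch of the naming convention the new HWRM
commands use to identify the representor to FW; the helper below is
hypothetical, the format string mirrors bnxt_hwrm_cfa_vfr_alloc() in
the diff that follows:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper: FW keys the representor on
 * "<parent eth_dev name>vfr<vf index>" and removes/restores the VF
 * rules on the corresponding alloc/free command.
 */
static void
bnxt_vfr_name_fill(char *vfr_name, size_t len,
		   const char *parent_name, uint16_t vf_idx)
{
	snprintf(vfr_name, len, "%svfr%d", parent_name, vf_idx);
}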
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 7ea13a8..f5f0dfe 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -5486,6 +5486,55 @@ int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
return 0;
}
+int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx)
+{
+ struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_vfr_alloc_input req = {0};
+ int rc;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_VFR_ALLOC, BNXT_USE_CHIMP_MB);
+ req.vf_id = rte_cpu_to_le_16(vf_idx);
+ snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
+ bp->eth_dev->data->name, vf_idx);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "VFR %d allocated\n", vf_idx);
+ return rc;
+}
+
+int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx)
+{
+ struct hwrm_cfa_vfr_free_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_vfr_free_input req = {0};
+ int rc;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_VFR_FREE, BNXT_USE_CHIMP_MB);
+ req.vf_id = rte_cpu_to_le_16(vf_idx);
+ snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
+ bp->eth_dev->data->name, vf_idx);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "VFR %d freed\n", vf_idx);
+ return rc;
+}
+
#ifdef RTE_LIBRTE_BNXT_PMD_SYSTEM
int
bnxt_hwrm_oem_cmd(struct bnxt *bp, uint32_t entry_num)
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 01201a7..4a2af13 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -278,4 +278,6 @@ int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp);
int bnxt_hwrm_oem_cmd(struct bnxt *bp, uint32_t entry_num);
int bnxt_clear_one_vnic_filter(struct bnxt *bp,
struct bnxt_filter_info *filter);
+int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx);
+int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx);
#endif
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index c425e69..2f775e0 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -272,7 +272,7 @@ static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
if (rc) {
BNXT_TF_DBG(DEBUG,
"Default flow rule creation for VFR->VF failed!\n");
- return -EIO;
+ goto err;
}
BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VFR->VF! ***\n");
@@ -283,7 +283,7 @@ static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
if (rc) {
BNXT_TF_DBG(DEBUG,
"Failed to get action_ptr for VFR->VF dflt rule\n");
- return -EIO;
+ goto rep2vf_free;
}
BNXT_TF_DBG(DEBUG, "tx_cfa_action = %d\n", vfr->vfr_tx_cfa_action);
rc = ulp_default_flow_create(parent_dev, param_list,
@@ -292,13 +292,24 @@ static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
if (rc) {
BNXT_TF_DBG(DEBUG,
"Default flow rule creation for VF->VFR failed!\n");
- return -EIO;
+ goto rep2vf_free;
}
BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VF->VFR! ***\n");
BNXT_TF_DBG(DEBUG, "vfr2rep_flow_id = %d\n", vfr->vf2rep_flow_id);
+ rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp, vfr->vf_id);
+ if (rc)
+ goto vf2rep_free;
+
return 0;
+
+vf2rep_free:
+ ulp_default_flow_destroy(vfr->parent_dev, vfr->vf2rep_flow_id);
+rep2vf_free:
+ ulp_default_flow_destroy(vfr->parent_dev, vfr->rep2vf_flow_id);
+err:
+ return -EIO;
}
static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
@@ -414,6 +425,8 @@ static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
vfr->vfr_tx_cfa_action = 0;
vfr->rx_cfa_code = 0;
+ rc = bnxt_hwrm_cfa_vfr_free(parent_bp, vfr->vf_id);
+
return rc;
}
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index 598da71..3553935 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -35127,6 +35127,144 @@ struct hwrm_cfa_pair_info_output {
uint8_t valid;
} __rte_packed;
+/**********************
+ * hwrm_cfa_vfr_alloc *
+ **********************/
+
+
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vfr_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_id;
+ /*
+ * This field is reserved for the future use.
+ * It shall be set to 0.
+ */
+ uint16_t reserved;
+ uint8_t unused_0[4];
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_cfa_vfr_free *
+ *********************/
+
+
+/* hwrm_cfa_vfr_free_input (size:448b/56B) */
+struct hwrm_cfa_vfr_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_id;
+ uint16_t reserved;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+
+
/***************************************
* hwrm_cfa_redirect_query_tunnel_type *
***************************************/
--
2.7.4
* [dpdk-dev] [PATCH v2 16/20] net/bnxt: shadow tcam and tbl reference count modification
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (14 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 15/20] net/bnxt: delete VF FW rules when a representor is created Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 17/20] net/bnxt: tcam table processing support for search and alloc Somnath Kotur
` (4 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Moved setting the refcnt for shadow tcam and table entries to the
allocation path only. The insert can be called multiple times for
updates and was resetting the refcnt to 1 each time. Now multiple
insertions/modifications will not change the reference count.
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/tf_core/tf_shadow_tbl.c | 2 --
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 2 +-
2 files changed, 1 insertion(+), 3 deletions(-)
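To illustrate the intent (pseudo-flow only; the field names come from
the shadow table/tcam code in the diff below, the two paths are shown
as shorthand):

	/* alloc/bind path: establishes the reference exactly once */
	sr_entry->refcnt = 1;

	/* insert/update path: may run repeatedly for the same entry and
	 * no longer touches refcnt; otherwise an entry referenced by
	 * several flows would fall back to a single reference after any
	 * update.
	 */
	memcpy(sr_entry->result, sparms->result, sparms->result_size);
	sr_entry->result_size = sparms->result_size;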
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
index 019a26e..a4207eb 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
@@ -687,8 +687,6 @@ tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms)
if (!TF_SHADOW_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
return 0;
- sr_entry->refcnt = 1;
-
return 0;
}
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
index a0130d6..e2c347a 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
@@ -472,6 +472,7 @@ tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms)
/* Write the result table */
sr_entry->key_size = parms->key_size;
sr_entry->hb_handle = parms->hb_handle;
+ sr_entry->refcnt = 1;
return 0;
}
@@ -738,7 +739,6 @@ tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms)
memcpy(sr_entry->result, sparms->result, sparms->result_size);
sr_entry->result_size = sparms->result_size;
- sr_entry->refcnt = 1;
return 0;
}
--
2.7.4
* [dpdk-dev] [PATCH v2 17/20] net/bnxt: tcam table processing support for search and alloc
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (15 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 16/20] net/bnxt: shadow tcam and tbl reference count modification Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 18/20] net/bnxt: added templates for search before alloc Somnath Kotur
` (3 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Added tcam table processing to enable the search and allocate
support. This also includes support for updating a tcam entry.
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 317 ++++++++++++++++---------
drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h | 5 +-
drivers/net/bnxt/tf_ulp/ulp_template_struct.h | 8 +-
3 files changed, 213 insertions(+), 117 deletions(-)
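A condensed sketch of the new dispatch in ulp_mapper_tcam_tbl_process()
(the full version is in the diff below); a search miss is treated
exactly like the no-search case:

	if (!hit)
		search_flag = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO;
	else
		search_flag = tbl->srch_b4_alloc;

	switch (search_flag) {
	case BNXT_ULP_SEARCH_BEFORE_ALLOC_NO:
		/* allocate identifiers, build the result, write the entry */
		break;
	case BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP:
		/* reuse the hit: extract identifiers from the found result */
		break;
	case BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_UPDATE:
		/* reuse the hit, rebuild the result and overwrite the entry */
		break;
	default:
		rc = -EINVAL;
		break;
	}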
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 4dee659..6ac4b0f 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -1317,20 +1317,177 @@ ulp_mapper_mark_vfr_idx_process(struct bnxt_ulp_mapper_parms *parms,
return rc;
}
+/*
+ * Tcam table - create the result blob.
+ * data [out] - the result blob data
+ */
+static int32_t
+ulp_mapper_tcam_tbl_result_create(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl,
+ struct ulp_blob *data)
+{
+ struct bnxt_ulp_mapper_result_field_info *dflds;
+ uint32_t num_dflds;
+ uint32_t encap_flds = 0;
+ uint32_t i;
+ int32_t rc = 0;
+
+ /* Create the result data blob */
+ dflds = ulp_mapper_result_fields_get(tbl, &num_dflds,
+ &encap_flds);
+ if (!dflds || !num_dflds || encap_flds) {
+ BNXT_TF_DBG(ERR, "Failed to get data fields.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_dflds; i++) {
+ rc = ulp_mapper_result_field_process(parms,
+ tbl->direction,
+ &dflds[i],
+ data,
+ "TCAM Result");
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to set data fields\n");
+ return -EINVAL;
+ }
+ }
+ return rc;
+}
+
+/* Tcam table scan the identifier list and allocate each identifier */
+static int32_t
+ulp_mapper_tcam_tbl_scan_ident_alloc(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl)
+{
+ struct bnxt_ulp_mapper_ident_info *idents;
+ uint32_t num_idents;
+ uint32_t i;
+
+ /*
+ * Since the cache entry is responsible for allocating
+ * identifiers when in use, allocate the identifiers only
+ * during normal processing.
+ */
+ if (parms->tcam_tbl_opc ==
+ BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL) {
+ idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
+
+ for (i = 0; i < num_idents; i++) {
+ if (ulp_mapper_ident_process(parms, tbl,
+ &idents[i], NULL))
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Tcam table scan the identifier list and extract the identifier from
+ * the result blob.
+ */
+static int32_t
+ulp_mapper_tcam_tbl_scan_ident_extract(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl,
+ struct ulp_blob *data)
+{
+ struct bnxt_ulp_mapper_ident_info *idents;
+ uint32_t num_idents = 0, i;
+ int32_t rc = 0;
+
+ /*
+ * Extract the listed identifiers from the result field,
+ * no need to allocate them.
+ */
+ idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
+ for (i = 0; i < num_idents; i++) {
+ rc = ulp_mapper_ident_extract(parms, tbl, &idents[i], data);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Error in identifier extraction\n");
+ return rc;
+ }
+ }
+ return rc;
+}
+
+/* Internal function to write the tcam entry */
+static int32_t
+ulp_mapper_tcam_tbl_entry_write(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl,
+ struct ulp_blob *key,
+ struct ulp_blob *mask,
+ struct ulp_blob *data,
+ uint16_t idx)
+{
+ struct tf_set_tcam_entry_parms sparms = { 0 };
+ struct tf *tfp;
+ uint16_t tmplen;
+ int32_t rc;
+
+ tfp = bnxt_ulp_cntxt_tfp_get(parms->ulp_ctx);
+ if (!tfp) {
+ BNXT_TF_DBG(ERR, "Failed to get truflow pointer\n");
+ return -EINVAL;
+ }
+
+ sparms.dir = tbl->direction;
+ sparms.tcam_tbl_type = tbl->resource_type;
+ sparms.idx = idx;
+ /* Already verified the key/mask lengths */
+ sparms.key = ulp_blob_data_get(key, &tmplen);
+ sparms.mask = ulp_blob_data_get(mask, &tmplen);
+ sparms.key_sz_in_bits = tbl->key_bit_size;
+ sparms.result = ulp_blob_data_get(data, &tmplen);
+
+ if (tbl->result_bit_size != tmplen) {
+ BNXT_TF_DBG(ERR, "Result len (%d) != Expected (%d)\n",
+ tmplen, tbl->result_bit_size);
+ return -EINVAL;
+ }
+ sparms.result_sz_in_bits = tbl->result_bit_size;
+ if (tf_set_tcam_entry(tfp, &sparms)) {
+ BNXT_TF_DBG(ERR, "tcam[%s][%s][%x] write failed.\n",
+ tf_tcam_tbl_2_str(sparms.tcam_tbl_type),
+ tf_dir_2_str(sparms.dir), sparms.idx);
+ return -EIO;
+ }
+ BNXT_TF_DBG(INFO, "tcam[%s][%s][%x] write success.\n",
+ tf_tcam_tbl_2_str(sparms.tcam_tbl_type),
+ tf_dir_2_str(sparms.dir), sparms.idx);
+
+ /* Update cache with TCAM index if the was cache allocated. */
+ if (parms->tcam_tbl_opc ==
+ BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_ALLOC) {
+ if (!parms->cache_ptr) {
+ BNXT_TF_DBG(ERR, "Unable to update cache");
+ return -EINVAL;
+ }
+ parms->cache_ptr->tcam_idx = idx;
+ }
+
+ /* Mark action */
+ rc = ulp_mapper_mark_act_ptr_process(parms, tbl);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "failed mark action processing\n");
+ return rc;
+ }
+
+ return rc;
+}
+
static int32_t
ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct bnxt_ulp_mapper_tbl_info *tbl)
{
struct bnxt_ulp_mapper_class_key_field_info *kflds;
- struct ulp_blob key, mask, data;
+ struct ulp_blob key, mask, data, update_data;
uint32_t i, num_kflds;
struct tf *tfp;
int32_t rc, trc;
struct tf_alloc_tcam_entry_parms aparms = { 0 };
struct tf_search_tcam_entry_parms searchparms = { 0 };
- struct tf_set_tcam_entry_parms sparms = { 0 };
struct ulp_flow_db_res_params fid_parms = { 0 };
struct tf_free_tcam_entry_parms free_parms = { 0 };
+ enum bnxt_ulp_search_before_alloc search_flag;
uint32_t hit = 0;
uint16_t tmplen = 0;
uint16_t idx;
@@ -1358,6 +1515,8 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
!ulp_blob_init(&mask, tbl->key_bit_size,
parms->device_params->byte_order) ||
!ulp_blob_init(&data, tbl->result_bit_size,
+ parms->device_params->byte_order) ||
+ !ulp_blob_init(&update_data, tbl->result_bit_size,
parms->device_params->byte_order)) {
BNXT_TF_DBG(ERR, "blob inits failed.\n");
return -EINVAL;
@@ -1388,7 +1547,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
}
}
- if (!tbl->srch_b4_alloc) {
+ if (tbl->srch_b4_alloc == BNXT_ULP_SEARCH_BEFORE_ALLOC_NO) {
/*
* No search for re-use is requested, so simply allocate the
* tcam index.
@@ -1455,113 +1614,49 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
hit = searchparms.hit;
}
- /* Build the result */
- if (!tbl->srch_b4_alloc || !hit) {
- struct bnxt_ulp_mapper_result_field_info *dflds;
- struct bnxt_ulp_mapper_ident_info *idents;
- uint32_t num_dflds, num_idents;
- uint32_t encap_flds = 0;
-
- /*
- * Since the cache entry is responsible for allocating
- * identifiers when in use, allocate the identifiers only
- * during normal processing.
- */
- if (parms->tcam_tbl_opc ==
- BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL) {
- idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
-
- for (i = 0; i < num_idents; i++) {
- rc = ulp_mapper_ident_process(parms, tbl,
- &idents[i], NULL);
- /* Already logged the error, just return */
- if (rc)
- goto error;
- }
- }
-
- /* Create the result data blob */
- dflds = ulp_mapper_result_fields_get(tbl, &num_dflds,
- &encap_flds);
- if (!dflds || !num_dflds || encap_flds) {
- BNXT_TF_DBG(ERR, "Failed to get data fields.\n");
- rc = -EINVAL;
- goto error;
- }
-
- for (i = 0; i < num_dflds; i++) {
- rc = ulp_mapper_result_field_process(parms,
- tbl->direction,
- &dflds[i],
- &data,
- "TCAM Result");
- if (rc) {
- BNXT_TF_DBG(ERR, "Failed to set data fields\n");
- goto error;
- }
- }
-
- sparms.dir = tbl->direction;
- sparms.tcam_tbl_type = tbl->resource_type;
- sparms.idx = idx;
- /* Already verified the key/mask lengths */
- sparms.key = ulp_blob_data_get(&key, &tmplen);
- sparms.mask = ulp_blob_data_get(&mask, &tmplen);
- sparms.key_sz_in_bits = tbl->key_bit_size;
- sparms.result = ulp_blob_data_get(&data, &tmplen);
-
- if (tbl->result_bit_size != tmplen) {
- BNXT_TF_DBG(ERR, "Result len (%d) != Expected (%d)\n",
- tmplen, tbl->result_bit_size);
- rc = -EINVAL;
- goto error;
- }
- sparms.result_sz_in_bits = tbl->result_bit_size;
-
- rc = tf_set_tcam_entry(tfp, &sparms);
- if (rc) {
- BNXT_TF_DBG(ERR, "tcam[%d][%s][%d] write failed.\n",
- sparms.tcam_tbl_type,
- (sparms.dir == TF_DIR_RX) ? "RX" : "TX",
- sparms.idx);
- goto error;
- }
-
- /* Update cache with TCAM index if the was cache allocated. */
- if (parms->tcam_tbl_opc ==
- BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_ALLOC) {
- if (!parms->cache_ptr) {
- BNXT_TF_DBG(ERR, "Unable to update cache");
- rc = -EINVAL;
- goto error;
- }
- parms->cache_ptr->tcam_idx = idx;
- }
-
- /* Mark action */
- rc = ulp_mapper_mark_act_ptr_process(parms, tbl);
- if (rc)
- goto error;
-
- } else {
- struct bnxt_ulp_mapper_ident_info *idents;
- uint32_t num_idents;
-
- /*
- * Extract the listed identifiers from the result field,
- * no need to allocate them.
- */
- idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
- for (i = 0; i < num_idents; i++) {
- rc = ulp_mapper_ident_extract(parms, tbl,
- &idents[i], &data);
- if (rc) {
- BNXT_TF_DBG(ERR,
- "Error in ident extraction\n");
- goto error;
- }
- }
+ /* if it is miss then it is same as no search before alloc */
+ if (!hit)
+ search_flag = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO;
+ else
+ search_flag = tbl->srch_b4_alloc;
+
+ switch (search_flag) {
+ case BNXT_ULP_SEARCH_BEFORE_ALLOC_NO:
+ /*Scan identifier list, allocate identifier and update regfile*/
+ rc = ulp_mapper_tcam_tbl_scan_ident_alloc(parms, tbl);
+ /* Create the result blob */
+ if (!rc)
+ rc = ulp_mapper_tcam_tbl_result_create(parms, tbl,
+ &data);
+ /* write the tcam entry */
+ if (!rc)
+ rc = ulp_mapper_tcam_tbl_entry_write(parms, tbl, &key,
+ &mask, &data, idx);
+ break;
+ case BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP:
+ /*Scan identifier list, extract identifier and update regfile*/
+ rc = ulp_mapper_tcam_tbl_scan_ident_extract(parms, tbl, &data);
+ break;
+ case BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_UPDATE:
+ /*Scan identifier list, extract identifier and update regfile*/
+ rc = ulp_mapper_tcam_tbl_scan_ident_extract(parms, tbl, &data);
+ /* Create the result blob */
+ if (!rc)
+ rc = ulp_mapper_tcam_tbl_result_create(parms, tbl,
+ &update_data);
+ /* Update/overwrite the tcam entry */
+ if (!rc)
+ rc = ulp_mapper_tcam_tbl_entry_write(parms, tbl, &key,
+ &mask,
+ &update_data, idx);
+ break;
+ default:
+ BNXT_TF_DBG(ERR, "invalid search opcode\n");
+ rc = -EINVAL;
+ break;
}
+ if (rc)
+ goto error;
/*
* Only link the entry to the flow db in the event that cache was not
@@ -1598,11 +1693,11 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
parms->tcam_tbl_opc = BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL;
free_parms.dir = tbl->direction;
free_parms.tcam_tbl_type = tbl->resource_type;
- free_parms.idx = aparms.idx;
+ free_parms.idx = idx;
trc = tf_free_tcam_entry(tfp, &free_parms);
if (trc)
BNXT_TF_DBG(ERR, "Failed to free tcam[%d][%d][%d] on failure\n",
- tbl->resource_type, tbl->direction, aparms.idx);
+ tbl->resource_type, tbl->direction, idx);
return rc;
}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index ac651f6..9855918 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -271,8 +271,9 @@ enum bnxt_ulp_regfile_index {
enum bnxt_ulp_search_before_alloc {
BNXT_ULP_SEARCH_BEFORE_ALLOC_NO = 0,
- BNXT_ULP_SEARCH_BEFORE_ALLOC_YES = 1,
- BNXT_ULP_SEARCH_BEFORE_ALLOC_LAST = 2
+ BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP = 1,
+ BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_UPDATE = 2,
+ BNXT_ULP_SEARCH_BEFORE_ALLOC_LAST = 3
};
enum bnxt_ulp_fdb_resource_flags {
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
index b9a25b0..6617ab9 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
@@ -173,10 +173,10 @@ struct bnxt_ulp_mapper_tbl_info {
enum bnxt_ulp_resource_sub_type resource_sub_type;
enum bnxt_ulp_cond_opcode cond_opcode;
uint32_t cond_operand;
- uint8_t direction;
- uint32_t priority;
- uint8_t srch_b4_alloc;
- enum bnxt_ulp_critical_resource critical_resource;
+ uint8_t direction;
+ uint32_t priority;
+ enum bnxt_ulp_search_before_alloc srch_b4_alloc;
+ enum bnxt_ulp_critical_resource critical_resource;
/* Information for accessing the ulp_key_field_list */
uint32_t key_start_idx;
--
2.7.4
* [dpdk-dev] [PATCH v2 18/20] net/bnxt: added templates for search before alloc
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (16 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 17/20] net/bnxt: tcam table processing support for search and alloc Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 19/20] net/bnxt: enabled shadow tables during session open Somnath Kotur
` (2 subsequent siblings)
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Search before alloc allows reuse of constrained resources such as tcam
entries, encap records, and source modifications. The new templates
will search for the entry and allocate one only if necessary.
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_template_db_act.c | 6 +++---
drivers/net/bnxt/tf_ulp/ulp_template_db_class.c | 14 +++++++-------
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
index 58b581c..14ce16e 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
@@ -1052,7 +1052,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_act_tbl_list[] = {
.cond_opcode = BNXT_ULP_COND_OPCODE_COMP_FIELD_IS_SET,
.cond_operand = BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
.direction = TF_DIR_TX,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.result_start_idx = 96,
.result_bit_size = 0,
.result_num_fields = 0,
@@ -1069,7 +1069,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_act_tbl_list[] = {
.cond_opcode = BNXT_ULP_COND_OPCODE_COMP_FIELD_IS_SET,
.cond_operand = BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
.direction = TF_DIR_TX,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.result_start_idx = 99,
.result_bit_size = 0,
.result_num_fields = 0,
@@ -1084,7 +1084,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_act_tbl_list[] = {
.resource_sub_type =
BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_NORMAL,
.direction = TF_DIR_TX,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.result_start_idx = 102,
.result_bit_size = 0,
.result_num_fields = 0,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
index 41d1d87..94160a9 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
@@ -3782,7 +3782,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.key_start_idx = 108,
.blob_key_bit_size = 167,
.key_bit_size = 167,
@@ -4210,7 +4210,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.key_start_idx = 457,
.blob_key_bit_size = 167,
.key_bit_size = 167,
@@ -4282,7 +4282,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.key_start_idx = 526,
.blob_key_bit_size = 167,
.key_bit_size = 167,
@@ -4354,7 +4354,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.key_start_idx = 595,
.blob_key_bit_size = 167,
.key_bit_size = 167,
@@ -4426,7 +4426,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.key_start_idx = 664,
.blob_key_bit_size = 167,
.key_bit_size = 167,
@@ -4570,7 +4570,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_SKIP,
.key_start_idx = 802,
.blob_key_bit_size = 167,
.key_bit_size = 167,
@@ -4998,7 +4998,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
.resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_TX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
- .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
+ .srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_SEARCH_IF_HIT_UPDATE,
.key_start_idx = 1151,
.blob_key_bit_size = 167,
.key_bit_size = 167,
--
2.7.4
* [dpdk-dev] [PATCH v2 19/20] net/bnxt: enabled shadow tables during session open
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (17 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 18/20] net/bnxt: added templates for search before alloc Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 20/20] net/bnxt: cleanup of VF-representor dev ops Somnath Kotur
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
From: Mike Baucom <michael.baucom@broadcom.com>
Turn on shadow memory in the core to allow search before allocate.
This allows reuse of constrained resources.
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
---
drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
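The change itself is a single flag at session open (fragment below,
mirroring the diff); with shadow_copy enabled, the search APIs added
earlier in this series can report a hit with a valid ref_cnt instead
of always allocating a fresh entry:

	params.shadow_copy = true;	/* was false: enable shadow tables */
	params.device_type = TF_DEVICE_TYPE_WH;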
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 2febd58..077527f 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -81,7 +81,7 @@ ulp_ctx_session_open(struct bnxt *bp,
return rc;
}
- params.shadow_copy = false;
+ params.shadow_copy = true;
params.device_type = TF_DEVICE_TYPE_WH;
resources = &params.resources;
/** RX **/
--
2.7.4
* [dpdk-dev] [PATCH v2 20/20] net/bnxt: cleanup of VF-representor dev ops
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (18 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 19/20] net/bnxt: enabled shadow tables during session open Somnath Kotur
@ 2020-07-23 11:56 ` Somnath Kotur
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
20 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-23 11:56 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
No need to access rx_cfa_code, cfa_code_map from the VF-Rep functions
anymore.
Fixes: 322bd6e70272 ("net/bnxt: add port representor infrastructure")
Reviewed-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
---
drivers/net/bnxt/bnxt.h | 1 -
drivers/net/bnxt/bnxt_reps.c | 75 +++++---------------------------------------
2 files changed, 7 insertions(+), 69 deletions(-)
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 41e7ae5..f4b2a3f 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -799,7 +799,6 @@ struct bnxt_vf_representor {
uint16_t dflt_vnic_id;
uint16_t svif;
uint16_t vfr_tx_cfa_action;
- uint16_t rx_cfa_code;
uint32_t rep2vf_flow_id;
uint32_t vf2rep_flow_id;
/* Private data store of associated PF/Trusted VF */
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index 2f775e0..6fa9a30 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -230,6 +230,9 @@ int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
int rc;
parent_bp = rep->parent_dev->data->dev_private;
+ if (!parent_bp)
+ return 0;
+
rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);
/* Link state. Inherited from PF or trusted VF */
@@ -324,7 +327,7 @@ static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
}
/* Check if representor has been already allocated in FW */
- if (vfr->vfr_tx_cfa_action && vfr->rx_cfa_code)
+ if (vfr->vfr_tx_cfa_action)
return 0;
/*
@@ -406,9 +409,11 @@ static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
}
parent_bp = vfr->parent_dev->data->dev_private;
+ if (!parent_bp)
+ return 0;
/* Check if representor has been already freed in FW */
- if (!vfr->vfr_tx_cfa_action && !vfr->rx_cfa_code)
+ if (!vfr->vfr_tx_cfa_action)
return 0;
rc = bnxt_tf_vfr_free(vfr);
@@ -419,11 +424,9 @@ static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
return rc;
}
- parent_bp->cfa_code_map[vfr->rx_cfa_code] = BNXT_VF_IDX_INVALID;
PMD_DRV_LOG(DEBUG, "freed representor %d in FW\n",
vfr->vf_id);
vfr->vfr_tx_cfa_action = 0;
- vfr->rx_cfa_code = 0;
rc = bnxt_hwrm_cfa_vfr_free(parent_bp, vfr->vf_id);
@@ -456,7 +459,6 @@ int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
{
struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
struct bnxt *parent_bp;
- uint16_t max_vnics, i, j, vpool, vrxq;
unsigned int max_rx_rings;
int rc = 0;
@@ -476,7 +478,6 @@ int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->max_tx_queues = max_rx_rings;
dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
dev_info->hash_key_size = 40;
- max_vnics = parent_bp->max_vnics;
/* MTU specifics */
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
@@ -492,68 +493,6 @@ int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
- /* *INDENT-OFF* */
- dev_info->default_rxconf = (struct rte_eth_rxconf) {
- .rx_thresh = {
- .pthresh = 8,
- .hthresh = 8,
- .wthresh = 0,
- },
- .rx_free_thresh = 32,
- /* If no descriptors available, pkts are dropped by default */
- .rx_drop_en = 1,
- };
-
- dev_info->default_txconf = (struct rte_eth_txconf) {
- .tx_thresh = {
- .pthresh = 32,
- .hthresh = 0,
- .wthresh = 0,
- },
- .tx_free_thresh = 32,
- .tx_rs_thresh = 32,
- };
- eth_dev->data->dev_conf.intr_conf.lsc = 1;
-
- eth_dev->data->dev_conf.intr_conf.rxq = 1;
- dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
- dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
- dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
- dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
-
- /* *INDENT-ON* */
-
- /*
- * TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
- * need further investigation.
- */
-
- /* VMDq resources */
- vpool = 64; /* ETH_64_POOLS */
- vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
- for (i = 0; i < 4; vpool >>= 1, i++) {
- if (max_vnics > vpool) {
- for (j = 0; j < 5; vrxq >>= 1, j++) {
- if (dev_info->max_rx_queues > vrxq) {
- if (vpool > vrxq)
- vpool = vrxq;
- goto found;
- }
- }
- /* Not enough resources to support VMDq */
- break;
- }
- }
- /* Not enough resources to support VMDq */
- vpool = 0;
- vrxq = 0;
-found:
- dev_info->max_vmdq_pools = vpool;
- dev_info->vmdq_queue_num = vrxq;
-
- dev_info->vmdq_pool_base = 0;
- dev_info->vmdq_queue_base = 0;
-
return 0;
}
--
2.7.4
* [dpdk-dev] [PATCH v3 00/22] bnxt patches
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 00/20] bnxt patches Somnath Kotur
` (19 preceding siblings ...)
2020-07-23 11:56 ` [dpdk-dev] [PATCH v2 20/20] net/bnxt: cleanup of VF-representor dev ops Somnath Kotur
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 01/22] net/bnxt: add shadow and search capability to tcam Ajit Khaparde
` (22 more replies)
20 siblings, 23 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit
Some fixes, cleanups and changes to augment the pre-existing
infrastructure support
Please apply
v1->v2:
- Fixed some typos in patch [9/20].
v2->v3:
- Fixed coding style issues in patch [15/20] to use __rte_attribute.
- Updated commit messages.
- Added a patch to fix a logic error [21/22].
- Added a patch to fix compilation issue with -O and -g CFLAGS [22/22].
Ajit Khaparde (2):
net/bnxt: fix if condition
net/bnxt: fix build error with extra cflags
Kishore Padmanabha (7):
net/bnxt: add access to nat global register
net/bnxt: configure parif for offload miss rules
net/bnxt: update nat template
net/bnxt: configure parif for the egress rules
net/bnxt: ignore VLAN priority mask
net/bnxt: add egress template with VLAN tag match
net/bnxt: add tcam table processing for search and alloc
Mike Baucom (9):
net/bnxt: add shadow and search capability to tcam
net/bnxt: modify ulp mapper to use tcam search
net/bnxt: add tf hash API
net/bnxt: modify tf shadow tcam to use tf hash
net/bnxt: add shadow table capability with search
net/bnxt: modify ulp mapper to use tbl search
net/bnxt: modify shadow tcam and tbl reference count logic
net/bnxt: add templates for search before alloc
net/bnxt: enable shadow tables during session open
Somnath Kotur (1):
net/bnxt: cleanup VF-representor dev ops
Venkat Duvvuru (3):
net/bnxt: skip mark id injection into mbuf
net/bnxt: fix port default rule create and destroy
net/bnxt: delete VF FW rules on representor create
drivers/net/bnxt/bnxt.h | 6 +-
drivers/net/bnxt/bnxt_ethdev.c | 83 +-
drivers/net/bnxt/bnxt_hwrm.c | 49 ++
drivers/net/bnxt/bnxt_hwrm.h | 2 +
drivers/net/bnxt/bnxt_reps.c | 94 +-
drivers/net/bnxt/bnxt_rxr.c | 3 +
drivers/net/bnxt/bnxt_txq.h | 1 -
drivers/net/bnxt/bnxt_txr.c | 19 +-
drivers/net/bnxt/hsi_struct_def_dpdk.h | 138 +++
drivers/net/bnxt/meson.build | 1 +
drivers/net/bnxt/tf_core/Makefile | 1 +
drivers/net/bnxt/tf_core/tf_core.c | 139 ++-
drivers/net/bnxt/tf_core/tf_core.h | 174 ++++
drivers/net/bnxt/tf_core/tf_device_p4.c | 4 +-
drivers/net/bnxt/tf_core/tf_hash.c | 106 +++
drivers/net/bnxt/tf_core/tf_hash.h | 27 +
drivers/net/bnxt/tf_core/tf_shadow_tbl.c | 766 +++++++++++++++-
drivers/net/bnxt/tf_core/tf_shadow_tbl.h | 124 +--
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 818 +++++++++++++++++-
drivers/net/bnxt/tf_core/tf_shadow_tcam.h | 258 +++---
drivers/net/bnxt/tf_core/tf_tbl.c | 246 +++++-
drivers/net/bnxt/tf_core/tf_tbl.h | 22 +-
drivers/net/bnxt/tf_core/tf_tcam.c | 300 ++++++-
drivers/net/bnxt/tf_core/tf_tcam.h | 31 +-
drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 97 ++-
drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 18 +-
drivers/net/bnxt/tf_ulp/ulp_def_rules.c | 127 ++-
drivers/net/bnxt/tf_ulp/ulp_flow_db.c | 2 +-
drivers/net/bnxt/tf_ulp/ulp_flow_db.h | 2 +-
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 505 +++++++----
drivers/net/bnxt/tf_ulp/ulp_port_db.c | 2 +
drivers/net/bnxt/tf_ulp/ulp_port_db.h | 1 +
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 81 ++
drivers/net/bnxt/tf_ulp/ulp_template_db_act.c | 434 ++++++----
.../net/bnxt/tf_ulp/ulp_template_db_class.c | 556 +++++++++++-
.../net/bnxt/tf_ulp/ulp_template_db_enum.h | 73 +-
drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c | 4 +-
drivers/net/bnxt/tf_ulp/ulp_template_struct.h | 8 +-
38 files changed, 4462 insertions(+), 860 deletions(-)
create mode 100644 drivers/net/bnxt/tf_core/tf_hash.c
create mode 100644 drivers/net/bnxt/tf_core/tf_hash.h
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 01/22] net/bnxt: add shadow and search capability to tcam
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 18:04 ` Stephen Hemminger
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 02/22] net/bnxt: add access to nat global register Ajit Khaparde
` (21 subsequent siblings)
22 siblings, 1 reply; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Mike Baucom, Randy Schacher
From: Mike Baucom <michael.baucom@broadcom.com>
- Add TCAM shadow tables for searching
- Add Search API to allow reuse of TCAM entries
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Randy Schacher <stuart.schacher@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
drivers/net/bnxt/tf_core/tf_core.c | 73 ++
drivers/net/bnxt/tf_core/tf_core.h | 101 +++
drivers/net/bnxt/tf_core/tf_device_p4.c | 2 +-
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 885 +++++++++++++++++++++-
drivers/net/bnxt/tf_core/tf_shadow_tcam.h | 258 +++----
drivers/net/bnxt/tf_core/tf_tcam.c | 300 +++++++-
drivers/net/bnxt/tf_core/tf_tcam.h | 31 +-
7 files changed, 1449 insertions(+), 201 deletions(-)
diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c
index 97e7952a9..ca3280b6b 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -607,6 +607,79 @@ tf_search_identifier(struct tf *tfp,
return 0;
}
+int
+tf_search_tcam_entry(struct tf *tfp,
+ struct tf_search_tcam_entry_parms *parms)
+{
+ int rc;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tf_tcam_alloc_search_parms sparms;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ memset(&sparms, 0, sizeof(struct tf_tcam_alloc_search_parms));
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ if (dev->ops->tf_dev_alloc_search_tcam == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ sparms.dir = parms->dir;
+ sparms.type = parms->tcam_tbl_type;
+ sparms.key = parms->key;
+ sparms.key_size = TF_BITS2BYTES_WORD_ALIGN(parms->key_sz_in_bits);
+ sparms.mask = parms->mask;
+ sparms.priority = parms->priority;
+ sparms.alloc = parms->alloc;
+
+ /* Result is an in/out and so no need to copy during outputs */
+ sparms.result = parms->result;
+ sparms.result_size =
+ TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits);
+
+ rc = dev->ops->tf_dev_alloc_search_tcam(tfp, &sparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: TCAM allocation failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Copy the outputs */
+ parms->hit = sparms.hit;
+ parms->search_status = sparms.search_status;
+ parms->ref_cnt = sparms.ref_cnt;
+ parms->idx = sparms.idx;
+
+ return 0;
+}
+
int
tf_alloc_tcam_entry(struct tf *tfp,
struct tf_alloc_tcam_entry_parms *parms)
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index 67415adaf..349a1f1a7 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -290,6 +290,18 @@ enum tf_tcam_tbl_type {
TF_TCAM_TBL_TYPE_MAX
};
+/**
+ * TCAM SEARCH STATUS
+ */
+enum tf_tcam_search_status {
+ /** The entry was not found, but an idx was allocated if requested. */
+ MISS,
+ /** The entry was found, and the result/idx are valid */
+ HIT,
+ /** The entry was not found and the table is full */
+ REJECT
+};
+
/**
* EM Resources
* These defines are provisioned during
@@ -949,6 +961,8 @@ int tf_free_tbl_scope(struct tf *tfp,
/**
* @page tcam TCAM Access
*
+ * @ref tf_search_tcam_entry
+ *
* @ref tf_alloc_tcam_entry
*
* @ref tf_set_tcam_entry
@@ -958,6 +972,93 @@ int tf_free_tbl_scope(struct tf *tfp,
* @ref tf_free_tcam_entry
*/
+/**
+ * tf_search_tcam_entry parameter definition (experimental)
+ */
+struct tf_search_tcam_entry_parms {
+ /**
+ * [in] receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] TCAM table type
+ */
+ enum tf_tcam_tbl_type tcam_tbl_type;
+ /**
+ * [in] Key data to match on
+ */
+ uint8_t *key;
+ /**
+ * [in] key size in bits
+ */
+ uint16_t key_sz_in_bits;
+ /**
+ * [in] Mask data to match on
+ */
+ uint8_t *mask;
+ /**
+ * [in] Priority of entry requested (definition TBD)
+ */
+ uint32_t priority;
+ /**
+ * [in] Allocate on miss.
+ */
+ uint8_t alloc;
+ /**
+ * [out] Set if matching entry found
+ */
+ uint8_t hit;
+ /**
+ * [out] Search result status (hit, miss, reject)
+ */
+ enum tf_tcam_search_status search_status;
+ /**
+ * [out] Current refcnt after allocation
+ */
+ uint16_t ref_cnt;
+ /**
+ * [in out] The result data from the search is copied here
+ */
+ uint8_t *result;
+ /**
+ * [in out] result size in bits for the result data
+ */
+ uint16_t result_sz_in_bits;
+ /**
+ * [out] Index found
+ */
+ uint16_t idx;
+};
+
+/**
+ * search TCAM entry (experimental)
+ *
+ * Search for a TCAM entry
+ *
+ * This function searches the shadow copy of the TCAM table for a matching
+ * entry. Key and mask must match for hit to be set. Only TruFlow core data
+ * is accessed. If shadow_copy is not enabled, an error is returned.
+ *
+ * Implementation:
+ *
+ * A hash is performed on the key/mask data and mapped to a shadow copy entry
+ * where the full key/mask is populated. If the full key/mask matches the
+ * entry, hit is set, ref_cnt is incremented, and search_status indicates what
+ * action the caller can take regarding setting the entry.
+ *
+ * search_status should be used as follows:
+ * - On Miss, the caller should create a result and call tf_set_tcam_entry with
+ * returned index.
+ *
+ * - On Reject, the hash table is full and the entry cannot be added.
+ *
+ * - On Hit, the result data is returned to the caller. Additionally, the
+ * ref_cnt is updated.
+ *
+ * Also returns success or failure code.
+ */
+int tf_search_tcam_entry(struct tf *tfp,
+ struct tf_search_tcam_entry_parms *parms);
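A minimal caller sketch of the flow described above (illustration only; the session pointer tfp, the buffer sizes and the table type are placeholders, not taken from this patch):

	struct tf_search_tcam_entry_parms sp = { 0 };
	uint8_t key[16] = { 0 }, mask[16] = { 0 }, res[8] = { 0 };
	int rc;

	sp.dir = TF_DIR_RX;
	sp.tcam_tbl_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM; /* placeholder type */
	sp.key = key;
	sp.mask = mask;
	sp.key_sz_in_bits = 8 * sizeof(key);
	sp.result = res;
	sp.result_sz_in_bits = 8 * sizeof(res);
	sp.priority = 0;
	sp.alloc = 1; /* allocate an index on miss */

	rc = tf_search_tcam_entry(tfp, &sp);
	if (!rc && sp.search_status == MISS) {
		/* build res[] and program it at sp.idx via tf_set_tcam_entry() */
	} else if (!rc && sp.search_status == HIT) {
		/* res[] holds the existing result and sp.ref_cnt was bumped */
	}
	/* REJECT means the shadow table is full; nothing can be added */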
/**
* tf_alloc_tcam_entry parameter definition
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.c b/drivers/net/bnxt/tf_core/tf_device_p4.c
index f38c38efb..afb60989e 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.c
@@ -133,7 +133,7 @@ const struct tf_dev_ops tf_dev_ops_p4 = {
.tf_dev_get_bulk_tbl = tf_tbl_bulk_get,
.tf_dev_alloc_tcam = tf_tcam_alloc,
.tf_dev_free_tcam = tf_tcam_free,
- .tf_dev_alloc_search_tcam = NULL,
+ .tf_dev_alloc_search_tcam = tf_tcam_alloc_search,
.tf_dev_set_tcam = tf_tcam_set,
.tf_dev_get_tcam = NULL,
.tf_dev_insert_int_em_entry = tf_em_insert_int_entry,
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
index c61b833d7..51aae4ff6 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
@@ -3,61 +3,902 @@
* All rights reserved.
*/
-#include <rte_common.h>
-
+#include "tf_common.h"
+#include "tf_util.h"
+#include "tfp.h"
#include "tf_shadow_tcam.h"
/**
- * Shadow tcam DB element
+ * The implementation includes 3 tables per tcam table type.
+ * - hash table
+ * - sized so that a minimum of 4 slots per shadow entry are available to
+ * minimize the likelihood of collisions.
+ * - shadow key table
+ * - sized to the number of entries requested and is directly indexed
+ * - the index is zero based and is the tcam index - the base address
+ * - the key and mask are stored in the key table.
+ * - The stored key is the AND of the key/mask in order to eliminate the need
+ * to compare both the key and mask.
+ * - shadow result table
+ * - the result table is stored separately since it only needs to be accessed
+ * when the key matches.
+ * - the result has a back pointer to the hash table via the hb handle. The
+ * hb handle is a 32 bit representation of the hash with a valid bit, bucket
+ * element index, and the hash index. It is necessary to store the hb handle
+ * with the result since subsequent removes only provide the tcam index.
+ *
+ * - Max entries is limited in the current implementation since bit 15 is the
+ * valid bit in the hash table.
+ * - A 16bit hash is calculated and masked based on the number of entries
+ * - 64b wide bucket is used and broken into 4x16bit elements.
+ * This decision is based on quicker bucket scanning to determine if any
+ * elements are in use.
+ * - bit 15 of each bucket element is the valid bit; this is done to prevent
+ * having to read the larger key/result data when determining validity. It
+ * also aids in more efficient scanning of the bucket for slot usage.
*/
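+/*
+ * Sizing illustration (hypothetical numbers, chosen only for this sketch):
+ * a request for 1000 shadow entries rounds the hash table up to the next
+ * power of two, 1024 buckets; each 64b bucket carries 4 x 16b elements, so
+ * 4096 slots back 1000 entries -- the minimum of 4 slots per shadow entry
+ * mentioned above.
+ */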
-struct tf_shadow_tcam_element {
- /**
- * Hash table
- */
- void *hash;
- /**
- * Reference count, array of number of tcam entries
- */
- uint16_t *ref_count;
+/*
+ * The maximum number of shadow entries supported. The value also doubles as
+ * the maximum number of hash buckets. There are only 15 bits of data per
+ * bucket to point to the shadow tables.
+ */
+#define TF_SHADOW_TCAM_ENTRIES_MAX (1 << 15)
+
+/* The number of elements(BE) per hash bucket (HB) */
+#define TF_SHADOW_TCAM_HB_NUM_ELEM (4)
+#define TF_SHADOW_TCAM_BE_VALID (1 << 15)
+#define TF_SHADOW_TCAM_BE_IS_VALID(be) (((be) & TF_SHADOW_TCAM_BE_VALID) != 0)
+
+/**
+ * The hash bucket handle is 32b
+ * - bit 31, the Valid bit
+ * - bit 29-30, the element
+ * - bits 0-15, the hash idx (is masked based on the allocated size)
+ */
+#define TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
+#define TF_SHADOW_TCAM_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
+ ((be) << 29) | (idx))
+
+#define TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
+ (TF_SHADOW_TCAM_HB_NUM_ELEM - 1))
+
+#define TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hdl)((hdl) & \
+ (ctxt)->hash_ctxt.hid_mask)
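+/*
+ * Handle layout illustration (arbitrary values): for hash index 0x123 and
+ * bucket element 2,
+ *   TF_SHADOW_TCAM_HB_HANDLE_CREATE(0x123, 2) == 0xc0000123
+ *   TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hndl)   != 0 (bit 31 set)
+ *   TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hndl)     == 2 (bits 29-30)
+ *   hndl & hid_mask                           == 0x123, assuming a table
+ *   with at least 0x200 hash buckets.
+ */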
+
+/**
+ * The idx provided by the caller is within a region, so currently the base is
+ * either added or subtracted from the idx to ensure it can be used as a
+ * compressed index
+ */
+
+/* Convert the tcam index to a shadow index */
+#define TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, idx) ((idx) - \
+ (ctxt)->shadow_ctxt.base_addr)
+
+/* Convert the shadow index to a tcam index */
+#define TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt, idx) ((idx) + \
+ (ctxt)->shadow_ctxt.base_addr)
+
+/* Simple helper masks for clearing an element from the bucket */
+#define TF_SHADOW_TCAM_BE0_MASK_CLEAR(hb) ((hb) & 0xffffffffffff0000ull)
+#define TF_SHADOW_TCAM_BE1_MASK_CLEAR(hb) ((hb) & 0xffffffff0000ffffull)
+#define TF_SHADOW_TCAM_BE2_MASK_CLEAR(hb) ((hb) & 0xffff0000ffffffffull)
+#define TF_SHADOW_TCAM_BE3_MASK_CLEAR(hb) ((hb) & 0x0000ffffffffffffull)
+
+/**
+ * These sizes should come from an external source, but for now it is assumed
+ * that no key is greater than 1K bits and no result is bigger than 128 bits.
+ * This makes allocation of the hash table easier without having to allocate
+ * on the fly.
+ */
+#define TF_SHADOW_TCAM_MAX_KEY_SZ 128
+#define TF_SHADOW_TCAM_MAX_RESULT_SZ 16
+
+/*
+ * Local only defines for the internal data.
+ */
+
+/**
+ * tf_shadow_tcam_shadow_key_entry is the key/mask entry of the key table.
+ * The key stored in the table is the masked version of the key. This is done
+ * to eliminate the need of comparing both the key and mask.
+ */
+struct tf_shadow_tcam_shadow_key_entry {
+ uint8_t key[TF_SHADOW_TCAM_MAX_KEY_SZ];
+ uint8_t mask[TF_SHADOW_TCAM_MAX_KEY_SZ];
};
/**
- * Shadow tcam DB definition
+ * tf_shadow_tcam_shadow_result_entry is the result table entry.
+ * The result table writes are broken into two phases:
+ * - The search phase, which stores the hb_handle and key size and
+ * - The set phase, which writes the result, refcnt, and result size
+ */
+struct tf_shadow_tcam_shadow_result_entry {
+ uint8_t result[TF_SHADOW_TCAM_MAX_RESULT_SZ];
+ uint16_t result_size;
+ uint16_t key_size;
+ uint32_t refcnt;
+ uint32_t hb_handle;
+};
+
+/**
+ * tf_shadow_tcam_shadow_ctxt holds all information for accessing the key and
+ * result tables.
+ */
+struct tf_shadow_tcam_shadow_ctxt {
+ struct tf_shadow_tcam_shadow_key_entry *sh_key_tbl;
+ struct tf_shadow_tcam_shadow_result_entry *sh_res_tbl;
+ uint32_t base_addr;
+ uint16_t num_entries;
+ uint16_t alloc_idx;
+};
+
+/**
+ * tf_shadow_tcam_hash_ctxt holds all information related to accessing the hash
+ * table.
+ */
+struct tf_shadow_tcam_hash_ctxt {
+ uint64_t *hashtbl;
+ uint16_t hid_mask;
+ uint16_t hash_entries;
+};
+
+/**
+ * tf_shadow_tcam_ctxt holds the hash and shadow tables for the current shadow
+ * tcam db. This structure is per tcam table type as each tcam table has its
+ * own shadow and hash table.
+ */
+struct tf_shadow_tcam_ctxt {
+ struct tf_shadow_tcam_shadow_ctxt shadow_ctxt;
+ struct tf_shadow_tcam_hash_ctxt hash_ctxt;
+};
+
+/**
+ * tf_shadow_tcam_db is the allocated db structure returned as an opaque
+ * void * pointer to the caller during create db. It holds the pointers for
+ * each tcam associated with the db.
*/
struct tf_shadow_tcam_db {
- /**
- * The DB consists of an array of elements
- */
- struct tf_shadow_tcam_element *db;
+ /* Each context holds the shadow and hash table information */
+ struct tf_shadow_tcam_ctxt *ctxt[TF_TCAM_TBL_TYPE_MAX];
+};
+
+/* CRC polynomial 0xedb88320 */
+static const uint32_t tf_shadow_tcam_crc32tbl[] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};
+/**
+ * Returns the number of entries in the context's shadow table.
+ */
+static inline uint16_t
+tf_shadow_tcam_sh_num_entries_get(struct tf_shadow_tcam_ctxt *ctxt)
+{
+ return ctxt->shadow_ctxt.num_entries;
+}
+
+/**
+ * Compare the give key with the key in the shadow table.
+ *
+ * Returns 0 if the keys match
+ */
+static int
+tf_shadow_tcam_key_cmp(struct tf_shadow_tcam_ctxt *ctxt,
+ uint8_t *key,
+ uint8_t *mask,
+ uint16_t sh_idx,
+ uint16_t size)
+{
+ if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
+ sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !key || !mask)
+ return -1;
+
+ return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
+}
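+/*
+ * Comparison illustration (arbitrary bytes): a key byte 0x12 with mask 0xf0
+ * is stored as 0x10 (key & mask); a later search for key 0x1f with the same
+ * mask also reduces to 0x10, so a single memcmp of the masked keys, as done
+ * above, is enough to declare a match.
+ */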
+
+/**
+ * Copies the shadow result to the caller's result buffer.
+ *
+ * Returns NULL on failure, the destination pointer on success
+ */
+static void *
+tf_shadow_tcam_res_cpy(struct tf_shadow_tcam_ctxt *ctxt,
+ uint8_t *result,
+ uint16_t sh_idx,
+ uint16_t size)
+{
+ if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) || !result)
+ return 0;
+
+ if (ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result_size != size)
+ return 0;
+
+ return memcpy(result,
+ ctxt->shadow_ctxt.sh_res_tbl[sh_idx].result,
+ size);
+}
+
+/**
+ * Using a software based CRC function for now, but will look into using hw
+ * assisted in the future.
+ */
+static uint32_t
+tf_shadow_tcam_crc32_calc(uint8_t *key, uint32_t len)
+{
+ uint32_t crc = ~0U;
+
+ while (len--)
+ crc = tf_shadow_tcam_crc32tbl[(crc ^ key[len]) & 0xff] ^
+ (crc >> 8);
+
+ return ~crc;
+}
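+/*
+ * Fold illustration (arbitrary value): the search path below reduces the 32b
+ * CRC to a bucket index, e.g. for hid32 = 0xa1b2c3d4:
+ *   hid16  = 0xa1b2 ^ 0xc3d4 = 0x6266
+ *   hb_idx = hid16 & hid_mask
+ */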
+
+/**
+ * Free the memory associated with the context.
+ */
+static void
+tf_shadow_tcam_ctxt_delete(struct tf_shadow_tcam_ctxt *ctxt)
+{
+ if (!ctxt)
+ return;
+
+ tfp_free(ctxt->hash_ctxt.hashtbl);
+ tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
+ tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
+}
+
+/**
+ * The TF Shadow TCAM context is per TCAM and holds all information relating to
+ * managing the shadow and search capability. This routine allocates data that
+ * must be deallocated by tf_shadow_tcam_ctxt_delete prior to deleting the
+ * shadow db.
+ */
+static int
+tf_shadow_tcam_ctxt_create(struct tf_shadow_tcam_ctxt *ctxt,
+ uint16_t num_entries,
+ uint16_t base_addr)
+{
+ struct tfp_calloc_parms cparms;
+ uint16_t hash_size = 1;
+ uint16_t hash_mask;
+ int rc;
+
+ /* Hash table is a power of two that holds the number of entries */
+ if (num_entries > TF_SHADOW_TCAM_ENTRIES_MAX) {
+ TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
+ num_entries,
+ TF_SHADOW_TCAM_ENTRIES_MAX);
+ return -ENOMEM;
+ }
+
+ while (hash_size < num_entries)
+ hash_size = hash_size << 1;
+
+ hash_mask = hash_size - 1;
+
+ /* Allocate the hash table */
+ cparms.nitems = hash_size;
+ cparms.size = sizeof(uint64_t);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->hash_ctxt.hashtbl = cparms.mem_va;
+ ctxt->hash_ctxt.hid_mask = hash_mask;
+ ctxt->hash_ctxt.hash_entries = hash_size;
+
+ /* allocate the shadow tables */
+ /* allocate the shadow key table */
+ cparms.nitems = num_entries;
+ cparms.size = sizeof(struct tf_shadow_tcam_shadow_key_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;
+
+ /* allocate the shadow result table */
+ cparms.nitems = num_entries;
+ cparms.size = sizeof(struct tf_shadow_tcam_shadow_result_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;
+
+ ctxt->shadow_ctxt.num_entries = num_entries;
+ ctxt->shadow_ctxt.base_addr = base_addr;
+
+ return 0;
+error:
+ tf_shadow_tcam_ctxt_delete(ctxt);
+
+ return -ENOMEM;
+}
+
+/**
+ * Get a shadow TCAM context given the db and the TCAM type
+ */
+static struct tf_shadow_tcam_ctxt *
+tf_shadow_tcam_ctxt_get(struct tf_shadow_tcam_db *shadow_db,
+ enum tf_tcam_tbl_type type)
+{
+ if (type >= TF_TCAM_TBL_TYPE_MAX ||
+ !shadow_db ||
+ !shadow_db->ctxt[type])
+ return NULL;
+
+ return shadow_db->ctxt[type];
+}
+
+/**
+ * Sets the hash entry into the table given the TCAM context, hash bucket
+ * handle, and shadow index.
+ */
+static inline int
+tf_shadow_tcam_set_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
+ uint32_t hb_handle,
+ uint16_t sh_idx)
+{
+ uint16_t hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ uint16_t be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
+ uint64_t entry = sh_idx | TF_SHADOW_TCAM_BE_VALID;
+
+ if (hid >= ctxt->hash_ctxt.hash_entries)
+ return -EINVAL;
+
+ ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
+ return 0;
+}
+
+/**
+ * Clears the hash entry given the TCAM context and hash bucket handle.
+ */
+static inline void
+tf_shadow_tcam_clear_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
+ uint32_t hb_handle)
+{
+ uint16_t hid, be;
+ uint64_t *bucket;
+
+ if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(hb_handle))
+ return;
+
+ hid = TF_SHADOW_TCAM_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ be = TF_SHADOW_TCAM_HB_HANDLE_BE_GET(hb_handle);
+ bucket = &ctxt->hash_ctxt.hashtbl[hid];
+
+ switch (be) {
+ case 0:
+ *bucket = TF_SHADOW_TCAM_BE0_MASK_CLEAR(*bucket);
+ break;
+ case 1:
+ *bucket = TF_SHADOW_TCAM_BE1_MASK_CLEAR(*bucket);
+ break;
+ case 2:
+ *bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
+ break;
+ case 3:
+ *bucket = TF_SHADOW_TCAM_BE3_MASK_CLEAR(*bucket);
+ break;
+ }
+}
+
+/**
+ * Clears the shadow key and result entries given the TCAM context and
+ * shadow index.
+ */
+static void
+tf_shadow_tcam_clear_sh_entry(struct tf_shadow_tcam_ctxt *ctxt,
+ uint16_t sh_idx)
+{
+ struct tf_shadow_tcam_shadow_key_entry *sk_entry;
+ struct tf_shadow_tcam_shadow_result_entry *sr_entry;
+
+ if (sh_idx >= tf_shadow_tcam_sh_num_entries_get(ctxt))
+ return;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];
+
+ /*
+ * memset key/result to zero for now, possibly leave the data alone
+ * in the future and rely on the valid bit in the hash table.
+ */
+ memset(sk_entry, 0, sizeof(struct tf_shadow_tcam_shadow_key_entry));
+ memset(sr_entry, 0, sizeof(struct tf_shadow_tcam_shadow_result_entry));
+}
+
+/**
+ * Binds the allocated tcam index with the hash and shadow tables.
+ * The entry will be incomplete until the set has happened with the result
+ * data.
+ */
int
-tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms __rte_unused)
+tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms)
{
+ int rc;
+ int i;
+ uint16_t idx, klen;
+ struct tf_shadow_tcam_ctxt *ctxt;
+ struct tf_shadow_tcam_db *shadow_db;
+ struct tf_shadow_tcam_shadow_key_entry *sk_entry;
+ struct tf_shadow_tcam_shadow_result_entry *sr_entry;
+ uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];
+
+ if (!parms || !TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(parms->hb_handle) ||
+ !parms->key || !parms->mask) {
+ TFP_DRV_LOG(ERR, "Invalid parms\n");
+ return -EINVAL;
+ }
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ ctxt = tf_shadow_tcam_ctxt_get(shadow_db, parms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
+ tf_tcam_tbl_2_str(parms->type));
+ return -EINVAL;
+ }
+
+ memset(tkey, 0, sizeof(tkey));
+ idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, parms->idx);
+ klen = parms->key_size;
+ if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt) ||
+ klen > TF_SHADOW_TCAM_MAX_KEY_SZ) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid len (%d) > %d || oob idx %d\n",
+ tf_dir_2_str(parms->dir),
+ tf_tcam_tbl_2_str(parms->type),
+ klen,
+ TF_SHADOW_TCAM_MAX_KEY_SZ, idx);
+
+ return -EINVAL;
+ }
+
+ rc = tf_shadow_tcam_set_hash_entry(ctxt, parms->hb_handle, idx);
+ if (rc)
+ return -EINVAL;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+
+ /*
+ * Write the masked key to the table for more efficient comparisons
+ * later.
+ */
+ for (i = 0; i < klen; i++)
+ tkey[i] = parms->key[i] & parms->mask[i];
+
+ memcpy(sk_entry->key, tkey, klen);
+ memcpy(sk_entry->mask, parms->mask, klen);
+
+ /* Write the result table */
+ sr_entry->key_size = parms->key_size;
+ sr_entry->hb_handle = parms->hb_handle;
+
return 0;
}
+/**
+ * Deletes hash/shadow information if no more references.
+ *
+ * Returns 0 on success and reports the outcome through fparms->ref_cnt:
+ * 0 means the caller should delete the tcam entry in hardware, non-zero is
+ * the number of remaining references to the entry.
+ */
int
-tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms __rte_unused)
+tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms)
{
+ uint16_t idx;
+ uint32_t hb_handle;
+ struct tf_shadow_tcam_ctxt *ctxt;
+ struct tf_shadow_tcam_db *shadow_db;
+ struct tf_tcam_free_parms *fparms;
+ struct tf_shadow_tcam_shadow_result_entry *sr_entry;
+
+ if (!parms || !parms->fparms) {
+ TFP_DRV_LOG(ERR, "Invalid parms\n");
+ return -EINVAL;
+ }
+
+ fparms = parms->fparms;
+
+ /*
+ * Initialize the reference count to zero. It will only be changed if
+ * non-zero.
+ */
+ fparms->ref_cnt = 0;
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ ctxt = tf_shadow_tcam_ctxt_get(shadow_db, fparms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
+ tf_tcam_tbl_2_str(fparms->type));
+ return 0;
+ }
+
+ idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, fparms->idx);
+ if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
+ TFP_DRV_LOG(DEBUG, "%s %d >= %d\n",
+ tf_tcam_tbl_2_str(fparms->type),
+ fparms->idx,
+ tf_shadow_tcam_sh_num_entries_get(ctxt));
+ return 0;
+ }
+
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+ if (sr_entry->refcnt <= 1) {
+ hb_handle = sr_entry->hb_handle;
+ tf_shadow_tcam_clear_hash_entry(ctxt, hb_handle);
+ tf_shadow_tcam_clear_sh_entry(ctxt, idx);
+ } else {
+ sr_entry->refcnt--;
+ fparms->ref_cnt = sr_entry->refcnt;
+ }
+
return 0;
}
int
-tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms __rte_unused)
+tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms)
{
+ uint16_t len;
+ uint8_t rcopy;
+ uint64_t bucket;
+ uint32_t i, hid32;
+ struct tf_shadow_tcam_ctxt *ctxt;
+ struct tf_shadow_tcam_db *shadow_db;
+ uint16_t hid16, hb_idx, hid_mask, shtbl_idx, shtbl_key, be_valid;
+ struct tf_tcam_alloc_search_parms *sparms;
+ uint8_t tkey[TF_SHADOW_TCAM_MAX_KEY_SZ];
+ uint32_t be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;
+
+ if (!parms || !parms->sparms) {
+ TFP_DRV_LOG(ERR, "tcam search with invalid parms\n");
+ return -EINVAL;
+ }
+
+ memset(tkey, 0, sizeof(tkey));
+ sparms = parms->sparms;
+
+ /* Initialize return values to invalid */
+ sparms->hit = 0;
+ sparms->search_status = REJECT;
+ parms->hb_handle = 0;
+ sparms->ref_cnt = 0;
+ /* see if caller wanted the result */
+ rcopy = sparms->result && sparms->result_size;
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(ERR, "%s Unable to get tcam mgr context\n",
+ tf_tcam_tbl_2_str(sparms->type));
+ return -EINVAL;
+ }
+
+ hid_mask = ctxt->hash_ctxt.hid_mask;
+
+ len = sparms->key_size;
+
+ if (len > TF_SHADOW_TCAM_MAX_KEY_SZ ||
+ !sparms->key || !sparms->mask || !len) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid parms %d : %p : %p\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tcam_tbl_2_str(sparms->type),
+ len,
+ sparms->key,
+ sparms->mask);
+ return -EINVAL;
+ }
+
+ /* Combine the key and mask */
+ for (i = 0; i < len; i++)
+ tkey[i] = sparms->key[i] & sparms->mask[i];
+
+ /*
+ * Calculate the crc32
+ * Fold it to create a 16b value
+ * Reduce it to fit the table
+ */
+ hid32 = tf_shadow_tcam_crc32_calc(tkey, len);
+ hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
+ hb_idx = hid16 & hid_mask;
+
+ bucket = ctxt->hash_ctxt.hashtbl[hb_idx];
+
+ if (!bucket) {
+ /* empty bucket means a miss and available entry */
+ sparms->search_status = MISS;
+ parms->hb_handle = TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, 0);
+ sparms->idx = 0;
+ return 0;
+ }
+
+ /* Set the avail to max so we can detect when there is an avail entry */
+ be_avail = TF_SHADOW_TCAM_HB_NUM_ELEM;
+ for (i = 0; i < TF_SHADOW_TCAM_HB_NUM_ELEM; i++) {
+ shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff);
+ be_valid = TF_SHADOW_TCAM_BE_IS_VALID(shtbl_idx);
+ if (!be_valid) {
+ /* The element is avail, keep going */
+ be_avail = i;
+ continue;
+ }
+ /* There is a valid entry, compare it */
+ shtbl_key = shtbl_idx & ~TF_SHADOW_TCAM_BE_VALID;
+ if (!tf_shadow_tcam_key_cmp(ctxt,
+ sparms->key,
+ sparms->mask,
+ shtbl_key,
+ sparms->key_size)) {
+ /*
+ * It matches, increment the ref count if the caller
+ * requested allocation and return the info
+ */
+ if (sparms->alloc)
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt =
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt + 1;
+
+ sparms->hit = 1;
+ sparms->search_status = HIT;
+ parms->hb_handle =
+ TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, i);
+ sparms->idx = TF_SHADOW_TCAM_SHIDX_TO_IDX(ctxt,
+ shtbl_key);
+ sparms->ref_cnt =
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt;
+
+ /* copy the result, if caller wanted it. */
+ if (rcopy &&
+ !tf_shadow_tcam_res_cpy(ctxt,
+ sparms->result,
+ shtbl_key,
+ sparms->result_size)) {
+ /*
+ * Should never get here, possible memory
+ * corruption or something unexpected.
+ */
+ TFP_DRV_LOG(ERR, "Error copying result\n");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+ }
+
+ /* No hits, return avail entry if exists */
+ if (be_avail < TF_SHADOW_TCAM_HB_NUM_ELEM) {
+ parms->hb_handle =
+ TF_SHADOW_TCAM_HB_HANDLE_CREATE(hb_idx, be_avail);
+ sparms->search_status = MISS;
+ sparms->hit = 0;
+ sparms->idx = 0;
+ } else {
+ sparms->search_status = REJECT;
+ }
+
return 0;
}
int
-tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms __rte_unused)
+tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms)
{
+ uint16_t idx;
+ struct tf_shadow_tcam_ctxt *ctxt;
+ struct tf_tcam_set_parms *sparms;
+ struct tf_shadow_tcam_db *shadow_db;
+ struct tf_shadow_tcam_shadow_result_entry *sr_entry;
+
+ if (!parms || !parms->sparms) {
+ TFP_DRV_LOG(ERR, "Null parms\n");
+ return -EINVAL;
+ }
+
+ sparms = parms->sparms;
+ if (!sparms->result || !sparms->result_size) {
+ TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tcam_tbl_2_str(sparms->type));
+ return -EINVAL;
+ }
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ ctxt = tf_shadow_tcam_ctxt_get(shadow_db, sparms->type);
+ if (!ctxt) {
+ /* We aren't tracking this table, so return success */
+ TFP_DRV_LOG(DEBUG, "%s Unable to get tcam mgr context\n",
+ tf_tcam_tbl_2_str(sparms->type));
+ return 0;
+ }
+
+ idx = TF_SHADOW_TCAM_IDX_TO_SHIDX(ctxt, sparms->idx);
+ if (idx >= tf_shadow_tcam_sh_num_entries_get(ctxt)) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tcam_tbl_2_str(sparms->type),
+ sparms->idx);
+ return -EINVAL;
+ }
+
+ /* Write the result table, the key/hash has been written already */
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+
+ /*
+ * If the handle is not valid, the bind was never called. We aren't
+ * tracking this entry.
+ */
+ if (!TF_SHADOW_TCAM_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
+ return 0;
+
+ if (sparms->result_size > TF_SHADOW_TCAM_MAX_RESULT_SZ) {
+ TFP_DRV_LOG(ERR, "%s:%s Result length %d > %d\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tcam_tbl_2_str(sparms->type),
+ sparms->result_size,
+ TF_SHADOW_TCAM_MAX_RESULT_SZ);
+ return -EINVAL;
+ }
+
+ memcpy(sr_entry->result, sparms->result, sparms->result_size);
+ sr_entry->result_size = sparms->result_size;
+ sr_entry->refcnt = 1;
+
return 0;
}
int
-tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms __rte_unused)
+tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms)
+{
+ struct tf_shadow_tcam_db *shadow_db;
+ int i;
+
+ TF_CHECK_PARMS1(parms);
+
+ shadow_db = (struct tf_shadow_tcam_db *)parms->shadow_db;
+ if (!shadow_db) {
+ TFP_DRV_LOG(DEBUG, "Shadow db is NULL cannot be freed\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
+ if (shadow_db->ctxt[i]) {
+ tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
+ tfp_free(shadow_db->ctxt[i]);
+ }
+ }
+
+ tfp_free(shadow_db);
+
+ return 0;
+}
+
+/**
+ * Allocate the TCAM shadow resources used for search and allocate on miss.
+ *
+ */
+int tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms)
{
+ int rc;
+ int i;
+ uint16_t base;
+ struct tfp_calloc_parms cparms;
+ struct tf_shadow_tcam_db *shadow_db = NULL;
+
+ TF_CHECK_PARMS1(parms);
+
+ /* Build the shadow DB per the request */
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_shadow_tcam_db);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ return rc;
+ shadow_db = (void *)cparms.mem_va;
+
+ for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
+ /* If the element didn't request an allocation no need
+ * to create a pool nor verify if we got a reservation.
+ */
+ if (!parms->cfg->alloc_cnt[i]) {
+ shadow_db->ctxt[i] = NULL;
+ continue;
+ }
+
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_shadow_tcam_ctxt);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+
+ shadow_db->ctxt[i] = cparms.mem_va;
+ base = parms->cfg->base_addr[i];
+ rc = tf_shadow_tcam_ctxt_create(shadow_db->ctxt[i],
+ parms->cfg->alloc_cnt[i],
+ base);
+ if (rc)
+ goto error;
+ }
+
+ *parms->shadow_db = (void *)shadow_db;
+
+ TFP_DRV_LOG(INFO,
+ "TF SHADOW TCAM - initialized\n");
+
return 0;
+error:
+ for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
+ if (shadow_db->ctxt[i]) {
+ tf_shadow_tcam_ctxt_delete(shadow_db->ctxt[i]);
+ tfp_free(shadow_db->ctxt[i]);
+ }
+ }
+
+ tfp_free(shadow_db);
+
+ return -ENOMEM;
}
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.h b/drivers/net/bnxt/tf_core/tf_shadow_tcam.h
index e2c4e06c0..ea9f38e7c 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.h
@@ -8,232 +8,188 @@
#include "tf_core.h"
-struct tf;
-
-/**
- * The Shadow tcam module provides shadow DB handling for tcam based
- * TF types. A shadow DB provides the capability that allows for reuse
- * of TF resources.
- *
- * A Shadow tcam DB is intended to be used by the Tcam module only.
- */
-
/**
- * Shadow DB configuration information for a single tcam type.
- *
- * During Device initialization the HCAPI device specifics are learned
- * and as well as the RM DB creation. From that those initial steps
- * this structure can be populated.
+ * Shadow DB configuration information
*
- * NOTE:
- * If used in an array of tcam types then such array must be ordered
- * by the TF type is represents.
+ * The shadow configuration is for all tcam table types for a direction
*/
struct tf_shadow_tcam_cfg_parms {
/**
- * TF tcam type
+ * [in] The number of elements in the alloc_cnt and base_addr
+ * For now, it should always be equal to TF_TCAM_TBL_TYPE_MAX
*/
- enum tf_tcam_tbl_type type;
-
+ int num_entries;
/**
- * Number of entries the Shadow DB needs to hold
+ * [in] Resource allocation count array
+ * This array content originates from the tf_session_resources
+ * that is passed in on session open
+ * Array size is TF_TCAM_TBL_TYPE_MAX
*/
- int num_entries;
-
+ uint16_t *alloc_cnt;
/**
- * Element width for this table type
+ * [in] The base index for each tcam table
*/
- int element_width;
+ uint16_t base_addr[TF_TCAM_TBL_TYPE_MAX];
};
/**
- * Shadow tcam DB creation parameters
+ * Shadow TCAM DB creation parameters. The shadow db for this direction
+ * is returned
*/
struct tf_shadow_tcam_create_db_parms {
/**
- * [in] Configuration information for the shadow db
+ * [in] Receive or transmit direction
*/
- struct tf_shadow_tcam_cfg_parms *cfg;
+ enum tf_dir dir;
/**
- * [in] Number of elements in the parms structure
+ * [in] Configuration information for the shadow db
*/
- uint16_t num_elements;
+ struct tf_shadow_tcam_cfg_parms *cfg;
/**
* [out] Shadow tcam DB handle
*/
- void *tf_shadow_tcam_db;
+ void **shadow_db;
};
/**
- * Shadow tcam DB free parameters
+ * Create the shadow db for a single direction
+ *
+ * The returned shadow db must be freed using the free db API when no longer
+ * needed
*/
-struct tf_shadow_tcam_free_db_parms {
- /**
- * Shadow tcam DB handle
- */
- void *tf_shadow_tcam_db;
-};
+int
+tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms);
/**
- * Shadow tcam search parameters
+ * Shadow TCAM free parameters
*/
-struct tf_shadow_tcam_search_parms {
+struct tf_shadow_tcam_free_db_parms {
/**
* [in] Shadow tcam DB handle
*/
- void *tf_shadow_tcam_db;
- /**
- * [in] TCAM tbl type
- */
- enum tf_tcam_tbl_type type;
- /**
- * [in] Pointer to entry blob value in remap table to match
- */
- uint8_t *entry;
- /**
- * [in] Size of the entry blob passed in bytes
- */
- uint16_t entry_sz;
- /**
- * [out] Index of the found element returned if hit
- */
- uint16_t *index;
- /**
- * [out] Reference count incremented if hit
- */
- uint16_t *ref_cnt;
+ void *shadow_db;
};
/**
- * Shadow tcam insert parameters
+ * Free all resources associated with the shadow db
+ */
+int
+tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms);
+
+/**
+ * Shadow TCAM bind index parameters
*/
-struct tf_shadow_tcam_insert_parms {
+struct tf_shadow_tcam_bind_index_parms {
/**
* [in] Shadow tcam DB handle
*/
- void *tf_shadow_tcam_db;
+ void *shadow_db;
/**
- * [in] TCAM tbl type
+ * [in] receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] TCAM table type
*/
enum tf_tcam_tbl_type type;
/**
- * [in] Pointer to entry blob value in remap table to match
+ * [in] index of the entry to program
*/
- uint8_t *entry;
+ uint16_t idx;
/**
- * [in] Size of the entry blob passed in bytes
+ * [in] Pointer to the key data
*/
- uint16_t entry_sz;
+ uint8_t *key;
/**
- * [in] Entry to update
+ * [in] Pointer to the mask data
*/
- uint16_t index;
+ uint8_t *mask;
/**
- * [out] Reference count after insert
+ * [in] key size in bits (if search)
*/
- uint16_t *ref_cnt;
+ uint16_t key_size;
+ /**
+ * [in] The hash bucket handle returned from the search
+ */
+ uint32_t hb_handle;
};
/**
- * Shadow tcam remove parameters
+ * Binds the allocated tcam index with the hash and shadow tables
*/
-struct tf_shadow_tcam_remove_parms {
+int
+tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms);
+
+/**
+ * Shadow TCAM insert parameters
+ */
+struct tf_shadow_tcam_insert_parms {
/**
* [in] Shadow tcam DB handle
*/
- void *tf_shadow_tcam_db;
- /**
- * [in] TCAM tbl type
- */
- enum tf_tcam_tbl_type type;
- /**
- * [in] Entry to update
- */
- uint16_t index;
+ void *shadow_db;
/**
- * [out] Reference count after removal
+ * [in] The set parms from tf core
*/
- uint16_t *ref_cnt;
+ struct tf_tcam_set_parms *sparms;
};
/**
- * @page shadow_tcam Shadow tcam DB
- *
- * @ref tf_shadow_tcam_create_db
- *
- * @ref tf_shadow_tcam_free_db
- *
- * @reg tf_shadow_tcam_search
- *
- * @reg tf_shadow_tcam_insert
- *
- * @reg tf_shadow_tcam_remove
- */
-
-/**
- * Creates and fills a Shadow tcam DB. The DB is indexed per the
- * parms structure.
- *
- * [in] parms
- * Pointer to create db parameters
+ * Set the entry into the tcam manager hash and shadow tables
*
- * Returns
- * - (0) if successful.
- * - (-EINVAL) on failure.
+ * The search must have been used prior to setting the entry so that the
+ * hash has been calculated and duplicate entries will not be added
*/
-int tf_shadow_tcam_create_db(struct tf_shadow_tcam_create_db_parms *parms);
+int
+tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms);
/**
- * Closes the Shadow tcam DB and frees all allocated
- * resources per the associated database.
- *
- * [in] parms
- * Pointer to the free DB parameters
- *
- * Returns
- * - (0) if successful.
- * - (-EINVAL) on failure.
+ * Shadow TCAM remove parameters
*/
-int tf_shadow_tcam_free_db(struct tf_shadow_tcam_free_db_parms *parms);
+struct tf_shadow_tcam_remove_parms {
+ /**
+ * [in] Shadow tcam DB handle
+ */
+ void *shadow_db;
+ /**
+ * [in,out] The set parms from tf core
+ */
+ struct tf_tcam_free_parms *fparms;
+};
/**
- * Search Shadow tcam db for matching result
- *
- * [in] parms
- * Pointer to the search parameters
+ * Remove the entry from the tcam hash and shadow tables
*
- * Returns
- * - (0) if successful, element was found.
- * - (-EINVAL) on failure.
+ * The entry's hash and shadow information are only cleared once its
+ * reference count drops to zero; otherwise the count is decremented and
+ * returned through fparms
*/
-int tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms);
+int
+tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms);
/**
- * Inserts an element into the Shadow tcam DB. Will fail if the
- * elements ref_count is different from 0. Ref_count after insert will
- * be incremented.
- *
- * [in] parms
- * Pointer to insert parameters
- *
- * Returns
- * - (0) if successful.
- * - (-EINVAL) on failure.
+ * Shadow TCAM search parameters
*/
-int tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms);
+struct tf_shadow_tcam_search_parms {
+ /**
+ * [in] Shadow tcam DB handle
+ */
+ void *shadow_db;
+ /**
+ * [in,out] The search parameters from tf core
+ */
+ struct tf_tcam_alloc_search_parms *sparms;
+ /**
+ * [out] The hash handle to use for the set
+ */
+ uint32_t hb_handle;
+};
/**
- * Removes an element from the Shadow tcam DB. Will fail if the
- * elements ref_count is 0. Ref_count after removal will be
- * decremented.
+ * Search for an entry in the tcam hash/shadow tables
*
- * [in] parms
- * Pointer to remove parameter
- *
- * Returns
- * - (0) if successful.
- * - (-EINVAL) on failure.
+ * If there is a miss, but there is room for insertion, the hb_handle returned
+ * is used for insertion during the bind index API
*/
-int tf_shadow_tcam_remove(struct tf_shadow_tcam_remove_parms *parms);
-
-#endif /* _TF_SHADOW_TCAM_H_ */
+int
+tf_shadow_tcam_search(struct tf_shadow_tcam_search_parms *parms);
+#endif
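Taken together, the APIs above imply the following per-entry flow, mirroring how tf_tcam.c uses them later in this patch; a minimal sketch where shadow_db, asparms, setparms, freeparms and allocated_idx are assumed to be set up by the caller:

	struct tf_shadow_tcam_search_parms     srch = { 0 };
	struct tf_shadow_tcam_bind_index_parms bind = { 0 };
	struct tf_shadow_tcam_insert_parms     ins  = { 0 };
	struct tf_shadow_tcam_remove_parms     rem  = { 0 };

	srch.shadow_db = shadow_db;
	srch.sparms = &asparms;           /* tf_tcam_alloc_search_parms */
	tf_shadow_tcam_search(&srch);     /* MISS: hb_handle returned   */

	bind.shadow_db = shadow_db;       /* after RM allocates an idx  */
	bind.dir = asparms.dir;
	bind.type = asparms.type;
	bind.idx = allocated_idx;
	bind.key = asparms.key;
	bind.mask = asparms.mask;
	bind.key_size = asparms.key_size;
	bind.hb_handle = srch.hb_handle;
	tf_shadow_tcam_bind_index(&bind); /* masked key stored          */

	ins.shadow_db = shadow_db;
	ins.sparms = &setparms;           /* tf_tcam_set_parms          */
	tf_shadow_tcam_insert(&ins);      /* result stored, refcnt = 1  */

	rem.shadow_db = shadow_db;
	rem.fparms = &freeparms;          /* tf_tcam_free_parms         */
	tf_shadow_tcam_remove(&rem);      /* clears tables at refcnt 1  */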
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.c b/drivers/net/bnxt/tf_core/tf_tcam.c
index cbfaa94ee..7679d09ee 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_tcam.c
@@ -14,6 +14,7 @@
#include "tfp.h"
#include "tf_session.h"
#include "tf_msg.h"
+#include "tf_shadow_tcam.h"
struct tf;
@@ -25,7 +26,7 @@ static void *tcam_db[TF_DIR_MAX];
/**
* TCAM Shadow DBs
*/
-/* static void *shadow_tcam_db[TF_DIR_MAX]; */
+static void *shadow_tcam_db[TF_DIR_MAX];
/**
* Init flag, set on bind and cleared on unbind
@@ -35,16 +36,22 @@ static uint8_t init;
/**
* Shadow init flag, set on bind and cleared on unbind
*/
-/* static uint8_t shadow_init; */
+static uint8_t shadow_init;
int
tf_tcam_bind(struct tf *tfp,
struct tf_tcam_cfg_parms *parms)
{
int rc;
- int i;
+ int i, d;
+ struct tf_rm_alloc_info info;
+ struct tf_rm_free_db_parms fparms;
+ struct tf_rm_create_db_parms db_cfg;
struct tf_tcam_resources *tcam_cnt;
- struct tf_rm_create_db_parms db_cfg = { 0 };
+ struct tf_shadow_tcam_free_db_parms fshadow;
+ struct tf_rm_get_alloc_info_parms ainfo;
+ struct tf_shadow_tcam_cfg_parms shadow_cfg;
+ struct tf_shadow_tcam_create_db_parms shadow_cdb;
TF_CHECK_PARMS2(tfp, parms);
@@ -62,29 +69,91 @@ tf_tcam_bind(struct tf *tfp,
return -EINVAL;
}
+ memset(&db_cfg, 0, sizeof(db_cfg));
+
db_cfg.type = TF_DEVICE_MODULE_TYPE_TCAM;
db_cfg.num_elements = parms->num_elements;
db_cfg.cfg = parms->cfg;
- for (i = 0; i < TF_DIR_MAX; i++) {
- db_cfg.dir = i;
- db_cfg.alloc_cnt = parms->resources->tcam_cnt[i].cnt;
- db_cfg.rm_db = &tcam_db[i];
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ db_cfg.dir = d;
+ db_cfg.alloc_cnt = parms->resources->tcam_cnt[d].cnt;
+ db_cfg.rm_db = &tcam_db[d];
rc = tf_rm_create_db(tfp, &db_cfg);
if (rc) {
TFP_DRV_LOG(ERR,
"%s: TCAM DB creation failed\n",
- tf_dir_2_str(i));
+ tf_dir_2_str(d));
return rc;
}
}
+ /* Initialize the TCAM manager. */
+ if (parms->shadow_copy) {
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ memset(&shadow_cfg, 0, sizeof(shadow_cfg));
+ memset(&shadow_cdb, 0, sizeof(shadow_cdb));
+ /* Get the base addresses of the tcams for tcam mgr */
+ for (i = 0; i < TF_TCAM_TBL_TYPE_MAX; i++) {
+ memset(&info, 0, sizeof(info));
+
+ if (!parms->resources->tcam_cnt[d].cnt[i])
+ continue;
+ ainfo.rm_db = tcam_db[d];
+ ainfo.db_index = i;
+ ainfo.info = &info;
+ rc = tf_rm_get_info(&ainfo);
+ if (rc)
+ goto error;
+
+ shadow_cfg.base_addr[i] = info.entry.start;
+ }
+
+ /* Create the shadow db */
+ shadow_cfg.alloc_cnt =
+ parms->resources->tcam_cnt[d].cnt;
+ shadow_cfg.num_entries = parms->num_elements;
+
+ shadow_cdb.shadow_db = &shadow_tcam_db[d];
+ shadow_cdb.cfg = &shadow_cfg;
+ rc = tf_shadow_tcam_create_db(&shadow_cdb);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "TCAM MGR DB creation failed "
+ "rc=%d\n", rc);
+ goto error;
+ }
+ }
+ shadow_init = 1;
+ }
+
init = 1;
TFP_DRV_LOG(INFO,
"TCAM - initialized\n");
return 0;
+error:
+ for (i = 0; i < TF_DIR_MAX; i++) {
+ memset(&fparms, 0, sizeof(fparms));
+ fparms.dir = i;
+ fparms.rm_db = tcam_db[i];
+ /* Ignoring return here since we are in the error case */
+ (void)tf_rm_free_db(tfp, &fparms);
+
+ if (parms->shadow_copy) {
+ fshadow.shadow_db = shadow_tcam_db[i];
+ tf_shadow_tcam_free_db(&fshadow);
+ shadow_tcam_db[i] = NULL;
+ }
+
+ tcam_db[i] = NULL;
+ }
+
+ shadow_init = 0;
+ init = 0;
+
+ return rc;
}
int
@@ -92,7 +161,8 @@ tf_tcam_unbind(struct tf *tfp)
{
int rc;
int i;
- struct tf_rm_free_db_parms fparms = { 0 };
+ struct tf_rm_free_db_parms fparms;
+ struct tf_shadow_tcam_free_db_parms fshadow;
TF_CHECK_PARMS1(tfp);
@@ -104,6 +174,7 @@ tf_tcam_unbind(struct tf *tfp)
}
for (i = 0; i < TF_DIR_MAX; i++) {
+ memset(&fparms, 0, sizeof(fparms));
fparms.dir = i;
fparms.rm_db = tcam_db[i];
rc = tf_rm_free_db(tfp, &fparms);
@@ -111,8 +182,17 @@ tf_tcam_unbind(struct tf *tfp)
return rc;
tcam_db[i] = NULL;
+
+ if (shadow_init) {
+ memset(&fshadow, 0, sizeof(fshadow));
+
+ fshadow.shadow_db = shadow_tcam_db[i];
+ tf_shadow_tcam_free_db(&fshadow);
+ shadow_tcam_db[i] = NULL;
+ }
}
+ shadow_init = 0;
init = 0;
return 0;
@@ -125,7 +205,7 @@ tf_tcam_alloc(struct tf *tfp,
int rc;
struct tf_session *tfs;
struct tf_dev_info *dev;
- struct tf_rm_allocate_parms aparms = { 0 };
+ struct tf_rm_allocate_parms aparms;
uint16_t num_slice_per_row = 1;
TF_CHECK_PARMS2(tfp, parms);
@@ -165,6 +245,8 @@ tf_tcam_alloc(struct tf *tfp,
return rc;
/* Allocate requested element */
+ memset(&aparms, 0, sizeof(aparms));
+
aparms.rm_db = tcam_db[parms->dir];
aparms.db_index = parms->type;
aparms.priority = parms->priority;
@@ -202,11 +284,12 @@ tf_tcam_free(struct tf *tfp,
int rc;
struct tf_session *tfs;
struct tf_dev_info *dev;
- struct tf_rm_is_allocated_parms aparms = { 0 };
- struct tf_rm_free_parms fparms = { 0 };
- struct tf_rm_get_hcapi_parms hparms = { 0 };
+ struct tf_rm_is_allocated_parms aparms;
+ struct tf_rm_free_parms fparms;
+ struct tf_rm_get_hcapi_parms hparms;
uint16_t num_slice_per_row = 1;
int allocated = 0;
+ struct tf_shadow_tcam_remove_parms shparms;
TF_CHECK_PARMS2(tfp, parms);
@@ -245,6 +328,8 @@ tf_tcam_free(struct tf *tfp,
return rc;
/* Check if element is in use */
+ memset(&aparms, 0, sizeof(aparms));
+
aparms.rm_db = tcam_db[parms->dir];
aparms.db_index = parms->type;
aparms.index = parms->idx / num_slice_per_row;
@@ -262,7 +347,37 @@ tf_tcam_free(struct tf *tfp,
return -EINVAL;
}
+ /*
+ * The Shadow mgmt, if enabled, determines if the entry needs
+ * to be deleted.
+ */
+ if (shadow_init) {
+ shparms.shadow_db = shadow_tcam_db[parms->dir];
+ shparms.fparms = parms;
+ rc = tf_shadow_tcam_remove(&shparms);
+ if (rc) {
+ /*
+ * Should not get here, log it and let the entry be
+ * deleted.
+ */
+ TFP_DRV_LOG(ERR, "%s: Shadow free fail, "
+ "type:%d index:%d deleting the entry.\n",
+ tf_dir_2_str(parms->dir),
+ parms->type,
+ parms->idx);
+ } else {
+ /*
+ * If the entry still has references, just return the
+ * ref count to the caller. No need to remove entry
+ * from rm or hw
+ */
+ if (parms->ref_cnt >= 1)
+ return rc;
+ }
+ }
+
/* Free requested element */
+ memset(&fparms, 0, sizeof(fparms));
fparms.rm_db = tcam_db[parms->dir];
fparms.db_index = parms->type;
fparms.index = parms->idx / num_slice_per_row;
@@ -291,7 +406,8 @@ tf_tcam_free(struct tf *tfp,
rc = tf_rm_free(&fparms);
if (rc) {
TFP_DRV_LOG(ERR,
- "%s: Free failed, type:%d, index:%d\n",
+ "%s: Free failed, type:%d, "
+ "index:%d\n",
tf_dir_2_str(parms->dir),
parms->type,
fparms.index);
@@ -302,6 +418,8 @@ tf_tcam_free(struct tf *tfp,
}
/* Convert TF type to HCAPI RM type */
+ memset(&hparms, 0, sizeof(hparms));
+
hparms.rm_db = tcam_db[parms->dir];
hparms.db_index = parms->type;
hparms.hcapi_type = &parms->hcapi_type;
@@ -326,9 +444,131 @@ tf_tcam_free(struct tf *tfp,
}
int
-tf_tcam_alloc_search(struct tf *tfp __rte_unused,
- struct tf_tcam_alloc_search_parms *parms __rte_unused)
+tf_tcam_alloc_search(struct tf *tfp,
+ struct tf_tcam_alloc_search_parms *parms)
{
+ struct tf_shadow_tcam_search_parms sparms;
+ struct tf_shadow_tcam_bind_index_parms bparms;
+ struct tf_tcam_alloc_parms aparms;
+ struct tf_tcam_free_parms fparms;
+ uint16_t num_slice_per_row = 1;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ int rc;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ if (!init) {
+ TFP_DRV_LOG(ERR,
+ "%s: No TCAM DBs created\n",
+ tf_dir_2_str(parms->dir));
+ return -EINVAL;
+ }
+
+ if (!shadow_init || !shadow_tcam_db[parms->dir]) {
+ TFP_DRV_LOG(ERR, "%s: TCAM Shadow not initialized for %s\n",
+ tf_dir_2_str(parms->dir),
+ tf_tcam_tbl_2_str(parms->type));
+ return -EINVAL;
+ }
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc)
+ return rc;
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc)
+ return rc;
+
+ if (dev->ops->tf_dev_get_tcam_slice_info == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Need to retrieve row size etc */
+ rc = dev->ops->tf_dev_get_tcam_slice_info(tfp,
+ parms->type,
+ parms->key_size,
+ &num_slice_per_row);
+ if (rc)
+ return rc;
+
+ /*
+ * Prep the shadow search, reusing the parms from the original search
+ * instead of copying them. The shadow routine updates the outputs in place.
+ */
+ memset(&sparms, 0, sizeof(sparms));
+ sparms.sparms = parms;
+ sparms.shadow_db = shadow_tcam_db[parms->dir];
+
+ rc = tf_shadow_tcam_search(&sparms);
+ if (rc)
+ return rc;
+
+ /*
+ * If the caller did not request an allocation, or the search did not
+ * miss, return now. The hit/status were updated in the original parms.
+ */
+ if (!parms->alloc || parms->search_status != MISS)
+ return rc;
+
+ /* Caller desires an allocate on miss */
+ if (dev->ops->tf_dev_alloc_tcam == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+ memset(&aparms, 0, sizeof(aparms));
+ aparms.dir = parms->dir;
+ aparms.type = parms->type;
+ aparms.key_size = parms->key_size;
+ aparms.priority = parms->priority;
+ rc = dev->ops->tf_dev_alloc_tcam(tfp, &aparms);
+ if (rc)
+ return rc;
+
+ /* Successful allocation, attempt to add it to the shadow */
+ memset(&bparms, 0, sizeof(bparms));
+ bparms.dir = parms->dir;
+ bparms.shadow_db = shadow_tcam_db[parms->dir];
+ bparms.type = parms->type;
+ bparms.key = parms->key;
+ bparms.mask = parms->mask;
+ bparms.key_size = parms->key_size;
+ bparms.idx = aparms.idx;
+ bparms.hb_handle = sparms.hb_handle;
+ rc = tf_shadow_tcam_bind_index(&bparms);
+ if (rc) {
+ /* Error binding entry, need to free the allocated idx */
+ if (dev->ops->tf_dev_free_tcam == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ fparms.dir = parms->dir;
+ fparms.type = parms->type;
+ fparms.idx = aparms.idx;
+ rc = dev->ops->tf_dev_free_tcam(tfp, &fparms);
+ if (rc)
+ return rc;
+ }
+
+ /* Add the allocated index to output and done */
+ parms->idx = aparms.idx;
+
return 0;
}
@@ -339,8 +579,9 @@ tf_tcam_set(struct tf *tfp __rte_unused,
int rc;
struct tf_session *tfs;
struct tf_dev_info *dev;
- struct tf_rm_is_allocated_parms aparms = { 0 };
- struct tf_rm_get_hcapi_parms hparms = { 0 };
+ struct tf_rm_is_allocated_parms aparms;
+ struct tf_rm_get_hcapi_parms hparms;
+ struct tf_shadow_tcam_insert_parms iparms;
uint16_t num_slice_per_row = 1;
int allocated = 0;
@@ -381,6 +622,8 @@ tf_tcam_set(struct tf *tfp __rte_unused,
return rc;
/* Check if element is in use */
+ memset(&aparms, 0, sizeof(aparms));
+
aparms.rm_db = tcam_db[parms->dir];
aparms.db_index = parms->type;
aparms.index = parms->idx / num_slice_per_row;
@@ -399,6 +642,8 @@ tf_tcam_set(struct tf *tfp __rte_unused,
}
/* Convert TF type to HCAPI RM type */
+ memset(&hparms, 0, sizeof(hparms));
+
hparms.rm_db = tcam_db[parms->dir];
hparms.db_index = parms->type;
hparms.hcapi_type = &parms->hcapi_type;
@@ -419,6 +664,23 @@ tf_tcam_set(struct tf *tfp __rte_unused,
return rc;
}
+ /* Successfully added to hw, now for shadow if enabled. */
+ if (!shadow_init || !shadow_tcam_db[parms->dir])
+ return 0;
+
+ iparms.shadow_db = shadow_tcam_db[parms->dir];
+ iparms.sparms = parms;
+ rc = tf_shadow_tcam_insert(&iparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: %s: Entry %d set failed, rc:%s",
+ tf_dir_2_str(parms->dir),
+ tf_tcam_tbl_2_str(parms->type),
+ parms->idx,
+ strerror(-rc));
+ return rc;
+ }
+
return 0;
}
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.h b/drivers/net/bnxt/tf_core/tf_tcam.h
index ee5bacc09..563b08c23 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_tcam.h
@@ -104,19 +104,19 @@ struct tf_tcam_alloc_search_parms {
*/
enum tf_tcam_tbl_type type;
/**
- * [in] Enable search for matching entry
+ * [in] Type of HCAPI
*/
- uint8_t search_enable;
+ uint16_t hcapi_type;
/**
- * [in] Key data to match on (if search)
+ * [in] Key data to match on
*/
uint8_t *key;
/**
- * [in] key size (if search)
+ * [in] key size in bits
*/
uint16_t key_size;
/**
- * [in] Mask data to match on (if search)
+ * [in] Mask data to match on
*/
uint8_t *mask;
/**
@@ -124,16 +124,31 @@ struct tf_tcam_alloc_search_parms {
*/
uint32_t priority;
/**
- * [out] If search, set if matching entry found
+ * [in] Allocate on miss.
+ */
+ uint8_t alloc;
+ /**
+ * [out] Set if matching entry found
*/
uint8_t hit;
+ /**
+ * [out] Search result status (hit, miss, reject)
+ */
+ enum tf_tcam_search_status search_status;
/**
* [out] Current refcnt after allocation
*/
uint16_t ref_cnt;
/**
- * [out] Idx allocated
- *
+ * [in,out] The result data from the search is copied here
+ */
+ uint8_t *result;
+ /**
+ * [in,out] result size in bits for the result data
+ */
+ uint16_t result_size;
+ /**
+ * [out] Index found
*/
uint16_t idx;
};
--
2.21.1 (Apple Git-122.3)
^ permalink raw reply [flat|nested] 102+ messages in thread
* Re: [dpdk-dev] [PATCH v3 01/22] net/bnxt: add shadow and search capability to tcam
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 01/22] net/bnxt: add shadow and search capability to tcam Ajit Khaparde
@ 2020-07-24 18:04 ` Stephen Hemminger
0 siblings, 0 replies; 102+ messages in thread
From: Stephen Hemminger @ 2020-07-24 18:04 UTC (permalink / raw)
To: Ajit Khaparde; +Cc: dev, ferruh.yigit, Mike Baucom, Randy Schacher
On Thu, 23 Jul 2020 22:32:14 -0700
Ajit Khaparde <ajit.khaparde@broadcom.com> wrote:
> +int
> +tf_search_tcam_entry(struct tf *tfp,
> + struct tf_search_tcam_entry_parms *parms)
> +{
> + int rc;
> + struct tf_session *tfs;
> + struct tf_dev_info *dev;
> + struct tf_tcam_alloc_search_parms sparms;
> +
> + TF_CHECK_PARMS2(tfp, parms);
> +
> + memset(&sparms, 0, sizeof(struct tf_tcam_alloc_search_parms));
FYI simpler way to initialize stack variable is:
struct tf_tcam_alloc_search_parms sparms = { };
^ permalink raw reply [flat|nested] 102+ messages in thread
* [dpdk-dev] [PATCH v3 02/22] net/bnxt: add access to nat global register
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 01/22] net/bnxt: add shadow and search capability to tcam Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 03/22] net/bnxt: configure parif for offload miss rules Ajit Khaparde
` (20 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Kishore Padmanabha, Michael Baucom
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Add support to enable or disable the NAT global registers.
The NAT feature is enabled in hardware during initialization
and disabled at deinitialization of the application.
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 83 ++++++++++++++++++++++++++++++
drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 4 ++
2 files changed, 87 insertions(+)
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 0869231a0..7c65a4b1b 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -596,6 +596,52 @@ ulp_session_deinit(struct bnxt_ulp_session_state *session)
}
}
+/*
+ * Internal API to enable or disable the NAT feature.
+ * Set set_flag to 1 to set the value or zero to reset it.
+ * Returns 0 on success.
+ */
+static int32_t
+bnxt_ulp_global_cfg_update(struct bnxt *bp,
+ enum tf_dir dir,
+ enum tf_global_config_type type,
+ uint32_t offset,
+ uint32_t value,
+ uint32_t set_flag)
+{
+ uint32_t global_cfg = 0;
+ int rc;
+ struct tf_global_cfg_parms parms;
+
+ /* Initialize the params */
+ parms.dir = dir,
+ parms.type = type,
+ parms.offset = offset,
+ parms.config = (uint8_t *)&global_cfg,
+ parms.config_sz_in_bytes = sizeof(global_cfg);
+
+ rc = tf_get_global_cfg(&bp->tfp, &parms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to get global cfg 0x%x rc:%d\n",
+ type, rc);
+ return rc;
+ }
+
+ if (set_flag)
+ global_cfg |= value;
+ else
+ global_cfg &= ~value;
+
+ /* SET the register RE_CFA_REG_ACT_TECT */
+ rc = tf_set_global_cfg(&bp->tfp, &parms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to set global cfg 0x%x rc:%d\n",
+ type, rc);
+ return rc;
+ }
+ return rc;
+}
+
/*
* When a port is initialized by dpdk. This functions is called
* and this function initializes the ULP context and rest of the
@@ -732,6 +778,29 @@ bnxt_ulp_init(struct bnxt *bp)
goto jump_to_error;
}
+ /*
+ * Enable NAT feature. Set the global configuration register
+ * Tunnel encap to enable NAT with the reuse of existing inner
+ * L2 header smac and dmac
+ */
+ rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
+ TF_TUNNEL_ENCAP_NAT,
+ (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
+ BNXT_ULP_NAT_INNER_L2_HEADER_DMAC), 1);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to set rx global configuration\n");
+ goto jump_to_error;
+ }
+
+ rc = bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
+ TF_TUNNEL_ENCAP_NAT,
+ (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
+ BNXT_ULP_NAT_INNER_L2_HEADER_DMAC), 1);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to set tx global configuration\n");
+ goto jump_to_error;
+ }
+
return rc;
jump_to_error:
@@ -785,6 +854,19 @@ bnxt_ulp_deinit(struct bnxt *bp)
/* Delete the Port database */
ulp_port_db_deinit(bp->ulp_ctx);
+ /* Disable NAT feature */
+ (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP,
+ TF_TUNNEL_ENCAP_NAT,
+ (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
+ BNXT_ULP_NAT_INNER_L2_HEADER_DMAC),
+ 0);
+
+ (void)bnxt_ulp_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP,
+ TF_TUNNEL_ENCAP_NAT,
+ (BNXT_ULP_NAT_INNER_L2_HEADER_SMAC |
+ BNXT_ULP_NAT_INNER_L2_HEADER_DMAC),
+ 0);
+
/* Delete the ulp context and tf session */
ulp_ctx_detach(bp, session);
@@ -942,6 +1024,7 @@ bnxt_ulp_eth_dev_ptr2_cntxt_get(struct rte_eth_dev *dev)
if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
struct bnxt_vf_representor *vfr = dev->data->dev_private;
+
bp = vfr->parent_dev->data->dev_private;
}
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
index f9e5e2ba6..7c95ead55 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
@@ -14,6 +14,10 @@
#include "ulp_template_db_enum.h"
+/* NAT defines to reuse existing inner L2 SMAC and DMAC */
+#define BNXT_ULP_NAT_INNER_L2_HEADER_SMAC 0x2000
+#define BNXT_ULP_NAT_INNER_L2_HEADER_DMAC 0x100
+
/* defines for the ulp_flags */
#define BNXT_ULP_VF_REP_ENABLED 0x1
#define ULP_VF_REP_IS_ENABLED(flag) ((flag) & BNXT_ULP_VF_REP_ENABLED)
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 03/22] net/bnxt: configure parif for offload miss rules
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 01/22] net/bnxt: add shadow and search capability to tcam Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 02/22] net/bnxt: add access to nat global register Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 04/22] net/bnxt: modify ulp mapper to use tcam search Ajit Khaparde
` (19 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Kishore Padmanabha, Michael Baucom
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
PARIF is a handle to a partition of the physical port.
For the offload miss rules, the PARIF miss path needs to be
considered. The higher PARIFs are reserved for handling this.
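The core of the remap, condensed for illustration; BNXT_ULP_FREE_PARIF_BASE is 11 in this series, so a physical PARIF of 0 maps to 11, 1 to 12, and so on:

    /* Shift the physical port PARIF into the reserved range so that
     * offload miss traffic is steered to its own partition.
     */
    parif += BNXT_ULP_FREE_PARIF_BASE;
    ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF, parif);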
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_def_rules.c | 4 +--
drivers/net/bnxt/tf_ulp/ulp_port_db.h | 1 +
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 40 ++++++++++++++++++++++++
3 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
index 4d4f7c4ea..d86e4c9ae 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
@@ -12,8 +12,6 @@
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
-#define BNXT_ULP_FREE_PARIF_BASE 11
-
struct bnxt_ulp_def_param_handler {
int32_t (*vfr_func)(struct bnxt_ulp_context *ulp_ctx,
struct ulp_tlv_param *param,
@@ -85,6 +83,8 @@ ulp_set_parif_in_comp_fld(struct bnxt_ulp_context *ulp_ctx,
if (parif_type == BNXT_ULP_PHY_PORT_PARIF) {
idx = BNXT_ULP_CF_IDX_PHY_PORT_PARIF;
+ /* Parif needs to be reset to a free partition */
+ parif += BNXT_ULP_FREE_PARIF_BASE;
} else if (parif_type == BNXT_ULP_DRV_FUNC_PARIF) {
idx = BNXT_ULP_CF_IDX_DRV_FUNC_PARIF;
/* Parif needs to be reset to a free partition */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.h b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
index 393d01b7c..2b323d168 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.h
@@ -10,6 +10,7 @@
#define BNXT_PORT_DB_MAX_INTF_LIST 256
#define BNXT_PORT_DB_MAX_FUNC 2048
+#define BNXT_ULP_FREE_PARIF_BASE 11
enum bnxt_ulp_svif_type {
BNXT_ULP_DRV_FUNC_SVIF = 0,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index 3891bcdc1..39f801b2f 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -158,6 +158,43 @@ bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
return BNXT_TF_RC_SUCCESS;
}
+/*
+ * Function to handle the post processing of the computed
+ * fields for the interface.
+ */
+static void
+bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
+{
+ uint32_t ifindex;
+ uint16_t port_id, parif;
+ enum bnxt_ulp_direction_type dir;
+
+ /* get the direction details */
+ dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
+
+ if (dir == BNXT_ULP_DIR_INGRESS) {
+ /* read the port id details */
+ port_id = ULP_COMP_FLD_IDX_RD(params,
+ BNXT_ULP_CF_IDX_INCOMING_IF);
+ if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
+ port_id,
+ &ifindex)) {
+ BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
+ return;
+ }
+ /* Set port PARIF */
+ if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
+ BNXT_ULP_PHY_PORT_PARIF, &parif)) {
+ BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
+ return;
+ }
+ /* Parif needs to be reset to a free partition */
+ parif += BNXT_ULP_FREE_PARIF_BASE;
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
+ parif);
+ }
+}
+
/*
* Function to handle the post processing of the parsing details
*/
@@ -213,6 +250,9 @@ bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
/* Merge the hdr_fp_bit into the proto header bit */
params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
+ /* Update the computed interface parameters */
+ bnxt_ulp_comp_fld_intf_update(params);
+
/* TBD: Handle the flow rejection scenarios */
return 0;
}
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 04/22] net/bnxt: modify ulp mapper to use tcam search
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (2 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 03/22] net/bnxt: configure parif for offload miss rules Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 05/22] net/bnxt: add tf hash API Ajit Khaparde
` (18 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Mike Baucom, Kishore Padmanabha
From: Mike Baucom <michael.baucom@broadcom.com>
Modified the ULP mapper to use the new tf_search_tcam_entry API.
When search before allocation is requested, the mapper calls
tf_search_tcam_entry with the alloc flag; a condensed sketch of the
resulting flow is shown after the list below.
- On HIT, the result and the TCAM index are returned.
- On MISS, the TCAM index is returned but the result is
created and the TCAM entry is set.
- On REJECT, the flow request is rejected.
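A condensed sketch of that flow, based on the mapper changes in this patch (setup of tfp, tbl, key, mask and result is omitted; the search status spelling follows the code below):

    struct tf_search_tcam_entry_parms sparms = { 0 };
    int rc;

    sparms.dir = tbl->direction;
    sparms.tcam_tbl_type = tbl->resource_type;
    sparms.key = key;
    sparms.key_sz_in_bits = tbl->key_bit_size;
    sparms.mask = mask;
    sparms.priority = tbl->priority;
    sparms.alloc = 1;                   /* allocate an index on miss */
    sparms.result = result;
    sparms.result_sz_in_bits = tbl->result_bit_size;

    rc = tf_search_tcam_entry(tfp, &sparms);
    if (rc)
        return rc;                      /* the search itself failed */
    if (sparms.search_status == REJECT)
        return -ENOMEM;                 /* no match and no free index */

    if (sparms.hit) {
        /* HIT: reuse sparms.idx; sparms.result holds the existing entry */
    } else {
        /* MISS: build the result and program the TCAM at sparms.idx */
    }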
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 106 ++++++++++++++++++---------
1 file changed, 71 insertions(+), 35 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 6a727ed34..2d3373df2 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -690,7 +690,7 @@ ulp_mapper_ident_extract(struct bnxt_ulp_mapper_parms *parms,
{
struct ulp_flow_db_res_params fid_parms;
uint64_t id = 0;
- uint32_t idx;
+ uint32_t idx = 0;
struct tf_search_identifier_parms sparms = { 0 };
struct tf_free_identifier_parms free_parms = { 0 };
struct tf *tfp;
@@ -1292,12 +1292,13 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct tf *tfp;
int32_t rc, trc;
struct tf_alloc_tcam_entry_parms aparms = { 0 };
+ struct tf_search_tcam_entry_parms searchparms = { 0 };
struct tf_set_tcam_entry_parms sparms = { 0 };
struct ulp_flow_db_res_params fid_parms = { 0 };
struct tf_free_tcam_entry_parms free_parms = { 0 };
uint32_t hit = 0;
uint16_t tmplen = 0;
- struct ulp_blob res_blob;
+ uint16_t idx;
/* Skip this if was handled by the cache. */
if (parms->tcam_tbl_opc == BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_SKIP) {
@@ -1352,37 +1353,72 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
}
}
- aparms.dir = tbl->direction;
- aparms.tcam_tbl_type = tbl->resource_type;
- aparms.search_enable = tbl->srch_b4_alloc;
- aparms.key_sz_in_bits = tbl->key_bit_size;
- aparms.key = ulp_blob_data_get(&key, &tmplen);
- if (tbl->key_bit_size != tmplen) {
- BNXT_TF_DBG(ERR, "Key len (%d) != Expected (%d)\n",
- tmplen, tbl->key_bit_size);
- return -EINVAL;
- }
+ if (!tbl->srch_b4_alloc) {
+ /*
+ * No search for re-use is requested, so simply allocate the
+ * tcam index.
+ */
+ aparms.dir = tbl->direction;
+ aparms.tcam_tbl_type = tbl->resource_type;
+ aparms.search_enable = tbl->srch_b4_alloc;
+ aparms.key_sz_in_bits = tbl->key_bit_size;
+ aparms.key = ulp_blob_data_get(&key, &tmplen);
+ if (tbl->key_bit_size != tmplen) {
+ BNXT_TF_DBG(ERR, "Key len (%d) != Expected (%d)\n",
+ tmplen, tbl->key_bit_size);
+ return -EINVAL;
+ }
- aparms.mask = ulp_blob_data_get(&mask, &tmplen);
- if (tbl->key_bit_size != tmplen) {
- BNXT_TF_DBG(ERR, "Mask len (%d) != Expected (%d)\n",
- tmplen, tbl->key_bit_size);
- return -EINVAL;
- }
+ aparms.mask = ulp_blob_data_get(&mask, &tmplen);
+ if (tbl->key_bit_size != tmplen) {
+ BNXT_TF_DBG(ERR, "Mask len (%d) != Expected (%d)\n",
+ tmplen, tbl->key_bit_size);
+ return -EINVAL;
+ }
- aparms.priority = tbl->priority;
+ aparms.priority = tbl->priority;
- /*
- * All failures after this succeeds require the entry to be freed.
- * cannot return directly on failure, but needs to goto error
- */
- rc = tf_alloc_tcam_entry(tfp, &aparms);
- if (rc) {
- BNXT_TF_DBG(ERR, "tcam alloc failed rc=%d.\n", rc);
- return rc;
- }
+ /*
+ * All failures after this succeeds require the entry to be
+ * freed. cannot return directly on failure, but needs to goto
+ * error.
+ */
+ rc = tf_alloc_tcam_entry(tfp, &aparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "tcam alloc failed rc=%d.\n", rc);
+ return rc;
+ }
+ idx = aparms.idx;
+ hit = aparms.hit;
+ } else {
+ /*
+ * Searching before allocation to see if we already have an
+ * entry. This allows re-use of a constrained resource.
+ */
+ searchparms.dir = tbl->direction;
+ searchparms.tcam_tbl_type = tbl->resource_type;
+ searchparms.key = ulp_blob_data_get(&key, &tmplen);
+ searchparms.key_sz_in_bits = tbl->key_bit_size;
+ searchparms.mask = ulp_blob_data_get(&mask, &tmplen);
+ searchparms.priority = tbl->priority;
+ searchparms.alloc = 1;
+ searchparms.result = ulp_blob_data_get(&data, &tmplen);
+ searchparms.result_sz_in_bits = tbl->result_bit_size;
+
+ rc = tf_search_tcam_entry(tfp, &searchparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "tcam search failed rc=%d\n", rc);
+ return rc;
+ }
- hit = aparms.hit;
+ /* Successful search, check the result */
+ if (searchparms.search_status == REJECT) {
+ BNXT_TF_DBG(ERR, "tcam alloc rejected\n");
+ return -ENOMEM;
+ }
+ idx = searchparms.idx;
+ hit = searchparms.hit;
+ }
/* Build the result */
if (!tbl->srch_b4_alloc || !hit) {
@@ -1430,9 +1466,9 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
}
}
- sparms.dir = aparms.dir;
- sparms.tcam_tbl_type = aparms.tcam_tbl_type;
- sparms.idx = aparms.idx;
+ sparms.dir = tbl->direction;
+ sparms.tcam_tbl_type = tbl->resource_type;
+ sparms.idx = idx;
/* Already verified the key/mask lengths */
sparms.key = ulp_blob_data_get(&key, &tmplen);
sparms.mask = ulp_blob_data_get(&mask, &tmplen);
@@ -1464,7 +1500,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
rc = -EINVAL;
goto error;
}
- parms->cache_ptr->tcam_idx = aparms.idx;
+ parms->cache_ptr->tcam_idx = idx;
}
/* Mark action */
@@ -1483,7 +1519,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
for (i = 0; i < num_idents; i++) {
rc = ulp_mapper_ident_extract(parms, tbl,
- &idents[i], &res_blob);
+ &idents[i], &data);
if (rc) {
BNXT_TF_DBG(ERR,
"Error in ident extraction\n");
@@ -1501,7 +1537,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
fid_parms.resource_func = tbl->resource_func;
fid_parms.resource_type = tbl->resource_type;
fid_parms.critical_resource = tbl->critical_resource;
- fid_parms.resource_hndl = aparms.idx;
+ fid_parms.resource_hndl = idx;
rc = ulp_flow_db_resource_add(parms->ulp_ctx,
parms->tbl_idx,
parms->fid,
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 05/22] net/bnxt: add tf hash API
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (3 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 04/22] net/bnxt: modify ulp mapper to use tcam search Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-27 10:32 ` Ferruh Yigit
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 06/22] net/bnxt: skip mark id injection into mbuf Ajit Khaparde
` (17 subsequent siblings)
22 siblings, 1 reply; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Mike Baucom, Farah Smith, Kishore Padmanabha
From: Mike Baucom <michael.baucom@broadcom.com>
Added tf_hash API for common hash uses across tf_core functions
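A minimal usage sketch, assuming arbitrary key and mask buffers (how the shadow tables actually combine key and mask into a hash is defined in later patches, not here):

    uint8_t key[16] = { 0 };
    uint8_t mask[16] = { 0 };
    uint32_t hash, crc;

    /* One-shot CRC32 over a single buffer */
    hash = tf_hash_calc_crc32(key, sizeof(key));

    /* Incremental form: seed the second call with the first result */
    crc = tf_hash_calc_crc32i(~0U, key, sizeof(key));
    crc = tf_hash_calc_crc32i(crc, mask, sizeof(mask));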
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Farah Smith <farah.smith@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/meson.build | 1 +
drivers/net/bnxt/tf_core/Makefile | 1 +
drivers/net/bnxt/tf_core/tf_hash.c | 106 +++++++++++++++++++++++++++++
drivers/net/bnxt/tf_core/tf_hash.h | 27 ++++++++
4 files changed, 135 insertions(+)
create mode 100644 drivers/net/bnxt/tf_core/tf_hash.c
create mode 100644 drivers/net/bnxt/tf_core/tf_hash.h
diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build
index 8529b333c..190469e29 100644
--- a/drivers/net/bnxt/meson.build
+++ b/drivers/net/bnxt/meson.build
@@ -47,6 +47,7 @@ sources = files('bnxt_cpr.c',
'tf_core/tf_global_cfg.c',
'tf_core/tf_em_host.c',
'tf_core/tf_shadow_identifier.c',
+ 'tf_core/tf_hash.c',
'hcapi/hcapi_cfa_p4.c',
diff --git a/drivers/net/bnxt/tf_core/Makefile b/drivers/net/bnxt/tf_core/Makefile
index cca0e2e85..cf6aaec6c 100644
--- a/drivers/net/bnxt/tf_core/Makefile
+++ b/drivers/net/bnxt/tf_core/Makefile
@@ -32,3 +32,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_util.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_if_tbl.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_global_cfg.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_shadow_identifier.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += tf_core/tf_hash.c
diff --git a/drivers/net/bnxt/tf_core/tf_hash.c b/drivers/net/bnxt/tf_core/tf_hash.c
new file mode 100644
index 000000000..68476cbc1
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_hash.c
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#include "tf_hash.h"
+
+/* CRC polynomial 0xedb88320 */
+static const uint32_t tf_hash_crc32tbl[] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+/**
+ * Calculate a crc32 on the buffer with an initial value and len
+ *
+ * Returns the crc32
+ */
+uint32_t
+tf_hash_calc_crc32i(uint32_t init, uint8_t *buf, uint32_t len)
+{
+ uint32_t crc = init;
+
+ while (len--)
+ crc = tf_hash_crc32tbl[(crc ^ buf[len]) & 0xff] ^
+ (crc >> 8);
+
+ return crc;
+}
+
+/**
+ * Calculate a crc32 on the buffer with a default initial value
+ *
+ * Returns the crc32
+ */
+uint32_t
+tf_hash_calc_crc32(uint8_t *buf, uint32_t len)
+{
+ uint32_t crc = ~0U;
+
+ crc = tf_hash_calc_crc32i(crc, buf, len);
+
+ return ~crc;
+}
diff --git a/drivers/net/bnxt/tf_core/tf_hash.h b/drivers/net/bnxt/tf_core/tf_hash.h
new file mode 100644
index 000000000..6b60afff1
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/tf_hash.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _TF_HASH_H_
+#define _TF_HASH_H_
+
+#include "tf_core.h"
+
+/**
+ * Calculate a crc32 on the buffer with an initial value and len
+ *
+ * Returns the crc32
+ */
+uint32_t
+tf_hash_calc_crc32i(uint32_t init, uint8_t *buf, uint32_t len);
+
+/**
+ * Calculate a crc32 on the buffer with a default initial value
+ *
+ * Returns the crc32
+ */
+uint32_t
+tf_hash_calc_crc32(uint8_t *buf, uint32_t len);
+
+#endif
--
2.21.1 (Apple Git-122.3)
* Re: [dpdk-dev] [PATCH v3 05/22] net/bnxt: add tf hash API
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 05/22] net/bnxt: add tf hash API Ajit Khaparde
@ 2020-07-27 10:32 ` Ferruh Yigit
0 siblings, 0 replies; 102+ messages in thread
From: Ferruh Yigit @ 2020-07-27 10:32 UTC (permalink / raw)
To: Ajit Khaparde, dev; +Cc: Mike Baucom, Farah Smith, Kishore Padmanabha
On 7/24/2020 6:32 AM, Ajit Khaparde wrote:
> From: Mike Baucom <michael.baucom@broadcom.com>
>
> Added tf_hash API for common hash uses across tf_core functions
>
> Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
> Reviewed-by: Farah Smith <farah.smith@broadcom.com>
> Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
'tf' is TruFlow, right? I think 'tf' is hard to understand, can you please prefer
TruFlow instead?
* [dpdk-dev] [PATCH v3 06/22] net/bnxt: skip mark id injection into mbuf
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (4 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 05/22] net/bnxt: add tf hash API Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 07/22] net/bnxt: update nat template Ajit Khaparde
` (16 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Venkat Duvvuru, Sriharsha Basavapatna, Somnath Kotur
From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
When a packet is looped back from the VF to the VFR, it is marked to
identify the VFR interface. However, this mark_id should not be
percolated up to OVS as it is internal to the PMD.
This patch fixes it by skipping mark_id injection into the mbuf if the
packet is received on the VFR interface.
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
---
drivers/net/bnxt/bnxt_rxr.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index baf73cb25..43b1256dc 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -485,6 +485,9 @@ bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
cfa_code, vfr_flag, &mark_id);
if (!rc) {
+ /* VF to VFR Rx path. So, skip mark_id injection in mbuf */
+ if (vfr_flag && *vfr_flag)
+ return mark_id;
/* Got the mark, write it to the mbuf and return */
mbuf->hash.fdir.hi = mark_id;
mbuf->udata64 = (cfa_code & 0xffffffffull) << 32;
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 07/22] net/bnxt: update nat template
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (5 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 06/22] net/bnxt: skip mark id injection into mbuf Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 08/22] net/bnxt: configure parif for the egress rules Ajit Khaparde
` (15 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Kishore Padmanabha, Michael Baucom
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
The template is updated to support additional combinations
of NAT actions.
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_template_db_act.c | 412 +++++++++++-------
.../net/bnxt/tf_ulp/ulp_template_db_class.c | 16 +-
.../net/bnxt/tf_ulp/ulp_template_db_enum.h | 26 +-
drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c | 4 +-
4 files changed, 285 insertions(+), 173 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
index 0f19e8ed1..31fe90577 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
@@ -12,80 +12,88 @@ uint16_t ulp_act_sig_tbl[BNXT_ULP_ACT_SIG_TBL_MAX_SZ] = {
[BNXT_ULP_ACT_HID_015a] = 1,
[BNXT_ULP_ACT_HID_00eb] = 2,
[BNXT_ULP_ACT_HID_0043] = 3,
- [BNXT_ULP_ACT_HID_01d6] = 4,
- [BNXT_ULP_ACT_HID_015e] = 5,
- [BNXT_ULP_ACT_HID_00ef] = 6,
- [BNXT_ULP_ACT_HID_0047] = 7,
- [BNXT_ULP_ACT_HID_01da] = 8,
- [BNXT_ULP_ACT_HID_025b] = 9,
- [BNXT_ULP_ACT_HID_01ec] = 10,
- [BNXT_ULP_ACT_HID_0144] = 11,
- [BNXT_ULP_ACT_HID_02d7] = 12,
- [BNXT_ULP_ACT_HID_025f] = 13,
- [BNXT_ULP_ACT_HID_01f0] = 14,
- [BNXT_ULP_ACT_HID_0148] = 15,
- [BNXT_ULP_ACT_HID_02db] = 16,
- [BNXT_ULP_ACT_HID_0000] = 17,
- [BNXT_ULP_ACT_HID_0002] = 18,
- [BNXT_ULP_ACT_HID_0800] = 19,
- [BNXT_ULP_ACT_HID_0101] = 20,
- [BNXT_ULP_ACT_HID_0020] = 21,
- [BNXT_ULP_ACT_HID_0901] = 22,
- [BNXT_ULP_ACT_HID_0121] = 23,
- [BNXT_ULP_ACT_HID_0004] = 24,
- [BNXT_ULP_ACT_HID_0804] = 25,
- [BNXT_ULP_ACT_HID_0105] = 26,
- [BNXT_ULP_ACT_HID_0024] = 27,
- [BNXT_ULP_ACT_HID_0905] = 28,
- [BNXT_ULP_ACT_HID_0125] = 29,
- [BNXT_ULP_ACT_HID_0001] = 30,
- [BNXT_ULP_ACT_HID_0005] = 31,
- [BNXT_ULP_ACT_HID_0009] = 32,
- [BNXT_ULP_ACT_HID_000d] = 33,
- [BNXT_ULP_ACT_HID_0021] = 34,
- [BNXT_ULP_ACT_HID_0029] = 35,
- [BNXT_ULP_ACT_HID_0025] = 36,
- [BNXT_ULP_ACT_HID_002d] = 37,
- [BNXT_ULP_ACT_HID_0801] = 38,
- [BNXT_ULP_ACT_HID_0809] = 39,
- [BNXT_ULP_ACT_HID_0805] = 40,
- [BNXT_ULP_ACT_HID_080d] = 41,
- [BNXT_ULP_ACT_HID_0c15] = 42,
- [BNXT_ULP_ACT_HID_0c19] = 43,
- [BNXT_ULP_ACT_HID_02f6] = 44,
- [BNXT_ULP_ACT_HID_04f8] = 45,
- [BNXT_ULP_ACT_HID_01df] = 46,
- [BNXT_ULP_ACT_HID_05e3] = 47,
- [BNXT_ULP_ACT_HID_02fa] = 48,
- [BNXT_ULP_ACT_HID_04fc] = 49,
- [BNXT_ULP_ACT_HID_01e3] = 50,
- [BNXT_ULP_ACT_HID_05e7] = 51,
- [BNXT_ULP_ACT_HID_03f7] = 52,
- [BNXT_ULP_ACT_HID_05f9] = 53,
- [BNXT_ULP_ACT_HID_02e0] = 54,
- [BNXT_ULP_ACT_HID_06e4] = 55,
- [BNXT_ULP_ACT_HID_03fb] = 56,
- [BNXT_ULP_ACT_HID_05fd] = 57,
- [BNXT_ULP_ACT_HID_02e4] = 58,
- [BNXT_ULP_ACT_HID_06e8] = 59,
- [BNXT_ULP_ACT_HID_040d] = 60,
- [BNXT_ULP_ACT_HID_040f] = 61,
- [BNXT_ULP_ACT_HID_0413] = 62,
- [BNXT_ULP_ACT_HID_0c0d] = 63,
- [BNXT_ULP_ACT_HID_0567] = 64,
- [BNXT_ULP_ACT_HID_0a49] = 65,
- [BNXT_ULP_ACT_HID_050e] = 66,
- [BNXT_ULP_ACT_HID_0d0e] = 67,
- [BNXT_ULP_ACT_HID_0668] = 68,
- [BNXT_ULP_ACT_HID_0b4a] = 69,
- [BNXT_ULP_ACT_HID_0411] = 70,
- [BNXT_ULP_ACT_HID_056b] = 71,
- [BNXT_ULP_ACT_HID_0a4d] = 72,
- [BNXT_ULP_ACT_HID_0c11] = 73,
- [BNXT_ULP_ACT_HID_0512] = 74,
- [BNXT_ULP_ACT_HID_0d12] = 75,
- [BNXT_ULP_ACT_HID_066c] = 76,
- [BNXT_ULP_ACT_HID_0b4e] = 77
+ [BNXT_ULP_ACT_HID_03d8] = 4,
+ [BNXT_ULP_ACT_HID_02c1] = 5,
+ [BNXT_ULP_ACT_HID_015e] = 6,
+ [BNXT_ULP_ACT_HID_00ef] = 7,
+ [BNXT_ULP_ACT_HID_0047] = 8,
+ [BNXT_ULP_ACT_HID_03dc] = 9,
+ [BNXT_ULP_ACT_HID_02c5] = 10,
+ [BNXT_ULP_ACT_HID_025b] = 11,
+ [BNXT_ULP_ACT_HID_01ec] = 12,
+ [BNXT_ULP_ACT_HID_0144] = 13,
+ [BNXT_ULP_ACT_HID_04d9] = 14,
+ [BNXT_ULP_ACT_HID_03c2] = 15,
+ [BNXT_ULP_ACT_HID_025f] = 16,
+ [BNXT_ULP_ACT_HID_01f0] = 17,
+ [BNXT_ULP_ACT_HID_0148] = 18,
+ [BNXT_ULP_ACT_HID_04dd] = 19,
+ [BNXT_ULP_ACT_HID_03c6] = 20,
+ [BNXT_ULP_ACT_HID_0000] = 21,
+ [BNXT_ULP_ACT_HID_0002] = 22,
+ [BNXT_ULP_ACT_HID_0800] = 23,
+ [BNXT_ULP_ACT_HID_0101] = 24,
+ [BNXT_ULP_ACT_HID_0020] = 25,
+ [BNXT_ULP_ACT_HID_0901] = 26,
+ [BNXT_ULP_ACT_HID_0121] = 27,
+ [BNXT_ULP_ACT_HID_0004] = 28,
+ [BNXT_ULP_ACT_HID_0804] = 29,
+ [BNXT_ULP_ACT_HID_0105] = 30,
+ [BNXT_ULP_ACT_HID_0024] = 31,
+ [BNXT_ULP_ACT_HID_0905] = 32,
+ [BNXT_ULP_ACT_HID_0125] = 33,
+ [BNXT_ULP_ACT_HID_0001] = 34,
+ [BNXT_ULP_ACT_HID_0005] = 35,
+ [BNXT_ULP_ACT_HID_0009] = 36,
+ [BNXT_ULP_ACT_HID_000d] = 37,
+ [BNXT_ULP_ACT_HID_0021] = 38,
+ [BNXT_ULP_ACT_HID_0029] = 39,
+ [BNXT_ULP_ACT_HID_0025] = 40,
+ [BNXT_ULP_ACT_HID_002d] = 41,
+ [BNXT_ULP_ACT_HID_0801] = 42,
+ [BNXT_ULP_ACT_HID_0809] = 43,
+ [BNXT_ULP_ACT_HID_0805] = 44,
+ [BNXT_ULP_ACT_HID_080d] = 45,
+ [BNXT_ULP_ACT_HID_0c15] = 46,
+ [BNXT_ULP_ACT_HID_0c19] = 47,
+ [BNXT_ULP_ACT_HID_02f6] = 48,
+ [BNXT_ULP_ACT_HID_04f8] = 49,
+ [BNXT_ULP_ACT_HID_01df] = 50,
+ [BNXT_ULP_ACT_HID_07e5] = 51,
+ [BNXT_ULP_ACT_HID_06ce] = 52,
+ [BNXT_ULP_ACT_HID_02fa] = 53,
+ [BNXT_ULP_ACT_HID_04fc] = 54,
+ [BNXT_ULP_ACT_HID_01e3] = 55,
+ [BNXT_ULP_ACT_HID_07e9] = 56,
+ [BNXT_ULP_ACT_HID_06d2] = 57,
+ [BNXT_ULP_ACT_HID_03f7] = 58,
+ [BNXT_ULP_ACT_HID_05f9] = 59,
+ [BNXT_ULP_ACT_HID_02e0] = 60,
+ [BNXT_ULP_ACT_HID_08e6] = 61,
+ [BNXT_ULP_ACT_HID_07cf] = 62,
+ [BNXT_ULP_ACT_HID_03fb] = 63,
+ [BNXT_ULP_ACT_HID_05fd] = 64,
+ [BNXT_ULP_ACT_HID_02e4] = 65,
+ [BNXT_ULP_ACT_HID_08ea] = 66,
+ [BNXT_ULP_ACT_HID_07d3] = 67,
+ [BNXT_ULP_ACT_HID_040d] = 68,
+ [BNXT_ULP_ACT_HID_040f] = 69,
+ [BNXT_ULP_ACT_HID_0413] = 70,
+ [BNXT_ULP_ACT_HID_0c0d] = 71,
+ [BNXT_ULP_ACT_HID_0567] = 72,
+ [BNXT_ULP_ACT_HID_0a49] = 73,
+ [BNXT_ULP_ACT_HID_050e] = 74,
+ [BNXT_ULP_ACT_HID_0d0e] = 75,
+ [BNXT_ULP_ACT_HID_0668] = 76,
+ [BNXT_ULP_ACT_HID_0b4a] = 77,
+ [BNXT_ULP_ACT_HID_0411] = 78,
+ [BNXT_ULP_ACT_HID_056b] = 79,
+ [BNXT_ULP_ACT_HID_0a4d] = 80,
+ [BNXT_ULP_ACT_HID_0c11] = 81,
+ [BNXT_ULP_ACT_HID_0512] = 82,
+ [BNXT_ULP_ACT_HID_0d12] = 83,
+ [BNXT_ULP_ACT_HID_066c] = 84,
+ [BNXT_ULP_ACT_HID_0b4e] = 85
};
struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
@@ -112,14 +120,25 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
.act_tid = 1
},
[4] = {
- .act_hid = BNXT_ULP_ACT_HID_01d6,
+ .act_hid = BNXT_ULP_ACT_HID_03d8,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
[5] = {
+ .act_hid = BNXT_ULP_ACT_HID_02c1,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_ING },
+ .act_tid = 1
+ },
+ [6] = {
.act_hid = BNXT_ULP_ACT_HID_015e,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -127,7 +146,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [6] = {
+ [7] = {
.act_hid = BNXT_ULP_ACT_HID_00ef,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -136,7 +155,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [7] = {
+ [8] = {
.act_hid = BNXT_ULP_ACT_HID_0047,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -144,16 +163,28 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [8] = {
- .act_hid = BNXT_ULP_ACT_HID_01da,
+ [9] = {
+ .act_hid = BNXT_ULP_ACT_HID_03dc,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [9] = {
+ [10] = {
+ .act_hid = BNXT_ULP_ACT_HID_02c5,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_COUNT |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_ING },
+ .act_tid = 1
+ },
+ [11] = {
.act_hid = BNXT_ULP_ACT_HID_025b,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -161,7 +192,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [10] = {
+ [12] = {
.act_hid = BNXT_ULP_ACT_HID_01ec,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -170,7 +201,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [11] = {
+ [13] = {
.act_hid = BNXT_ULP_ACT_HID_0144,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -178,16 +209,28 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [12] = {
- .act_hid = BNXT_ULP_ACT_HID_02d7,
+ [14] = {
+ .act_hid = BNXT_ULP_ACT_HID_04d9,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [13] = {
+ [15] = {
+ .act_hid = BNXT_ULP_ACT_HID_03c2,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_DEC_TTL |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_ING },
+ .act_tid = 1
+ },
+ [16] = {
.act_hid = BNXT_ULP_ACT_HID_025f,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -196,7 +239,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [14] = {
+ [17] = {
.act_hid = BNXT_ULP_ACT_HID_01f0,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -206,7 +249,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [15] = {
+ [18] = {
.act_hid = BNXT_ULP_ACT_HID_0148,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -215,51 +258,64 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [16] = {
- .act_hid = BNXT_ULP_ACT_HID_02db,
+ [19] = {
+ .act_hid = BNXT_ULP_ACT_HID_04dd,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 1
},
- [17] = {
+ [20] = {
+ .act_hid = BNXT_ULP_ACT_HID_03c6,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_DEC_TTL |
+ BNXT_ULP_ACTION_BIT_COUNT |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_ING },
+ .act_tid = 1
+ },
+ [21] = {
.act_hid = BNXT_ULP_ACT_HID_0000,
.act_sig = { .bits =
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [18] = {
+ [22] = {
.act_hid = BNXT_ULP_ACT_HID_0002,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DROP |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [19] = {
+ [23] = {
.act_hid = BNXT_ULP_ACT_HID_0800,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_POP_VLAN |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [20] = {
+ [24] = {
.act_hid = BNXT_ULP_ACT_HID_0101,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [21] = {
+ [25] = {
.act_hid = BNXT_ULP_ACT_HID_0020,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_VXLAN_DECAP |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [22] = {
+ [26] = {
.act_hid = BNXT_ULP_ACT_HID_0901,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -267,7 +323,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [23] = {
+ [27] = {
.act_hid = BNXT_ULP_ACT_HID_0121,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_VXLAN_DECAP |
@@ -275,14 +331,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [24] = {
+ [28] = {
.act_hid = BNXT_ULP_ACT_HID_0004,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [25] = {
+ [29] = {
.act_hid = BNXT_ULP_ACT_HID_0804,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -290,7 +346,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [26] = {
+ [30] = {
.act_hid = BNXT_ULP_ACT_HID_0105,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -298,7 +354,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [27] = {
+ [31] = {
.act_hid = BNXT_ULP_ACT_HID_0024,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -306,7 +362,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [28] = {
+ [32] = {
.act_hid = BNXT_ULP_ACT_HID_0905,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -315,7 +371,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [29] = {
+ [33] = {
.act_hid = BNXT_ULP_ACT_HID_0125,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -324,14 +380,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 2
},
- [30] = {
+ [34] = {
.act_hid = BNXT_ULP_ACT_HID_0001,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [31] = {
+ [35] = {
.act_hid = BNXT_ULP_ACT_HID_0005,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -339,7 +395,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [32] = {
+ [36] = {
.act_hid = BNXT_ULP_ACT_HID_0009,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -347,7 +403,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [33] = {
+ [37] = {
.act_hid = BNXT_ULP_ACT_HID_000d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -356,7 +412,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [34] = {
+ [38] = {
.act_hid = BNXT_ULP_ACT_HID_0021,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -364,7 +420,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [35] = {
+ [39] = {
.act_hid = BNXT_ULP_ACT_HID_0029,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -373,7 +429,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [36] = {
+ [40] = {
.act_hid = BNXT_ULP_ACT_HID_0025,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -382,7 +438,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [37] = {
+ [41] = {
.act_hid = BNXT_ULP_ACT_HID_002d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -392,7 +448,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [38] = {
+ [42] = {
.act_hid = BNXT_ULP_ACT_HID_0801,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -400,7 +456,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [39] = {
+ [43] = {
.act_hid = BNXT_ULP_ACT_HID_0809,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -409,7 +465,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [40] = {
+ [44] = {
.act_hid = BNXT_ULP_ACT_HID_0805,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -418,7 +474,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [41] = {
+ [45] = {
.act_hid = BNXT_ULP_ACT_HID_080d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_MARK |
@@ -428,14 +484,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_ING },
.act_tid = 3
},
- [42] = {
+ [46] = {
.act_hid = BNXT_ULP_ACT_HID_0c15,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_VXLAN_ENCAP |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 4
},
- [43] = {
+ [47] = {
.act_hid = BNXT_ULP_ACT_HID_0c19,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_VXLAN_ENCAP |
@@ -443,14 +499,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 4
},
- [44] = {
+ [48] = {
.act_hid = BNXT_ULP_ACT_HID_02f6,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [45] = {
+ [49] = {
.act_hid = BNXT_ULP_ACT_HID_04f8,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
@@ -458,22 +514,33 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [46] = {
+ [50] = {
.act_hid = BNXT_ULP_ACT_HID_01df,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [47] = {
- .act_hid = BNXT_ULP_ACT_HID_05e3,
+ [51] = {
+ .act_hid = BNXT_ULP_ACT_HID_07e5,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [48] = {
+ [52] = {
+ .act_hid = BNXT_ULP_ACT_HID_06ce,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .act_tid = 5
+ },
+ [53] = {
.act_hid = BNXT_ULP_ACT_HID_02fa,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -481,7 +548,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [49] = {
+ [54] = {
.act_hid = BNXT_ULP_ACT_HID_04fc,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -490,7 +557,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [50] = {
+ [55] = {
.act_hid = BNXT_ULP_ACT_HID_01e3,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -498,16 +565,28 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [51] = {
- .act_hid = BNXT_ULP_ACT_HID_05e7,
+ [56] = {
+ .act_hid = BNXT_ULP_ACT_HID_07e9,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [52] = {
+ [57] = {
+ .act_hid = BNXT_ULP_ACT_HID_06d2,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_COUNT |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .act_tid = 5
+ },
+ [58] = {
.act_hid = BNXT_ULP_ACT_HID_03f7,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -515,7 +594,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [53] = {
+ [59] = {
.act_hid = BNXT_ULP_ACT_HID_05f9,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -524,7 +603,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [54] = {
+ [60] = {
.act_hid = BNXT_ULP_ACT_HID_02e0,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -532,16 +611,28 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [55] = {
- .act_hid = BNXT_ULP_ACT_HID_06e4,
+ [61] = {
+ .act_hid = BNXT_ULP_ACT_HID_08e6,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [56] = {
+ [62] = {
+ .act_hid = BNXT_ULP_ACT_HID_07cf,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_DEC_TTL |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .act_tid = 5
+ },
+ [63] = {
.act_hid = BNXT_ULP_ACT_HID_03fb,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -550,7 +641,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [57] = {
+ [64] = {
.act_hid = BNXT_ULP_ACT_HID_05fd,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -560,7 +651,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [58] = {
+ [65] = {
.act_hid = BNXT_ULP_ACT_HID_02e4,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -569,30 +660,43 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [59] = {
- .act_hid = BNXT_ULP_ACT_HID_06e8,
+ [66] = {
+ .act_hid = BNXT_ULP_ACT_HID_08ea,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
BNXT_ULP_ACTION_BIT_SET_TP_DST |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 5
},
- [60] = {
+ [67] = {
+ .act_hid = BNXT_ULP_ACT_HID_07d3,
+ .act_sig = { .bits =
+ BNXT_ULP_ACTION_BIT_DEC_TTL |
+ BNXT_ULP_ACTION_BIT_COUNT |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_SRC |
+ BNXT_ULP_ACTION_BIT_SET_IPV4_DST |
+ BNXT_ULP_ACTION_BIT_SET_TP_SRC |
+ BNXT_ULP_ACTION_BIT_SET_TP_DST |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .act_tid = 5
+ },
+ [68] = {
.act_hid = BNXT_ULP_ACT_HID_040d,
.act_sig = { .bits =
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [61] = {
+ [69] = {
.act_hid = BNXT_ULP_ACT_HID_040f,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DROP |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [62] = {
+ [70] = {
.act_hid = BNXT_ULP_ACT_HID_0413,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DROP |
@@ -600,14 +704,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [63] = {
+ [71] = {
.act_hid = BNXT_ULP_ACT_HID_0c0d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_POP_VLAN |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [64] = {
+ [72] = {
.act_hid = BNXT_ULP_ACT_HID_0567,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_VLAN_PCP |
@@ -616,7 +720,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [65] = {
+ [73] = {
.act_hid = BNXT_ULP_ACT_HID_0a49,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_SET_VLAN_VID |
@@ -624,14 +728,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [66] = {
+ [74] = {
.act_hid = BNXT_ULP_ACT_HID_050e,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [67] = {
+ [75] = {
.act_hid = BNXT_ULP_ACT_HID_0d0e,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -639,7 +743,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [68] = {
+ [76] = {
.act_hid = BNXT_ULP_ACT_HID_0668,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -649,7 +753,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [69] = {
+ [77] = {
.act_hid = BNXT_ULP_ACT_HID_0b4a,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_DEC_TTL |
@@ -658,14 +762,14 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [70] = {
+ [78] = {
.act_hid = BNXT_ULP_ACT_HID_0411,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [71] = {
+ [79] = {
.act_hid = BNXT_ULP_ACT_HID_056b,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -675,7 +779,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [72] = {
+ [80] = {
.act_hid = BNXT_ULP_ACT_HID_0a4d,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -684,7 +788,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [73] = {
+ [81] = {
.act_hid = BNXT_ULP_ACT_HID_0c11,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -692,7 +796,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [74] = {
+ [82] = {
.act_hid = BNXT_ULP_ACT_HID_0512,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -700,7 +804,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [75] = {
+ [83] = {
.act_hid = BNXT_ULP_ACT_HID_0d12,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -709,7 +813,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [76] = {
+ [84] = {
.act_hid = BNXT_ULP_ACT_HID_066c,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
@@ -720,7 +824,7 @@ struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
BNXT_ULP_FLOW_DIR_BITMASK_EGR },
.act_tid = 6
},
- [77] = {
+ [85] = {
.act_hid = BNXT_ULP_ACT_HID_0b4e,
.act_sig = { .bits =
BNXT_ULP_ACTION_BIT_COUNT |
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
index 200a5a6cc..9de45cdc4 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
@@ -17241,7 +17241,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17311,7 +17311,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17325,7 +17325,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17339,7 +17339,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17353,7 +17353,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17367,7 +17367,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17381,7 +17381,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
@@ -17451,7 +17451,7 @@ struct bnxt_ulp_mapper_ident_info ulp_ident_list[] = {
.ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH,
.regfile_idx = BNXT_ULP_REGFILE_INDEX_L2_CNTXT_ID_0,
.ident_bit_size = 10,
- .ident_bit_pos = 54
+ .ident_bit_pos = 0
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index b5deaf6c6..c9fe1bc47 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -18,7 +18,7 @@
#define BNXT_ULP_CLASS_HID_SHFTL 31
#define BNXT_ULP_CLASS_HID_MASK 2047
#define BNXT_ULP_ACT_SIG_TBL_MAX_SZ 4096
-#define BNXT_ULP_ACT_MATCH_LIST_MAX_SZ 78
+#define BNXT_ULP_ACT_MATCH_LIST_MAX_SZ 86
#define BNXT_ULP_ACT_HID_LOW_PRIME 7919
#define BNXT_ULP_ACT_HID_HIGH_PRIME 4721
#define BNXT_ULP_ACT_HID_SHFTR 23
@@ -786,19 +786,23 @@ enum bnxt_ulp_act_hid {
BNXT_ULP_ACT_HID_015a = 0x015a,
BNXT_ULP_ACT_HID_00eb = 0x00eb,
BNXT_ULP_ACT_HID_0043 = 0x0043,
- BNXT_ULP_ACT_HID_01d6 = 0x01d6,
+ BNXT_ULP_ACT_HID_03d8 = 0x03d8,
+ BNXT_ULP_ACT_HID_02c1 = 0x02c1,
BNXT_ULP_ACT_HID_015e = 0x015e,
BNXT_ULP_ACT_HID_00ef = 0x00ef,
BNXT_ULP_ACT_HID_0047 = 0x0047,
- BNXT_ULP_ACT_HID_01da = 0x01da,
+ BNXT_ULP_ACT_HID_03dc = 0x03dc,
+ BNXT_ULP_ACT_HID_02c5 = 0x02c5,
BNXT_ULP_ACT_HID_025b = 0x025b,
BNXT_ULP_ACT_HID_01ec = 0x01ec,
BNXT_ULP_ACT_HID_0144 = 0x0144,
- BNXT_ULP_ACT_HID_02d7 = 0x02d7,
+ BNXT_ULP_ACT_HID_04d9 = 0x04d9,
+ BNXT_ULP_ACT_HID_03c2 = 0x03c2,
BNXT_ULP_ACT_HID_025f = 0x025f,
BNXT_ULP_ACT_HID_01f0 = 0x01f0,
BNXT_ULP_ACT_HID_0148 = 0x0148,
- BNXT_ULP_ACT_HID_02db = 0x02db,
+ BNXT_ULP_ACT_HID_04dd = 0x04dd,
+ BNXT_ULP_ACT_HID_03c6 = 0x03c6,
BNXT_ULP_ACT_HID_0000 = 0x0000,
BNXT_ULP_ACT_HID_0002 = 0x0002,
BNXT_ULP_ACT_HID_0800 = 0x0800,
@@ -829,19 +833,23 @@ enum bnxt_ulp_act_hid {
BNXT_ULP_ACT_HID_02f6 = 0x02f6,
BNXT_ULP_ACT_HID_04f8 = 0x04f8,
BNXT_ULP_ACT_HID_01df = 0x01df,
- BNXT_ULP_ACT_HID_05e3 = 0x05e3,
+ BNXT_ULP_ACT_HID_07e5 = 0x07e5,
+ BNXT_ULP_ACT_HID_06ce = 0x06ce,
BNXT_ULP_ACT_HID_02fa = 0x02fa,
BNXT_ULP_ACT_HID_04fc = 0x04fc,
BNXT_ULP_ACT_HID_01e3 = 0x01e3,
- BNXT_ULP_ACT_HID_05e7 = 0x05e7,
+ BNXT_ULP_ACT_HID_07e9 = 0x07e9,
+ BNXT_ULP_ACT_HID_06d2 = 0x06d2,
BNXT_ULP_ACT_HID_03f7 = 0x03f7,
BNXT_ULP_ACT_HID_05f9 = 0x05f9,
BNXT_ULP_ACT_HID_02e0 = 0x02e0,
- BNXT_ULP_ACT_HID_06e4 = 0x06e4,
+ BNXT_ULP_ACT_HID_08e6 = 0x08e6,
+ BNXT_ULP_ACT_HID_07cf = 0x07cf,
BNXT_ULP_ACT_HID_03fb = 0x03fb,
BNXT_ULP_ACT_HID_05fd = 0x05fd,
BNXT_ULP_ACT_HID_02e4 = 0x02e4,
- BNXT_ULP_ACT_HID_06e8 = 0x06e8,
+ BNXT_ULP_ACT_HID_08ea = 0x08ea,
+ BNXT_ULP_ACT_HID_07d3 = 0x07d3,
BNXT_ULP_ACT_HID_040d = 0x040d,
BNXT_ULP_ACT_HID_040f = 0x040f,
BNXT_ULP_ACT_HID_0413 = 0x0413,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c
index 4388a0a42..f2e2a5950 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_tbl.c
@@ -259,8 +259,8 @@ struct bnxt_ulp_rte_act_info ulp_act_info[] = {
.proto_act_func = NULL
},
[RTE_FLOW_ACTION_TYPE_DEC_TTL] = {
- .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
- .proto_act_func = ulp_rte_dec_ttl_act_handler
+ .act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
+ .proto_act_func = ulp_rte_dec_ttl_act_handler
},
[RTE_FLOW_ACTION_TYPE_SET_TTL] = {
.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 08/22] net/bnxt: configure parif for the egress rules
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (6 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 07/22] net/bnxt: update nat template Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 09/22] net/bnxt: ignore VLAN priority mask Ajit Khaparde
` (14 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Kishore Padmanabha, Michael Baucom
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
The PARIF for the egress rules needs to be dynamically
configured based on the port type.
PARIF is a handle to a partition of the physical port.
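The patch achieves this with a new conditional mapper opcode; a rough sketch of the selection it adds is below (cond_idx, true_idx and false_idx are illustrative names for the three computed-field indices that the template actually encodes as operands):

    /* If the tested computed field is set, push one computed field into
     * the blob, otherwise push the alternative one.
     */
    uint16_t val_idx;

    if (ULP_COMP_FLD_IDX_RD(parms, cond_idx))
        val_idx = true_idx;
    else
        val_idx = false_idx;

    ulp_blob_push_32(blob, &parms->comp_fld[val_idx], fld->field_bit_size);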
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_def_rules.c | 11 ++--
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 35 ++++++++++++
drivers/net/bnxt/tf_ulp/ulp_port_db.c | 2 +
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 54 +++++++++++++++----
drivers/net/bnxt/tf_ulp/ulp_template_db_act.c | 16 ++++--
.../net/bnxt/tf_ulp/ulp_template_db_class.c | 25 +++++++--
.../net/bnxt/tf_ulp/ulp_template_db_enum.h | 14 ++---
7 files changed, 123 insertions(+), 34 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
index d86e4c9ae..ddc6da8a8 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
@@ -81,17 +81,12 @@ ulp_set_parif_in_comp_fld(struct bnxt_ulp_context *ulp_ctx,
if (rc)
return rc;
- if (parif_type == BNXT_ULP_PHY_PORT_PARIF) {
+ if (parif_type == BNXT_ULP_PHY_PORT_PARIF)
idx = BNXT_ULP_CF_IDX_PHY_PORT_PARIF;
- /* Parif needs to be reset to a free partition */
- parif += BNXT_ULP_FREE_PARIF_BASE;
- } else if (parif_type == BNXT_ULP_DRV_FUNC_PARIF) {
+ else if (parif_type == BNXT_ULP_DRV_FUNC_PARIF)
idx = BNXT_ULP_CF_IDX_DRV_FUNC_PARIF;
- /* Parif needs to be reset to a free partition */
- parif += BNXT_ULP_FREE_PARIF_BASE;
- } else {
+ else
idx = BNXT_ULP_CF_IDX_VF_FUNC_PARIF;
- }
ULP_COMP_FLD_IDX_WR(mapper_params, idx, parif);
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 2d3373df2..a071c0750 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -998,6 +998,41 @@ ulp_mapper_result_field_process(struct bnxt_ulp_mapper_parms *parms,
return -EINVAL;
}
break;
+ case BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF:
+ if (!ulp_operand_read(fld->result_operand,
+ (uint8_t *)&idx,
+ sizeof(uint16_t))) {
+ BNXT_TF_DBG(ERR, "%s key operand read failed.\n", name);
+ return -EINVAL;
+ }
+ idx = tfp_be_to_cpu_16(idx);
+ if (idx >= BNXT_ULP_CF_IDX_LAST) {
+ BNXT_TF_DBG(ERR, "%s invalid index %u\n", name, idx);
+ return -EINVAL;
+ }
+ /* check if the computed field is set */
+ if (ULP_COMP_FLD_IDX_RD(parms, idx))
+ val = fld->result_operand_true;
+ else
+ val = fld->result_operand_false;
+
+ /* read the appropriate computed field */
+ if (!ulp_operand_read(val, (uint8_t *)&idx, sizeof(uint16_t))) {
+ BNXT_TF_DBG(ERR, "%s val operand read failed\n", name);
+ return -EINVAL;
+ }
+ idx = tfp_be_to_cpu_16(idx);
+ if (idx >= BNXT_ULP_CF_IDX_LAST) {
+ BNXT_TF_DBG(ERR, "%s invalid index %u\n", name, idx);
+ return -EINVAL;
+ }
+ val = ulp_blob_push_32(blob, &parms->comp_fld[idx],
+ fld->field_bit_size);
+ if (!val) {
+ BNXT_TF_DBG(ERR, "%s push to key blob failed\n", name);
+ return -EINVAL;
+ }
+ break;
default:
BNXT_TF_DBG(ERR, "invalid result mapper opcode 0x%x\n",
fld->result_opcode);
diff --git a/drivers/net/bnxt/tf_ulp/ulp_port_db.c b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
index 0fc7c0ab2..30876478d 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_port_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_port_db.c
@@ -372,6 +372,8 @@ ulp_port_db_parif_get(struct bnxt_ulp_context *ulp_ctxt,
phy_port_id = port_db->ulp_func_id_tbl[func_id].phy_port_id;
*parif = port_db->phy_port_list[phy_port_id].port_parif;
}
+ /* Parif needs to be reset to a free partition */
+ *parif += BNXT_ULP_FREE_PARIF_BASE;
return 0;
}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index 39f801b2f..67f9319d6 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -167,31 +167,63 @@ bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
uint32_t ifindex;
uint16_t port_id, parif;
+ uint32_t mtype;
enum bnxt_ulp_direction_type dir;
/* get the direction details */
dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
+ /* read the port id details */
+ port_id = ULP_COMP_FLD_IDX_RD(params,
+ BNXT_ULP_CF_IDX_INCOMING_IF);
+ if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
+ port_id,
+ &ifindex)) {
+ BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
+ return;
+ }
+
if (dir == BNXT_ULP_DIR_INGRESS) {
- /* read the port id details */
- port_id = ULP_COMP_FLD_IDX_RD(params,
- BNXT_ULP_CF_IDX_INCOMING_IF);
- if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
- port_id,
- &ifindex)) {
- BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
- return;
- }
/* Set port PARIF */
if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
BNXT_ULP_PHY_PORT_PARIF, &parif)) {
BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
return;
}
- /* Parif needs to be reset to a free partition */
- parif += BNXT_ULP_FREE_PARIF_BASE;
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
parif);
+ } else {
+ /* Get the match port type */
+ mtype = ULP_COMP_FLD_IDX_RD(params,
+ BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
+ if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
+ 1);
+ /* Set VF func PARIF */
+ if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
+ BNXT_ULP_VF_FUNC_PARIF,
+ &parif)) {
+ BNXT_TF_DBG(ERR,
+ "ParseErr:ifindex is not valid\n");
+ return;
+ }
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
+ parif);
+ } else {
+ /* Set DRV func PARIF */
+ if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
+ BNXT_ULP_DRV_FUNC_PARIF,
+ &parif)) {
+ BNXT_TF_DBG(ERR,
+ "ParseErr:ifindex is not valid\n");
+ return;
+ }
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
+ parif);
+ }
}
}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
index 31fe90577..58b581cf6 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_act.c
@@ -1808,11 +1808,19 @@ struct bnxt_ulp_mapper_result_field_info ulp_act_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_ACT_BIT_THEN_CONST_ELSE_CONST,
.result_operand = {
- BNXT_ULP_SYM_DECAP_FUNC_THRU_TUN,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 56) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 48) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 40) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 32) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 24) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 16) & 0xff,
+ ((uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP >> 8) & 0xff,
+ (uint64_t)BNXT_ULP_ACTION_BIT_VXLAN_DECAP & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 12,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
index 9de45cdc4..330c5ecdd 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
@@ -5058,7 +5058,9 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = {
},
{
.field_bit_size = 2,
- .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO,
+ .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
.spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
.spec_operand = {0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
@@ -5149,7 +5151,9 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = {
},
{
.field_bit_size = 2,
- .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO,
+ .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
.spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
.spec_operand = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
@@ -17054,11 +17058,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
.result_operand = {
- BNXT_ULP_SYM_VF_FUNC_PARIF,
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index c9fe1bc47..f08065b28 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -127,11 +127,12 @@ enum bnxt_ulp_cf_idx {
BNXT_ULP_CF_IDX_ACT_PORT_IS_SET = 35,
BNXT_ULP_CF_IDX_ACT_PORT_TYPE = 36,
BNXT_ULP_CF_IDX_MATCH_PORT_TYPE = 37,
- BNXT_ULP_CF_IDX_VF_TO_VF = 38,
- BNXT_ULP_CF_IDX_L3_HDR_CNT = 39,
- BNXT_ULP_CF_IDX_L4_HDR_CNT = 40,
- BNXT_ULP_CF_IDX_VFR_MODE = 41,
- BNXT_ULP_CF_IDX_LAST = 42
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP = 38,
+ BNXT_ULP_CF_IDX_VF_TO_VF = 39,
+ BNXT_ULP_CF_IDX_L3_HDR_CNT = 40,
+ BNXT_ULP_CF_IDX_L4_HDR_CNT = 41,
+ BNXT_ULP_CF_IDX_VFR_MODE = 42,
+ BNXT_ULP_CF_IDX_LAST = 43
};
enum bnxt_ulp_cond_opcode {
@@ -215,7 +216,8 @@ enum bnxt_ulp_mapper_opc {
BNXT_ULP_MAPPER_OPC_SET_TO_ENCAP_ACT_PROP_SZ = 8,
BNXT_ULP_MAPPER_OPC_IF_ACT_BIT_THEN_ACT_PROP_ELSE_CONST = 9,
BNXT_ULP_MAPPER_OPC_IF_ACT_BIT_THEN_CONST_ELSE_CONST = 10,
- BNXT_ULP_MAPPER_OPC_LAST = 11
+ BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF = 11,
+ BNXT_ULP_MAPPER_OPC_LAST = 12
};
enum bnxt_ulp_mark_db_opcode {
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 09/22] net/bnxt: ignore VLAN priority mask
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (7 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 08/22] net/bnxt: configure parif for the egress rules Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-27 10:30 ` Ferruh Yigit
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 10/22] net/bnxt: add egress template with VLAN tag match Ajit Khaparde
` (13 subsequent siblings)
22 siblings, 1 reply; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Kishore Padmanabha, Michael Baucom
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
This is a workaround for OVS installing offload rules that pass the
VLAN priority mask as a wild card, which is currently not supported.
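Concretely, the mask in question looks like the following at the rte_flow
level (an illustrative sketch, not part of this patch; the TCI field is
PCP:3 | DEI:1 | VID:12):

struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };    /* VID 100 */
struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0fff) }; /* VID exact, PCP/DEI wild card */

The parser change below skips the priority field of such a mask so the PCP
bits are simply ignored instead of being treated as an unsupported match.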
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index 67f9319d6..665f5d381 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -709,8 +709,17 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
vlan_tag |= ~ULP_VLAN_TAG_MASK;
vlan_tag = htons(vlan_tag);
+#ifdef ULP_DONT_IGNORE_TOS
ulp_rte_prsr_mask_copy(params, &idx, &priority,
sizeof(priority));
+#else
+ /*
+ * The priority field is ignored since OVS is setting it as
+ * wild card match and it is not supported. This is a work
+ * around and shall be addressed in the future.
+ */
+ idx += 1;
+#endif
ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
sizeof(vlan_tag));
ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
--
2.21.1 (Apple Git-122.3)
* Re: [dpdk-dev] [PATCH v3 09/22] net/bnxt: ignore VLAN priority mask
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 09/22] net/bnxt: ignore VLAN priority mask Ajit Khaparde
@ 2020-07-27 10:30 ` Ferruh Yigit
2020-07-28 5:22 ` Ajit Khaparde
0 siblings, 1 reply; 102+ messages in thread
From: Ferruh Yigit @ 2020-07-27 10:30 UTC (permalink / raw)
To: Ajit Khaparde, dev; +Cc: Kishore Padmanabha, Michael Baucom
On 7/24/2020 6:32 AM, Ajit Khaparde wrote:
> From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
>
> This is a workaround for OVS installing offload rules that pass the
> VLAN priority mask as a wild card, which is currently not supported.
>
> Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
> Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
> ---
> drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 9 +++++++++
> 1 file changed, 9 insertions(+)
>
> diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
> index 67f9319d6..665f5d381 100644
> --- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
> +++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
> @@ -709,8 +709,17 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
> vlan_tag |= ~ULP_VLAN_TAG_MASK;
> vlan_tag = htons(vlan_tag);
>
> +#ifdef ULP_DONT_IGNORE_TOS
> ulp_rte_prsr_mask_copy(params, &idx, &priority,
> sizeof(priority));
> +#else
> + /*
> + * The priority field is ignored since OVS is setting it as
> + * wild card match and it is not supported. This is a work
> + * around and shall be addressed in the future.
> + */
> + idx += 1;
> +#endif
> ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
> sizeof(vlan_tag));
> ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
>
Hi Ajit,
What sets 'ULP_DONT_IGNORE_TOS'? I don't see it in our build system. If the
'else' leg is always taken, why not drop the macro completely?
* Re: [dpdk-dev] [PATCH v3 09/22] net/bnxt: ignore VLAN priority mask
2020-07-27 10:30 ` Ferruh Yigit
@ 2020-07-28 5:22 ` Ajit Khaparde
0 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-28 5:22 UTC (permalink / raw)
To: Ferruh Yigit; +Cc: dpdk-dev, Kishore Padmanabha, Michael Baucom
On Mon, Jul 27, 2020 at 3:30 AM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
> On 7/24/2020 6:32 AM, Ajit Khaparde wrote:
> > From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
> >
> > This is a workaround for OVS installing offload rules that pass the
> > VLAN priority mask as a wild card, which is currently not supported.
> >
> > Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
> > Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
> > ---
> > drivers/net/bnxt/tf_ulp/ulp_rte_parser.c | 9 +++++++++
> > 1 file changed, 9 insertions(+)
> >
> > diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
> b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
> > index 67f9319d6..665f5d381 100644
> > --- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
> > +++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
> > @@ -709,8 +709,17 @@ ulp_rte_vlan_hdr_handler(const struct rte_flow_item
> *item,
> > vlan_tag |= ~ULP_VLAN_TAG_MASK;
> > vlan_tag = htons(vlan_tag);
> >
> > +#ifdef ULP_DONT_IGNORE_TOS
> > ulp_rte_prsr_mask_copy(params, &idx, &priority,
> > sizeof(priority));
> > +#else
> > + /*
> > + * The priority field is ignored since OVS is setting it as
> > + * wild card match and it is not supported. This is a work
> > + * around and shall be addressed in the future.
> > + */
> > + idx += 1;
> > +#endif
> > ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
> > sizeof(vlan_tag));
> > ulp_rte_prsr_mask_copy(params, &idx,
> &vlan_mask->inner_type,
> >
>
> Hi Ajit,
>
> What sets 'ULP_DONT_IGNORE_TOS'? I don't see it in our build system.
> If the 'else' leg is always taken, why not drop the macro completely?
>
Ferruh,
Yes. I will submit a v4 to take care of that.
Thanks
Ajit
* [dpdk-dev] [PATCH v3 10/22] net/bnxt: add egress template with VLAN tag match
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (8 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 09/22] net/bnxt: ignore VLAN priority mask Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 11/22] net/bnxt: modify tf shadow tcam to use tf hash Ajit Khaparde
` (12 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Kishore Padmanabha, Shahaji Bhosle
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Added egress template with VLAN tag match
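For reference, an egress rule of the shape these new templates cover could be
built through rte_flow roughly as below (an illustrative sketch only; the
port, VLAN ID and action list are placeholders, not part of this patch):

#include <rte_flow.h>

static struct rte_flow *
egress_vlan_flow_sketch(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .egress = 1 };
	struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };    /* VID 100 */
	struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0fff) }; /* match VID only */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &vlan_spec, .mask = &vlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}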
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Shahaji Bhosle <shahaji.bhosle@broadcom.com>
---
.../net/bnxt/tf_ulp/ulp_template_db_class.c | 501 +++++++++++++++++-
.../net/bnxt/tf_ulp/ulp_template_db_enum.h | 28 +-
2 files changed, 509 insertions(+), 20 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
index 330c5ecdd..41d1d8772 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_class.c
@@ -162,7 +162,31 @@ uint16_t ulp_class_sig_tbl[BNXT_ULP_CLASS_SIG_TBL_MAX_SZ] = {
[BNXT_ULP_CLASS_HID_01d1] = 151,
[BNXT_ULP_CLASS_HID_0319] = 152,
[BNXT_ULP_CLASS_HID_01cd] = 153,
- [BNXT_ULP_CLASS_HID_0305] = 154
+ [BNXT_ULP_CLASS_HID_0305] = 154,
+ [BNXT_ULP_CLASS_HID_01e2] = 155,
+ [BNXT_ULP_CLASS_HID_032a] = 156,
+ [BNXT_ULP_CLASS_HID_0650] = 157,
+ [BNXT_ULP_CLASS_HID_0198] = 158,
+ [BNXT_ULP_CLASS_HID_01de] = 159,
+ [BNXT_ULP_CLASS_HID_0316] = 160,
+ [BNXT_ULP_CLASS_HID_066c] = 161,
+ [BNXT_ULP_CLASS_HID_01a4] = 162,
+ [BNXT_ULP_CLASS_HID_01c2] = 163,
+ [BNXT_ULP_CLASS_HID_030a] = 164,
+ [BNXT_ULP_CLASS_HID_0670] = 165,
+ [BNXT_ULP_CLASS_HID_01b8] = 166,
+ [BNXT_ULP_CLASS_HID_003e] = 167,
+ [BNXT_ULP_CLASS_HID_02f6] = 168,
+ [BNXT_ULP_CLASS_HID_078c] = 169,
+ [BNXT_ULP_CLASS_HID_0044] = 170,
+ [BNXT_ULP_CLASS_HID_01d2] = 171,
+ [BNXT_ULP_CLASS_HID_031a] = 172,
+ [BNXT_ULP_CLASS_HID_0660] = 173,
+ [BNXT_ULP_CLASS_HID_01a8] = 174,
+ [BNXT_ULP_CLASS_HID_01ce] = 175,
+ [BNXT_ULP_CLASS_HID_0306] = 176,
+ [BNXT_ULP_CLASS_HID_067c] = 177,
+ [BNXT_ULP_CLASS_HID_01b4] = 178
};
struct bnxt_ulp_class_match_info ulp_class_match_list[] = {
@@ -2833,6 +2857,382 @@ struct bnxt_ulp_class_match_info ulp_class_match_list[] = {
BNXT_ULP_MATCH_TYPE_BITMASK_EM },
.class_tid = 21,
.wc_pri = 11
+ },
+ [155] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01e2,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 12
+ },
+ [156] = {
+ .class_hid = BNXT_ULP_CLASS_HID_032a,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 13
+ },
+ [157] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0650,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 14
+ },
+ [158] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0198,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 15
+ },
+ [159] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01de,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 16
+ },
+ [160] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0316,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 17
+ },
+ [161] = {
+ .class_hid = BNXT_ULP_CLASS_HID_066c,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 18
+ },
+ [162] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01a4,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 19
+ },
+ [163] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01c2,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 20
+ },
+ [164] = {
+ .class_hid = BNXT_ULP_CLASS_HID_030a,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 21
+ },
+ [165] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0670,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 22
+ },
+ [166] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01b8,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 23
+ },
+ [167] = {
+ .class_hid = BNXT_ULP_CLASS_HID_003e,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 24
+ },
+ [168] = {
+ .class_hid = BNXT_ULP_CLASS_HID_02f6,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 25
+ },
+ [169] = {
+ .class_hid = BNXT_ULP_CLASS_HID_078c,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 26
+ },
+ [170] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0044,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_UDP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 27
+ },
+ [171] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01d2,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 28
+ },
+ [172] = {
+ .class_hid = BNXT_ULP_CLASS_HID_031a,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 29
+ },
+ [173] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0660,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 30
+ },
+ [174] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01a8,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV4 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 31
+ },
+ [175] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01ce,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 32
+ },
+ [176] = {
+ .class_hid = BNXT_ULP_CLASS_HID_0306,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 33
+ },
+ [177] = {
+ .class_hid = BNXT_ULP_CLASS_HID_067c,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_TYPE |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 34
+ },
+ [178] = {
+ .class_hid = BNXT_ULP_CLASS_HID_01b4,
+ .hdr_sig = { .bits =
+ BNXT_ULP_HDR_BIT_O_ETH |
+ BNXT_ULP_HDR_BIT_OO_VLAN |
+ BNXT_ULP_HDR_BIT_O_IPV6 |
+ BNXT_ULP_HDR_BIT_O_TCP |
+ BNXT_ULP_FLOW_DIR_BITMASK_EGR },
+ .field_sig = { .bits =
+ BNXT_ULP_HF21_BITMASK_O_ETH_SMAC |
+ BNXT_ULP_HF21_BITMASK_O_ETH_DMAC |
+ BNXT_ULP_HF21_BITMASK_OO_VLAN_VID |
+ BNXT_ULP_MATCH_TYPE_BITMASK_EM },
+ .class_tid = 21,
+ .wc_pri = 35
}
};
@@ -3236,7 +3636,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE,
- .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW,
+ .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
.srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
@@ -3255,7 +3655,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE,
- .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW,
+ .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
.srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
@@ -3346,7 +3746,7 @@ struct bnxt_ulp_mapper_tbl_info ulp_class_tbl_list[] = {
},
{
.resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE,
- .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW,
+ .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH,
.direction = TF_DIR_RX,
.priority = BNXT_ULP_PRIORITY_LEVEL_0,
.srch_b4_alloc = BNXT_ULP_SEARCH_BEFORE_ALLOC_NO,
@@ -12534,8 +12934,18 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = {
},
{
.field_bit_size = 12,
- .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO,
- .spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO
+ .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_HDR_FIELD,
+ .mask_operand = {
+ (BNXT_ULP_HF21_IDX_OO_VLAN_VID >> 8) & 0xff,
+ BNXT_ULP_HF21_IDX_OO_VLAN_VID & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_HDR_FIELD,
+ .spec_operand = {
+ (BNXT_ULP_HF21_IDX_OO_VLAN_VID >> 8) & 0xff,
+ BNXT_ULP_HF21_IDX_OO_VLAN_VID & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 12,
@@ -12594,8 +13004,15 @@ struct bnxt_ulp_mapper_class_key_field_info ulp_class_key_field_list[] = {
},
{
.field_bit_size = 2,
- .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO,
- .spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO
+ .mask_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .mask_operand = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ .spec_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_COMP_FIELD,
+ .spec_operand = {
+ (BNXT_ULP_CF_IDX_O_VTAG_NUM >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_O_VTAG_NUM & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 2,
@@ -16307,11 +16724,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
.result_operand = {
- BNXT_ULP_SYM_VF_FUNC_PARIF,
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
@@ -16498,11 +16926,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
.result_operand = {
- BNXT_ULP_SYM_VF_FUNC_PARIF,
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
@@ -16689,7 +17128,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_ZERO
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
+ .result_operand = {
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
@@ -16876,11 +17330,22 @@ struct bnxt_ulp_mapper_result_field_info ulp_class_result_field_list[] = {
},
{
.field_bit_size = 4,
- .result_opcode = BNXT_ULP_MAPPER_OPC_SET_TO_CONSTANT,
+ .result_opcode = BNXT_ULP_MAPPER_OPC_IF_COMP_FIELD_THEN_CF_ELSE_CF,
.result_operand = {
- BNXT_ULP_SYM_VF_FUNC_PARIF,
+ (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_true = {
+ (BNXT_ULP_CF_IDX_VF_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ .result_operand_false = {
+ (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
},
{
.field_bit_size = 8,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
index f08065b28..ac651f63f 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_db_enum.h
@@ -11,7 +11,7 @@
#define BNXT_ULP_LOG2_MAX_NUM_DEV 2
#define BNXT_ULP_CACHE_TBL_MAX_SZ 4
#define BNXT_ULP_CLASS_SIG_TBL_MAX_SZ 2048
-#define BNXT_ULP_CLASS_MATCH_LIST_MAX_SZ 155
+#define BNXT_ULP_CLASS_MATCH_LIST_MAX_SZ 179
#define BNXT_ULP_CLASS_HID_LOW_PRIME 7919
#define BNXT_ULP_CLASS_HID_HIGH_PRIME 7907
#define BNXT_ULP_CLASS_HID_SHFTR 32
@@ -781,7 +781,31 @@ enum bnxt_ulp_class_hid {
BNXT_ULP_CLASS_HID_01d1 = 0x01d1,
BNXT_ULP_CLASS_HID_0319 = 0x0319,
BNXT_ULP_CLASS_HID_01cd = 0x01cd,
- BNXT_ULP_CLASS_HID_0305 = 0x0305
+ BNXT_ULP_CLASS_HID_0305 = 0x0305,
+ BNXT_ULP_CLASS_HID_01e2 = 0x01e2,
+ BNXT_ULP_CLASS_HID_032a = 0x032a,
+ BNXT_ULP_CLASS_HID_0650 = 0x0650,
+ BNXT_ULP_CLASS_HID_0198 = 0x0198,
+ BNXT_ULP_CLASS_HID_01de = 0x01de,
+ BNXT_ULP_CLASS_HID_0316 = 0x0316,
+ BNXT_ULP_CLASS_HID_066c = 0x066c,
+ BNXT_ULP_CLASS_HID_01a4 = 0x01a4,
+ BNXT_ULP_CLASS_HID_01c2 = 0x01c2,
+ BNXT_ULP_CLASS_HID_030a = 0x030a,
+ BNXT_ULP_CLASS_HID_0670 = 0x0670,
+ BNXT_ULP_CLASS_HID_01b8 = 0x01b8,
+ BNXT_ULP_CLASS_HID_003e = 0x003e,
+ BNXT_ULP_CLASS_HID_02f6 = 0x02f6,
+ BNXT_ULP_CLASS_HID_078c = 0x078c,
+ BNXT_ULP_CLASS_HID_0044 = 0x0044,
+ BNXT_ULP_CLASS_HID_01d2 = 0x01d2,
+ BNXT_ULP_CLASS_HID_031a = 0x031a,
+ BNXT_ULP_CLASS_HID_0660 = 0x0660,
+ BNXT_ULP_CLASS_HID_01a8 = 0x01a8,
+ BNXT_ULP_CLASS_HID_01ce = 0x01ce,
+ BNXT_ULP_CLASS_HID_0306 = 0x0306,
+ BNXT_ULP_CLASS_HID_067c = 0x067c,
+ BNXT_ULP_CLASS_HID_01b4 = 0x01b4
};
enum bnxt_ulp_act_hid {
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 11/22] net/bnxt: modify tf shadow tcam to use tf hash
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (9 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 10/22] net/bnxt: add egress template with VLAN tag match Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 12/22] net/bnxt: add shadow table capability with search Ajit Khaparde
` (11 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Mike Baucom, Kishore Padmanabha, Farah Smith
From: Mike Baucom <michael.baucom@broadcom.com>
Removed the hash calculation from tf_shadow_tcam in favor of using a
new common implementation.
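The table this patch deletes is the standard lookup table for the 0xedb88320
polynomial, so tf_hash_calc_crc32() is expected to produce the same values.
A minimal sketch of an equivalence check (crc32_ref() is hypothetical and
assumes the common helper keeps the same last-byte-first ordering as the
removed routine):

#include <stdint.h>

static uint32_t
crc32_ref(const uint8_t *key, uint32_t len)
{
	uint32_t tbl[256];
	uint32_t crc = ~0U;
	uint32_t i, j;

	/* Regenerate the table that the patch removes. */
	for (i = 0; i < 256; i++) {
		uint32_t c = i;

		for (j = 0; j < 8; j++)
			c = (c & 1) ? (c >> 1) ^ 0xedb88320 : c >> 1;
		tbl[i] = c;
	}
	/* Same byte order as the old tf_shadow_tcam_crc32_calc(). */
	while (len--)
		crc = tbl[(crc ^ key[len]) & 0xff] ^ (crc >> 8);
	return ~crc;
}

/* Expected to hold for any buffer:
 * crc32_ref(key, len) == tf_hash_calc_crc32(key, len)
 */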
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Farah Smith <farah.smith@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 77 +----------------------
1 file changed, 2 insertions(+), 75 deletions(-)
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
index 51aae4ff6..beaea0340 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
@@ -7,6 +7,7 @@
#include "tf_util.h"
#include "tfp.h"
#include "tf_shadow_tcam.h"
+#include "tf_hash.h"
/**
* The implementation includes 3 tables per tcam table type.
@@ -164,74 +165,6 @@ struct tf_shadow_tcam_db {
struct tf_shadow_tcam_ctxt *ctxt[TF_TCAM_TBL_TYPE_MAX];
};
-/* CRC polynomial 0xedb88320 */
-static const uint32_t tf_shadow_tcam_crc32tbl[] = {
- 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
- 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
- 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
- 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
- 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
- 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
- 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
- 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
- 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
- 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
- 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
- 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
- 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
- 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
- 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
- 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
- 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
- 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
- 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
- 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
- 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
- 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
- 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
- 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
- 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
- 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
- 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
- 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
- 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
- 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
- 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
- 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
- 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
- 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
- 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
- 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
- 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
- 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
- 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
- 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
- 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
- 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
- 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
- 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
- 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
- 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
- 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
- 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
- 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
- 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
- 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
- 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
- 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
- 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
- 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
- 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
- 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
- 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
- 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
- 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
- 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
- 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
- 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
- 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
-};
-
/**
* Returns the number of entries in the contexts shadow table.
*/
@@ -289,13 +222,7 @@ tf_shadow_tcam_res_cpy(struct tf_shadow_tcam_ctxt *ctxt,
static uint32_t
tf_shadow_tcam_crc32_calc(uint8_t *key, uint32_t len)
{
- uint32_t crc = ~0U;
-
- while (len--)
- crc = tf_shadow_tcam_crc32tbl[(crc ^ key[len]) & 0xff] ^
- (crc >> 8);
-
- return ~crc;
+ return tf_hash_calc_crc32(key, len);
}
/**
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 12/22] net/bnxt: add shadow table capability with search
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (10 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 11/22] net/bnxt: modify tf shadow tcam to use tf hash Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 13/22] net/bnxt: modify ulp mapper to use tbl search Ajit Khaparde
` (10 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Mike Baucom, Farah Smith
From: Mike Baucom <michael.baucom@broadcom.com>
- Added Index Table shadow tables for searching
- Added Search API to allow reuse of Table entries
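A minimal caller-side sketch of the new API (field names follow the
tf_search_tbl_entry_parms definition added below; the session must have been
opened with shadow copy enabled, and the record contents here are
placeholders):

uint8_t encap_rec[16] = { 0 };  /* result data the caller wants programmed */
struct tf_search_tbl_entry_parms sparms = { 0 };
int rc;

sparms.dir = TF_DIR_TX;
sparms.type = TF_TBL_TYPE_ACT_ENCAP_16B;
sparms.result = encap_rec;
sparms.result_sz_in_bytes = sizeof(encap_rec);
sparms.alloc = 1;               /* allocate an index on miss */

rc = tf_search_tbl_entry(tfp, &sparms);
if (rc)
	return rc;

if (sparms.hit) {
	/* Identical entry already programmed: reuse sparms.idx,
	 * its ref_cnt has been incremented for us.
	 */
} else if (sparms.search_status == MISS) {
	/* New index allocated: write encap_rec at sparms.idx,
	 * e.g. with tf_set_tbl_entry().
	 */
} else {
	/* REJECT: no free entries, the flow should be rejected. */
}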
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Farah Smith <farah.smith@broadcom.com>
---
drivers/net/bnxt/tf_core/tf_core.c | 66 +-
drivers/net/bnxt/tf_core/tf_core.h | 79 ++-
drivers/net/bnxt/tf_core/tf_device_p4.c | 2 +-
drivers/net/bnxt/tf_core/tf_shadow_tbl.c | 768 +++++++++++++++++++++-
drivers/net/bnxt/tf_core/tf_shadow_tbl.h | 124 ++--
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 6 +
drivers/net/bnxt/tf_core/tf_tbl.c | 246 ++++++-
drivers/net/bnxt/tf_core/tf_tbl.h | 22 +-
drivers/net/bnxt/tf_core/tf_tcam.h | 2 +-
9 files changed, 1211 insertions(+), 104 deletions(-)
diff --git a/drivers/net/bnxt/tf_core/tf_core.c b/drivers/net/bnxt/tf_core/tf_core.c
index ca3280b6b..0dbde1de2 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -75,7 +75,6 @@ tf_open_session(struct tf *tfp,
/* Session vs session client is decided in
* tf_session_open_session()
*/
- printf("TF_OPEN, %s\n", parms->ctrl_chan_name);
rc = tf_session_open_session(tfp, &oparms);
/* Logging handled by tf_session_open_session */
if (rc)
@@ -953,6 +952,71 @@ tf_alloc_tbl_entry(struct tf *tfp,
return 0;
}
+int
+tf_search_tbl_entry(struct tf *tfp,
+ struct tf_search_tbl_entry_parms *parms)
+{
+ int rc;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tf_tbl_alloc_search_parms sparms;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ if (dev->ops->tf_dev_alloc_search_tbl == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ memset(&sparms, 0, sizeof(struct tf_tbl_alloc_search_parms));
+ sparms.dir = parms->dir;
+ sparms.type = parms->type;
+ sparms.result = parms->result;
+ sparms.result_sz_in_bytes = parms->result_sz_in_bytes;
+ sparms.alloc = parms->alloc;
+ sparms.tbl_scope_id = parms->tbl_scope_id;
+ rc = dev->ops->tf_dev_alloc_search_tbl(tfp, &sparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: TBL allocation failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Return the outputs from the search */
+ parms->hit = sparms.hit;
+ parms->search_status = sparms.search_status;
+ parms->ref_cnt = sparms.ref_cnt;
+ parms->idx = sparms.idx;
+
+ return 0;
+}
+
int
tf_free_tbl_entry(struct tf *tfp,
struct tf_free_tbl_entry_parms *parms)
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index 349a1f1a7..db1093515 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -291,9 +291,9 @@ enum tf_tcam_tbl_type {
};
/**
- * TCAM SEARCH STATUS
+ * SEARCH STATUS
*/
-enum tf_tcam_search_status {
+enum tf_search_status {
/** The entry was not found, but an idx was allocated if requested. */
MISS,
/** The entry was found, and the result/idx are valid */
@@ -1011,7 +1011,7 @@ struct tf_search_tcam_entry_parms {
/**
* [out] Search result status (hit, miss, reject)
*/
- enum tf_tcam_search_status search_status;
+ enum tf_search_status search_status;
/**
* [out] Current refcnt after allocation
*/
@@ -1285,6 +1285,79 @@ int tf_free_tcam_entry(struct tf *tfp,
* @ref tf_bulk_get_tbl_entry
*/
+/**
+ * tf_search_tbl_entry parameter definition
+ */
+struct tf_search_tbl_entry_parms {
+ /**
+ * [in] Receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] Type of the allocation
+ */
+ enum tf_tbl_type type;
+ /**
+ * [in] Table scope identifier (ignored unless TF_TBL_TYPE_EXT)
+ */
+ uint32_t tbl_scope_id;
+ /**
+ * [in] Result data to search for
+ */
+ uint8_t *result;
+ /**
+ * [in] Result data size in bytes
+ */
+ uint16_t result_sz_in_bytes;
+ /**
+ * [in] Allocate on miss.
+ */
+ uint8_t alloc;
+ /**
+ * [out] Set if matching entry found
+ */
+ uint8_t hit;
+ /**
+ * [out] Search result status (hit, miss, reject)
+ */
+ enum tf_search_status search_status;
+ /**
+ * [out] Current ref count after allocation
+ */
+ uint16_t ref_cnt;
+ /**
+ * [out] Idx of allocated entry or found entry
+ */
+ uint32_t idx;
+};
+
+/**
+ * search Table Entry (experimental)
+ *
+ * This function searches the shadow copy of an index table for a matching
+ * entry. The result data must match for hit to be set. Only TruFlow core
+ * data is accessed. If shadow_copy is not enabled, an error is returned.
+ *
+ * Implementation:
+ *
+ * A hash is performed on the result data and mapped to a shadow copy entry
+ * where the result is populated. If the result matches the entry, hit is set,
+ * ref_cnt is incremented (if alloc), and the search status indicates what
+ * action the caller can take regarding setting the entry.
+ *
+ * search status should be used as follows:
+ * - On MISS, the caller should set the result into the returned index.
+ *
+ * - On REJECT, the caller should reject the flow since there are no resources.
+ *
+ * - On Hit, the matching index is returned to the caller. Additionally, the
+ * ref_cnt is updated.
+ *
+ * Also returns success or failure code.
+ */
+int tf_search_tbl_entry(struct tf *tfp,
+ struct tf_search_tbl_entry_parms *parms);
+
/**
* tf_alloc_tbl_entry parameter definition
*/
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.c b/drivers/net/bnxt/tf_core/tf_device_p4.c
index afb60989e..fe8dec3af 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.c
@@ -126,7 +126,7 @@ const struct tf_dev_ops tf_dev_ops_p4 = {
.tf_dev_alloc_ext_tbl = tf_tbl_ext_alloc,
.tf_dev_free_tbl = tf_tbl_free,
.tf_dev_free_ext_tbl = tf_tbl_ext_free,
- .tf_dev_alloc_search_tbl = NULL,
+ .tf_dev_alloc_search_tbl = tf_tbl_alloc_search,
.tf_dev_set_tbl = tf_tbl_set,
.tf_dev_set_ext_tbl = tf_tbl_ext_common_set,
.tf_dev_get_tbl = tf_tbl_get,
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
index 8f2b6de70..019a26eba 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
@@ -3,61 +3,785 @@
* All rights reserved.
*/
-#include <rte_common.h>
-
+#include "tf_common.h"
+#include "tf_util.h"
+#include "tfp.h"
+#include "tf_core.h"
#include "tf_shadow_tbl.h"
+#include "tf_hash.h"
/**
- * Shadow table DB element
+ * The implementation includes 3 tables per table type.
+ * - hash table
+ * - sized so that a minimum of 4 slots per shadow entry are available to
+ * minimize the likelihood of collisions.
+ * - shadow key table
+ * - sized to the number of entries requested and is directly indexed
+ * - the index is zero based and is the table index - the base address
+ * - the data associated with the entry is stored in the key table.
+ * - The stored key is actually the data associated with the entry.
+ * - shadow result table
+ * - the result table is stored separately since it only needs to be accessed
+ * when the key matches.
+ * - the result has a back pointer to the hash table via the hb handle. The
+ * hb handle is a 32 bit represention of the hash with a valid bit, bucket
+ * element index, and the hash index. It is necessary to store the hb handle
+ * with the result since subsequent removes only provide the table index.
+ *
+ * - Max entries is limited in the current implementation since bit 15 is the
+ * valid bit in the hash table.
+ * - A 16bit hash is calculated and masked based on the number of entries
+ * - 64b wide bucket is used and broken into 4x16bit elements.
+ * This decision is based on quicker bucket scanning to determine if any
+ * elements are in use.
+ * - bit 15 of each bucket element is the valid bit; this is done to prevent having
+ * to read the larger key/result data for determining VALID. It also aids
+ * in the more efficient scanning of the bucket for slot usage.
*/
-struct tf_shadow_tbl_element {
- /**
- * Hash table
- */
- void *hash;
- /**
- * Reference count, array of number of table type entries
- */
- uint16_t *ref_count;
+/*
+ * The maximum number of shadow entries supported. The value also doubles as
+ * the maximum number of hash buckets. There are only 15 bits of data per
+ * bucket to point to the shadow tables.
+ */
+#define TF_SHADOW_ENTRIES_MAX (1 << 15)
+
+/* The number of elements(BE) per hash bucket (HB) */
+#define TF_SHADOW_HB_NUM_ELEM (4)
+#define TF_SHADOW_BE_VALID (1 << 15)
+#define TF_SHADOW_BE_IS_VALID(be) (((be) & TF_SHADOW_BE_VALID) != 0)
+
+/**
+ * The hash bucket handle is 32b
+ * - bit 31, the Valid bit
+ * - bit 29-30, the element
+ * - bits 0-15, the hash idx (is masked based on the allocated size)
+ */
+#define TF_SHADOW_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
+#define TF_SHADOW_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
+ ((be) << 29) | (idx))
+
+#define TF_SHADOW_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
+ (TF_SHADOW_HB_NUM_ELEM - 1))
+
+#define TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hdl)((hdl) & \
+ (ctxt)->hash_ctxt.hid_mask)
+
+/**
+ * The idx provided by the caller is within a region, so currently the base is
+ * either added or subtracted from the idx to ensure it can be used as a
+ * compressed index
+ */
+
+/* Convert the table index to a shadow index */
+#define TF_SHADOW_IDX_TO_SHIDX(ctxt, idx) ((idx) - \
+ (ctxt)->shadow_ctxt.base_addr)
+
+/* Convert the shadow index to a tbl index */
+#define TF_SHADOW_SHIDX_TO_IDX(ctxt, idx) ((idx) + \
+ (ctxt)->shadow_ctxt.base_addr)
+
+/* Simple helper masks for clearing an element from the bucket */
+#define TF_SHADOW_BE0_MASK_CLEAR(hb) ((hb) & 0xffffffffffff0000ull)
+#define TF_SHADOW_BE1_MASK_CLEAR(hb) ((hb) & 0xffffffff0000ffffull)
+#define TF_SHADOW_BE2_MASK_CLEAR(hb) ((hb) & 0xffff0000ffffffffull)
+#define TF_SHADOW_BE3_MASK_CLEAR(hb) ((hb) & 0x0000ffffffffffffull)
+
+/**
+ * This should be coming from external, but for now it is assumed that no key
+ * is greater than 512 bits (64B). This makes allocation of the key table
+ * easier without having to allocate on the fly.
+ */
+#define TF_SHADOW_MAX_KEY_SZ 64
+
+/*
+ * Local only defines for the internal data.
+ */
+
+/**
+ * tf_shadow_tbl_shadow_key_entry is the key entry of the key table.
+ * The key stored in the table is the result data of the index table.
+ */
+struct tf_shadow_tbl_shadow_key_entry {
+ uint8_t key[TF_SHADOW_MAX_KEY_SZ];
+};
+
+/**
+ * tf_shadow_tbl_shadow_result_entry is the result table entry.
+ * The result table writes are broken into two phases:
+ * - The search phase, which stores the hb_handle and key size and
+ * - The set phase, which writes the refcnt
+ */
+struct tf_shadow_tbl_shadow_result_entry {
+ uint16_t key_size;
+ uint32_t refcnt;
+ uint32_t hb_handle;
+};
+
+/**
+ * tf_shadow_tbl_shadow_ctxt holds all information for accessing the key and
+ * result tables.
+ */
+struct tf_shadow_tbl_shadow_ctxt {
+ struct tf_shadow_tbl_shadow_key_entry *sh_key_tbl;
+ struct tf_shadow_tbl_shadow_result_entry *sh_res_tbl;
+ uint32_t base_addr;
+ uint16_t num_entries;
+ uint16_t alloc_idx;
+};
+
+/**
+ * tf_shadow_tbl_hash_ctxt holds all information related to accessing the hash
+ * table.
+ */
+struct tf_shadow_tbl_hash_ctxt {
+ uint64_t *hashtbl;
+ uint16_t hid_mask;
+ uint16_t hash_entries;
};
/**
- * Shadow table DB definition
+ * tf_shadow_tbl_ctxt holds the hash and shadow tables for the current shadow
+ * table db. This structure is per table type as each table type has
+ * its own shadow and hash table.
+ */
+struct tf_shadow_tbl_ctxt {
+ struct tf_shadow_tbl_shadow_ctxt shadow_ctxt;
+ struct tf_shadow_tbl_hash_ctxt hash_ctxt;
+};
+
+/**
+ * tf_shadow_tbl_db is the allocated db structure returned as an opaque
+ * void * pointer to the caller during create db. It holds the pointers for
+ * each table associated with the db.
*/
struct tf_shadow_tbl_db {
- /**
- * The DB consists of an array of elements
- */
- struct tf_shadow_tbl_element *db;
+ /* Each context holds the shadow and hash table information */
+ struct tf_shadow_tbl_ctxt *ctxt[TF_TBL_TYPE_MAX];
};
+/**
+ * Simple routine that decides what table types can be searchable.
+ *
+ */
+static int tf_shadow_tbl_is_searchable(enum tf_tbl_type type)
+{
+ int rc = 0;
+
+ switch (type) {
+ case TF_TBL_TYPE_ACT_ENCAP_8B:
+ case TF_TBL_TYPE_ACT_ENCAP_16B:
+ case TF_TBL_TYPE_ACT_ENCAP_32B:
+ case TF_TBL_TYPE_ACT_ENCAP_64B:
+ case TF_TBL_TYPE_ACT_SP_SMAC:
+ case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
+ case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
+ case TF_TBL_TYPE_ACT_MODIFY_IPV4:
+ case TF_TBL_TYPE_ACT_MODIFY_SPORT:
+ case TF_TBL_TYPE_ACT_MODIFY_DPORT:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ };
+
+ return rc;
+}
+
+/**
+ * Returns the number of entries in the contexts shadow table.
+ */
+static inline uint16_t
+tf_shadow_tbl_sh_num_entries_get(struct tf_shadow_tbl_ctxt *ctxt)
+{
+ return ctxt->shadow_ctxt.num_entries;
+}
+
+/**
+ * Compare the give key with the key in the shadow table.
+ *
+ * Returns 0 if the keys match
+ */
+static int
+tf_shadow_tbl_key_cmp(struct tf_shadow_tbl_ctxt *ctxt,
+ uint8_t *key,
+ uint16_t sh_idx,
+ uint16_t size)
+{
+ if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
+ sh_idx >= tf_shadow_tbl_sh_num_entries_get(ctxt) || !key)
+ return -1;
+
+ return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
+}
+
+/**
+ * Free the memory associated with the context.
+ */
+static void
+tf_shadow_tbl_ctxt_delete(struct tf_shadow_tbl_ctxt *ctxt)
+{
+ if (!ctxt)
+ return;
+
+ tfp_free(ctxt->hash_ctxt.hashtbl);
+ tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
+ tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
+}
+
+/**
+ * The TF Shadow TBL context is per TBL and holds all information relating to
+ * managing the shadow and search capability. This routine allocates data that
+ * needs to be deallocated by tf_shadow_tbl_ctxt_delete prior to deleting
+ * the shadow db.
+ */
+static int
+tf_shadow_tbl_ctxt_create(struct tf_shadow_tbl_ctxt *ctxt,
+ uint16_t num_entries,
+ uint16_t base_addr)
+{
+ struct tfp_calloc_parms cparms;
+ uint16_t hash_size = 1;
+ uint16_t hash_mask;
+ int rc;
+
+ /* Hash table is a power of two that holds the number of entries */
+ if (num_entries > TF_SHADOW_ENTRIES_MAX) {
+ TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
+ num_entries,
+ TF_SHADOW_ENTRIES_MAX);
+ return -ENOMEM;
+ }
+
+ while (hash_size < num_entries)
+ hash_size = hash_size << 1;
+
+ hash_mask = hash_size - 1;
+
+ /* Allocate the hash table */
+ cparms.nitems = hash_size;
+ cparms.size = sizeof(uint64_t);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->hash_ctxt.hashtbl = cparms.mem_va;
+ ctxt->hash_ctxt.hid_mask = hash_mask;
+ ctxt->hash_ctxt.hash_entries = hash_size;
+
+ /* allocate the shadow tables */
+ /* allocate the shadow key table */
+ cparms.nitems = num_entries;
+ cparms.size = sizeof(struct tf_shadow_tbl_shadow_key_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;
+
+ /* allocate the shadow result table */
+ cparms.nitems = num_entries;
+ cparms.size = sizeof(struct tf_shadow_tbl_shadow_result_entry);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+ ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;
+
+ ctxt->shadow_ctxt.num_entries = num_entries;
+ ctxt->shadow_ctxt.base_addr = base_addr;
+
+ return 0;
+error:
+ tf_shadow_tbl_ctxt_delete(ctxt);
+
+ return -ENOMEM;
+}
+
+/**
+ * Get a shadow table context given the db and the table type
+ */
+static struct tf_shadow_tbl_ctxt *
+tf_shadow_tbl_ctxt_get(struct tf_shadow_tbl_db *shadow_db,
+ enum tf_tbl_type type)
+{
+ if (type >= TF_TBL_TYPE_MAX ||
+ !shadow_db ||
+ !shadow_db->ctxt[type])
+ return NULL;
+
+ return shadow_db->ctxt[type];
+}
+
+/**
+ * Sets the hash entry into the table given the table context, hash bucket
+ * handle, and shadow index.
+ */
+static inline int
+tf_shadow_tbl_set_hash_entry(struct tf_shadow_tbl_ctxt *ctxt,
+ uint32_t hb_handle,
+ uint16_t sh_idx)
+{
+ uint16_t hid = TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ uint16_t be = TF_SHADOW_HB_HANDLE_BE_GET(hb_handle);
+ uint64_t entry = sh_idx | TF_SHADOW_BE_VALID;
+
+ if (hid >= ctxt->hash_ctxt.hash_entries)
+ return -EINVAL;
+
+ ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
+ return 0;
+}
+
+/**
+ * Clears the hash entry given the TBL context and hash bucket handle.
+ */
+static inline void
+tf_shadow_tbl_clear_hash_entry(struct tf_shadow_tbl_ctxt *ctxt,
+ uint32_t hb_handle)
+{
+ uint16_t hid, be;
+ uint64_t *bucket;
+
+ if (!TF_SHADOW_HB_HANDLE_IS_VALID(hb_handle))
+ return;
+
+ hid = TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hb_handle);
+ be = TF_SHADOW_HB_HANDLE_BE_GET(hb_handle);
+ bucket = &ctxt->hash_ctxt.hashtbl[hid];
+
+ switch (be) {
+ case 0:
+ *bucket = TF_SHADOW_BE0_MASK_CLEAR(*bucket);
+ break;
+ case 1:
+ *bucket = TF_SHADOW_BE1_MASK_CLEAR(*bucket);
+ break;
+ case 2:
+ *bucket = TF_SHADOW_BE2_MASK_CLEAR(*bucket);
+ break;
+ case 3:
+ *bucket = TF_SHADOW_BE3_MASK_CLEAR(*bucket);
+ break;
+ default:
+ /*
+ * BE_GET masks the handle down to the valid element range, so this
+ * cannot happen.
+ */
+ break;
+ }
+}
+
+/**
+ * Clears the shadow key and result entries given the table context and
+ * shadow index.
+ */
+static void
+tf_shadow_tbl_clear_sh_entry(struct tf_shadow_tbl_ctxt *ctxt,
+ uint16_t sh_idx)
+{
+ struct tf_shadow_tbl_shadow_key_entry *sk_entry;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (sh_idx >= tf_shadow_tbl_sh_num_entries_get(ctxt))
+ return;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];
+
+ /*
+ * memset key/result to zero for now, possibly leave the data alone
+ * in the future and rely on the valid bit in the hash table.
+ */
+ memset(sk_entry, 0, sizeof(struct tf_shadow_tbl_shadow_key_entry));
+ memset(sr_entry, 0, sizeof(struct tf_shadow_tbl_shadow_result_entry));
+}
+
+/**
+ * Binds the allocated tbl index with the hash and shadow tables.
+ * The entry will be incomplete until the set has happened with the result
+ * data.
+ */
int
-tf_shadow_tbl_create_db(struct tf_shadow_tbl_create_db_parms *parms __rte_unused)
+tf_shadow_tbl_bind_index(struct tf_shadow_tbl_bind_index_parms *parms)
{
+ int rc;
+ uint16_t idx, len;
+ struct tf_shadow_tbl_ctxt *ctxt;
+ struct tf_shadow_tbl_db *shadow_db;
+ struct tf_shadow_tbl_shadow_key_entry *sk_entry;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (!parms || !TF_SHADOW_HB_HANDLE_IS_VALID(parms->hb_handle) ||
+ !parms->data) {
+ TFP_DRV_LOG(ERR, "Invalid parms\n");
+ return -EINVAL;
+ }
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ ctxt = tf_shadow_tbl_ctxt_get(shadow_db, parms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
+ tf_tbl_type_2_str(parms->type));
+ return -EINVAL;
+ }
+
+ idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, parms->idx);
+ len = parms->data_sz_in_bytes;
+ if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt) ||
+ len > TF_SHADOW_MAX_KEY_SZ) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid len (%d) > %d || oob idx %d\n",
+ tf_dir_2_str(parms->dir),
+ tf_tbl_type_2_str(parms->type),
+ len,
+ TF_SHADOW_MAX_KEY_SZ, idx);
+
+ return -EINVAL;
+ }
+
+ rc = tf_shadow_tbl_set_hash_entry(ctxt, parms->hb_handle, idx);
+ if (rc)
+ return -EINVAL;
+
+ sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[idx];
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+
+ /* For tables, the data is the key */
+ memcpy(sk_entry->key, parms->data, len);
+
+ /* Write the result table */
+ sr_entry->key_size = len;
+ sr_entry->hb_handle = parms->hb_handle;
+ sr_entry->refcnt = 1;
+
return 0;
}
+/**
+ * Deletes hash/shadow information if no more references.
+ *
+ * Always returns 0. On return, fparms->ref_cnt is 0 when the caller should
+ * delete the table entry in hardware; otherwise it holds the remaining
+ * number of references to the entry.
+ */
int
-tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms __rte_unused)
+tf_shadow_tbl_remove(struct tf_shadow_tbl_remove_parms *parms)
{
+ uint16_t idx;
+ uint32_t hb_handle;
+ struct tf_shadow_tbl_ctxt *ctxt;
+ struct tf_shadow_tbl_db *shadow_db;
+ struct tf_tbl_free_parms *fparms;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (!parms || !parms->fparms) {
+ TFP_DRV_LOG(ERR, "Invalid parms\n");
+ return -EINVAL;
+ }
+
+ fparms = parms->fparms;
+ if (!tf_shadow_tbl_is_searchable(fparms->type))
+ return 0;
+ /*
+ * Initialize the ref count to zero. The default would be to remove
+ * the entry.
+ */
+ fparms->ref_cnt = 0;
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ ctxt = tf_shadow_tbl_ctxt_get(shadow_db, fparms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
+ tf_tbl_type_2_str(fparms->type));
+ return 0;
+ }
+
+ idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, fparms->idx);
+ if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt)) {
+ TFP_DRV_LOG(DEBUG, "%s %d >= %d\n",
+ tf_tbl_type_2_str(fparms->type),
+ fparms->idx,
+ tf_shadow_tbl_sh_num_entries_get(ctxt));
+ return 0;
+ }
+
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+ if (sr_entry->refcnt <= 1) {
+ hb_handle = sr_entry->hb_handle;
+ tf_shadow_tbl_clear_hash_entry(ctxt, hb_handle);
+ tf_shadow_tbl_clear_sh_entry(ctxt, idx);
+ } else {
+ sr_entry->refcnt--;
+ fparms->ref_cnt = sr_entry->refcnt;
+ }
+
return 0;
}
int
-tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms __rte_unused)
+tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms)
{
+ uint16_t len;
+ uint64_t bucket;
+ uint32_t i, hid32;
+ struct tf_shadow_tbl_ctxt *ctxt;
+ struct tf_shadow_tbl_db *shadow_db;
+ uint16_t hid16, hb_idx, hid_mask, shtbl_idx, shtbl_key, be_valid;
+ struct tf_tbl_alloc_search_parms *sparms;
+ uint32_t be_avail = TF_SHADOW_HB_NUM_ELEM;
+
+ if (!parms || !parms->sparms) {
+ TFP_DRV_LOG(ERR, "tbl search with invalid parms\n");
+ return -EINVAL;
+ }
+
+ sparms = parms->sparms;
+ /* Check that caller was supposed to call search */
+ if (!tf_shadow_tbl_is_searchable(sparms->type))
+ return -EINVAL;
+
+ /* Initialize return values to invalid */
+ sparms->hit = 0;
+ sparms->search_status = REJECT;
+ parms->hb_handle = 0;
+ sparms->ref_cnt = 0;
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ ctxt = tf_shadow_tbl_ctxt_get(shadow_db, sparms->type);
+ if (!ctxt) {
+ TFP_DRV_LOG(ERR, "%s Unable to get tbl mgr context\n",
+ tf_tbl_type_2_str(sparms->type));
+ return -EINVAL;
+ }
+
+ len = sparms->result_sz_in_bytes;
+ if (len > TF_SHADOW_MAX_KEY_SZ || !sparms->result || !len) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid parms %d : %p\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tbl_type_2_str(sparms->type),
+ len,
+ sparms->result);
+ return -EINVAL;
+ }
+
+ /*
+ * Calculate the crc32
+ * Fold it to create a 16b value
+ * Reduce it to fit the table
+ */
+ hid32 = tf_hash_calc_crc32(sparms->result, len);
+ hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
+ hid_mask = ctxt->hash_ctxt.hid_mask;
+ hb_idx = hid16 & hid_mask;
+
+ bucket = ctxt->hash_ctxt.hashtbl[hb_idx];
+ if (!bucket) {
+ /* empty bucket means a miss and available entry */
+ sparms->search_status = MISS;
+ parms->hb_handle = TF_SHADOW_HB_HANDLE_CREATE(hb_idx, 0);
+ sparms->idx = 0;
+ return 0;
+ }
+
+ /* Set the avail to max so we can detect when there is an avail entry */
+ be_avail = TF_SHADOW_HB_NUM_ELEM;
+ for (i = 0; i < TF_SHADOW_HB_NUM_ELEM; i++) {
+ shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff);
+ be_valid = TF_SHADOW_BE_IS_VALID(shtbl_idx);
+ if (!be_valid) {
+ /* The element is avail, keep going */
+ be_avail = i;
+ continue;
+ }
+ /* There is a valid entry, compare it */
+ shtbl_key = shtbl_idx & ~TF_SHADOW_BE_VALID;
+ if (!tf_shadow_tbl_key_cmp(ctxt,
+ sparms->result,
+ shtbl_key,
+ len)) {
+ /*
+ * It matches, increment the ref count if the caller
+ * requested allocation and return the info
+ */
+ if (sparms->alloc)
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt =
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt + 1;
+
+ sparms->hit = 1;
+ sparms->search_status = HIT;
+ parms->hb_handle =
+ TF_SHADOW_HB_HANDLE_CREATE(hb_idx, i);
+ sparms->idx = TF_SHADOW_SHIDX_TO_IDX(ctxt, shtbl_key);
+ sparms->ref_cnt =
+ ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt;
+
+ return 0;
+ }
+ }
+
+ /* No hits, return avail entry if exists */
+ if (be_avail < TF_SHADOW_HB_NUM_ELEM) {
+ /*
+ * There is an available hash entry, so return MISS and the
+ * hash handle for the subsequent bind.
+ */
+ parms->hb_handle = TF_SHADOW_HB_HANDLE_CREATE(hb_idx, be_avail);
+ sparms->search_status = MISS;
+ sparms->hit = 0;
+ sparms->idx = 0;
+ } else {
+ /* No room for the entry in the hash table, must REJECT */
+ sparms->search_status = REJECT;
+ }
+
return 0;
}
int
-tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms __rte_unused)
+tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms)
{
+ uint16_t idx;
+ struct tf_shadow_tbl_ctxt *ctxt;
+ struct tf_tbl_set_parms *sparms;
+ struct tf_shadow_tbl_db *shadow_db;
+ struct tf_shadow_tbl_shadow_result_entry *sr_entry;
+
+ if (!parms || !parms->sparms) {
+ TFP_DRV_LOG(ERR, "Null parms\n");
+ return -EINVAL;
+ }
+
+ sparms = parms->sparms;
+ if (!sparms->data || !sparms->data_sz_in_bytes) {
+ TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tbl_type_2_str(sparms->type));
+ return -EINVAL;
+ }
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ ctxt = tf_shadow_tbl_ctxt_get(shadow_db, sparms->type);
+ if (!ctxt) {
+ /* We aren't tracking this table, so return success */
+ TFP_DRV_LOG(DEBUG, "%s Unable to get tbl mgr context\n",
+ tf_tbl_type_2_str(sparms->type));
+ return 0;
+ }
+
+ idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, sparms->idx);
+ if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt)) {
+ TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
+ tf_dir_2_str(sparms->dir),
+ tf_tbl_type_2_str(sparms->type),
+ sparms->idx);
+ return -EINVAL;
+ }
+
+ /* Write the result table, the key/hash has been written already */
+ sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
+
+ /*
+ * If the handle is not valid, the bind was never called. We aren't
+ * tracking this entry.
+ */
+ if (!TF_SHADOW_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
+ return 0;
+
+ sr_entry->refcnt = 1;
+
return 0;
}
int
-tf_shadow_tbl_remove(struct tf_shadow_tbl_remove_parms *parms __rte_unused)
+tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms)
{
+ struct tf_shadow_tbl_db *shadow_db;
+ int i;
+
+ TF_CHECK_PARMS1(parms);
+
+ shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
+ if (!shadow_db) {
+ TFP_DRV_LOG(DEBUG, "Shadow db is NULL cannot be freed\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
+ if (shadow_db->ctxt[i]) {
+ tf_shadow_tbl_ctxt_delete(shadow_db->ctxt[i]);
+ tfp_free(shadow_db->ctxt[i]);
+ }
+ }
+
+ tfp_free(shadow_db);
+
return 0;
}
+
+/**
+ * Allocate the shadow table resources needed for the search and allocate
+ * capability.
+ */
+int tf_shadow_tbl_create_db(struct tf_shadow_tbl_create_db_parms *parms)
+{
+ int rc;
+ int i;
+ uint16_t base;
+ struct tfp_calloc_parms cparms;
+ struct tf_shadow_tbl_db *shadow_db = NULL;
+
+ TF_CHECK_PARMS1(parms);
+
+ /* Build the shadow DB per the request */
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_shadow_tbl_db);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ return rc;
+ shadow_db = (void *)cparms.mem_va;
+
+ for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
+ /* If the element didn't request an allocation no need
+ * to create a pool nor verify if we got a reservation.
+ */
+ if (!parms->cfg->alloc_cnt[i] ||
+ !tf_shadow_tbl_is_searchable(i)) {
+ shadow_db->ctxt[i] = NULL;
+ continue;
+ }
+
+ cparms.nitems = 1;
+ cparms.size = sizeof(struct tf_shadow_tbl_ctxt);
+ cparms.alignment = 0;
+ rc = tfp_calloc(&cparms);
+ if (rc)
+ goto error;
+
+ shadow_db->ctxt[i] = cparms.mem_va;
+ base = parms->cfg->base_addr[i];
+ rc = tf_shadow_tbl_ctxt_create(shadow_db->ctxt[i],
+ parms->cfg->alloc_cnt[i],
+ base);
+ if (rc)
+ goto error;
+ }
+
+ *parms->shadow_db = (void *)shadow_db;
+
+ TFP_DRV_LOG(INFO,
+ "TF SHADOW TABLE - initialized\n");
+
+ return 0;
+error:
+ for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
+ if (shadow_db->ctxt[i]) {
+ tf_shadow_tbl_ctxt_delete(shadow_db->ctxt[i]);
+ tfp_free(shadow_db->ctxt[i]);
+ }
+ }
+
+ tfp_free(shadow_db);
+
+ return -ENOMEM;
+}
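For reference, a standalone sketch of the hash bucket packing used above: each
64-bit bucket holds four 16-bit elements, and each element carries a
shadow-table index plus a valid bit. The EX_* constants are assumptions for
illustration only; the driver's real values (TF_SHADOW_BE_VALID,
TF_SHADOW_HB_NUM_ELEM) are defined in tf_shadow_tbl.h.

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants only; the driver's real values come from
 * TF_SHADOW_BE_VALID and TF_SHADOW_HB_NUM_ELEM in tf_shadow_tbl.h.
 */
#define EX_BE_VALID    (1u << 15)   /* assumed position of the valid bit */
#define EX_HB_NUM_ELEM 4            /* four 16-bit elements per 64-bit bucket */

/* Pack a shadow index into bucket element 'be', mirroring
 * tf_shadow_tbl_set_hash_entry() above.
 */
static void ex_bucket_set(uint64_t *bucket, unsigned int be, uint16_t sh_idx)
{
    uint64_t entry = sh_idx | EX_BE_VALID;

    *bucket |= entry << (be * 16);
}

/* Walk a bucket the way tf_shadow_tbl_search() does. */
static void ex_bucket_walk(uint64_t bucket)
{
    unsigned int i;
    uint16_t elem;

    for (i = 0; i < EX_HB_NUM_ELEM; i++) {
        elem = (uint16_t)((bucket >> (i * 16)) & 0xffff);
        if (!(elem & EX_BE_VALID))
            printf("element %u: free\n", i);
        else
            printf("element %u: shadow idx %u\n", i, elem & ~EX_BE_VALID);
    }
}

int main(void)
{
    uint64_t bucket = 0;

    ex_bucket_set(&bucket, 0, 5);
    ex_bucket_set(&bucket, 2, 42);
    ex_bucket_walk(bucket);
    return 0;
}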
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.h b/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
index dfd336e53..e73381f25 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
@@ -8,8 +8,6 @@
#include "tf_core.h"
-struct tf;
-
/**
* The Shadow Table module provides shadow DB handling for table based
* TF types. A shadow DB provides the capability that allows for reuse
@@ -32,19 +30,22 @@ struct tf;
*/
struct tf_shadow_tbl_cfg_parms {
/**
- * TF Table type
+ * [in] The number of elements in the alloc_cnt and base_addr arrays.
+ * For now, it should always be equal to TF_TBL_TYPE_MAX
*/
- enum tf_tbl_type type;
+ int num_entries;
/**
- * Number of entries the Shadow DB needs to hold
+ * [in] Resource allocation count array
+ * This array content originates from the tf_session_resources
+ * that is passed in on session open
+ * Array size is TF_TBL_TYPE_MAX
*/
- int num_entries;
-
+ uint16_t *alloc_cnt;
/**
- * Element width for this table type
+ * [in] The base index for each table
*/
- int element_width;
+ uint16_t base_addr[TF_TBL_TYPE_MAX];
};
/**
@@ -52,17 +53,17 @@ struct tf_shadow_tbl_cfg_parms {
*/
struct tf_shadow_tbl_create_db_parms {
/**
- * [in] Configuration information for the shadow db
+ * [in] Receive or transmit direction
*/
- struct tf_shadow_tbl_cfg_parms *cfg;
+ enum tf_dir dir;
/**
- * [in] Number of elements in the parms structure
+ * [in] Configuration information for the shadow db
*/
- uint16_t num_elements;
+ struct tf_shadow_tbl_cfg_parms *cfg;
/**
* [out] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void **shadow_db;
};
/**
@@ -70,9 +71,9 @@ struct tf_shadow_tbl_create_db_parms {
*/
struct tf_shadow_tbl_free_db_parms {
/**
- * Shadow table DB handle
+ * [in] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void *shadow_db;
};
/**
@@ -82,79 +83,77 @@ struct tf_shadow_tbl_search_parms {
/**
* [in] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void *shadow_db;
/**
- * [in] Table type
+ * [inout] The search parms from tf core
*/
- enum tf_tbl_type type;
- /**
- * [in] Pointer to entry blob value in remap table to match
- */
- uint8_t *entry;
- /**
- * [in] Size of the entry blob passed in bytes
- */
- uint16_t entry_sz;
- /**
- * [out] Index of the found element returned if hit
- */
- uint16_t *index;
+ struct tf_tbl_alloc_search_parms *sparms;
/**
- * [out] Reference count incremented if hit
+ * [out] The hash bucket handle, returned for use in the subsequent bind
*/
- uint16_t *ref_cnt;
+ uint32_t hb_handle;
};
/**
- * Shadow table insert parameters
+ * Shadow Table bind index parameters
*/
-struct tf_shadow_tbl_insert_parms {
+struct tf_shadow_tbl_bind_index_parms {
/**
* [in] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void *shadow_db;
/**
- * [in] Tbl type
+ * [in] Receive or transmit direction
+ */
+ enum tf_dir dir;
+ /**
+ * [in] Table type
*/
enum tf_tbl_type type;
/**
- * [in] Pointer to entry blob value in remap table to match
+ * [in] index of the entry to program
*/
- uint8_t *entry;
+ uint16_t idx;
/**
- * [in] Size of the entry blob passed in bytes
+ * [in] Pointer to the data to bind; for tables the data is the key
*/
- uint16_t entry_sz;
+ uint8_t *data;
/**
- * [in] Entry to update
+ * [in] data size in bytes
*/
- uint16_t index;
+ uint16_t data_sz_in_bytes;
/**
- * [out] Reference count after insert
+ * [in] The hash bucket handle returned from the search
*/
- uint16_t *ref_cnt;
+ uint32_t hb_handle;
};
/**
- * Shadow table remove parameters
+ * Shadow table insert parameters
*/
-struct tf_shadow_tbl_remove_parms {
+struct tf_shadow_tbl_insert_parms {
/**
* [in] Shadow table DB handle
*/
- void *tf_shadow_tbl_db;
+ void *shadow_db;
/**
- * [in] Tbl type
+ * [in] The insert parms from tf core
*/
- enum tf_tbl_type type;
+ struct tf_tbl_set_parms *sparms;
+};
+
+/**
+ * Shadow table remove parameters
+ */
+struct tf_shadow_tbl_remove_parms {
/**
- * [in] Entry to update
+ * [in] Shadow table DB handle
*/
- uint16_t index;
+ void *shadow_db;
/**
- * [out] Reference count after removal
+ * [in] The free parms from tf core
*/
- uint16_t *ref_cnt;
+ struct tf_tbl_free_parms *fparms;
};
/**
@@ -206,9 +205,26 @@ int tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms);
* Returns
* - (0) if successful, element was found.
* - (-EINVAL) on failure.
+ *
+ * If there is a miss, but there is room for insertion, the hb_handle returned
+ * is used for insertion during the bind index API
*/
int tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms);
+/**
+ * Bind Shadow table db hash and result tables with result from search/alloc
+ *
+ * [in] parms
+ * Pointer to the bind index parameters
+ *
+ * Returns
+ * - (0) if successful
+ * - (-EINVAL) on failure.
+ *
+ * This is only called after a MISS in the search returns a hb_handle
+ */
+int tf_shadow_tbl_bind_index(struct tf_shadow_tbl_bind_index_parms *parms);
+
/**
* Inserts an element into the Shadow table DB. Will fail if the
* elements ref_count is different from 0. Ref_count after insert will
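For reference, a minimal sketch of how a caller is expected to sequence the API
declared above (the helper name and the way new_idx is obtained are
illustrative; the real flow is tf_tbl_alloc_search() in the tf_tbl.c diff
further below):

#include <stdint.h>
#include "tf_tbl.h"
#include "tf_shadow_tbl.h"

static int
example_tbl_search_and_bind(void *shadow_db,
                            struct tf_tbl_alloc_search_parms *ap,
                            uint16_t new_idx)
{
    struct tf_shadow_tbl_search_parms sp = { 0 };
    struct tf_shadow_tbl_bind_index_parms bp = { 0 };
    int rc;

    sp.shadow_db = shadow_db;
    sp.sparms = ap;
    rc = tf_shadow_tbl_search(&sp);
    if (rc)
        return rc;

    /* HIT fills ap->idx and ap->ref_cnt; REJECT is reported only
     * through ap->search_status.
     */
    if (!ap->alloc || ap->search_status != MISS)
        return 0;

    /* MISS: the caller allocates new_idx (RM/device ops) and binds it */
    bp.shadow_db = shadow_db;
    bp.dir = ap->dir;
    bp.type = ap->type;
    bp.idx = new_idx;
    bp.data = ap->result;
    bp.data_sz_in_bytes = ap->result_sz_in_bytes;
    bp.hb_handle = sp.hb_handle;
    rc = tf_shadow_tbl_bind_index(&bp);

    /* The entry data is still programmed through the normal set path,
     * which calls tf_shadow_tbl_insert() to finalize the refcount.
     */
    return rc;
}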
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
index beaea0340..a0130d6a8 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
@@ -373,6 +373,12 @@ tf_shadow_tcam_clear_hash_entry(struct tf_shadow_tcam_ctxt *ctxt,
case 3:
*bucket = TF_SHADOW_TCAM_BE2_MASK_CLEAR(*bucket);
break;
+ default:
+ /*
+ * Since the BE_GET masks non-inclusive bits, this will not
+ * happen.
+ */
+ break;
}
}
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.c b/drivers/net/bnxt/tf_core/tf_tbl.c
index 9ebaa34e4..bec52105e 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_tbl.c
@@ -13,6 +13,9 @@
#include "tf_util.h"
#include "tf_msg.h"
#include "tfp.h"
+#include "tf_shadow_tbl.h"
+#include "tf_session.h"
+#include "tf_device.h"
struct tf;
@@ -25,7 +28,7 @@ static void *tbl_db[TF_DIR_MAX];
/**
* Table Shadow DBs
*/
-/* static void *shadow_tbl_db[TF_DIR_MAX]; */
+static void *shadow_tbl_db[TF_DIR_MAX];
/**
* Init flag, set on bind and cleared on unbind
@@ -35,14 +38,19 @@ static uint8_t init;
/**
* Shadow init flag, set on bind and cleared on unbind
*/
-/* static uint8_t shadow_init; */
+static uint8_t shadow_init;
int
tf_tbl_bind(struct tf *tfp,
struct tf_tbl_cfg_parms *parms)
{
- int rc;
- int i;
+ int rc, d, i;
+ struct tf_rm_alloc_info info;
+ struct tf_rm_free_db_parms fparms;
+ struct tf_shadow_tbl_free_db_parms fshadow;
+ struct tf_rm_get_alloc_info_parms ainfo;
+ struct tf_shadow_tbl_cfg_parms shadow_cfg;
+ struct tf_shadow_tbl_create_db_parms shadow_cdb;
struct tf_rm_create_db_parms db_cfg = { 0 };
TF_CHECK_PARMS2(tfp, parms);
@@ -58,26 +66,86 @@ tf_tbl_bind(struct tf *tfp,
db_cfg.num_elements = parms->num_elements;
db_cfg.cfg = parms->cfg;
- for (i = 0; i < TF_DIR_MAX; i++) {
- db_cfg.dir = i;
- db_cfg.alloc_cnt = parms->resources->tbl_cnt[i].cnt;
- db_cfg.rm_db = &tbl_db[i];
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ db_cfg.dir = d;
+ db_cfg.alloc_cnt = parms->resources->tbl_cnt[d].cnt;
+ db_cfg.rm_db = &tbl_db[d];
rc = tf_rm_create_db(tfp, &db_cfg);
if (rc) {
TFP_DRV_LOG(ERR,
"%s: Table DB creation failed\n",
- tf_dir_2_str(i));
+ tf_dir_2_str(d));
return rc;
}
}
+ /* Initialize the Shadow Table. */
+ if (parms->shadow_copy) {
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ memset(&shadow_cfg, 0, sizeof(shadow_cfg));
+ memset(&shadow_cdb, 0, sizeof(shadow_cdb));
+ /* Get the base addresses of the tables */
+ for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
+ memset(&info, 0, sizeof(info));
+
+ if (!parms->resources->tbl_cnt[d].cnt[i])
+ continue;
+ ainfo.rm_db = tbl_db[d];
+ ainfo.db_index = i;
+ ainfo.info = &info;
+ rc = tf_rm_get_info(&ainfo);
+ if (rc)
+ goto error;
+
+ shadow_cfg.base_addr[i] = info.entry.start;
+ }
+
+ /* Create the shadow db */
+ shadow_cfg.alloc_cnt =
+ parms->resources->tbl_cnt[d].cnt;
+ shadow_cfg.num_entries = parms->num_elements;
+
+ shadow_cdb.shadow_db = &shadow_tbl_db[d];
+ shadow_cdb.cfg = &shadow_cfg;
+ rc = tf_shadow_tbl_create_db(&shadow_cdb);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "Shadow TBL DB creation failed "
+ "rc=%d\n", rc);
+ goto error;
+ }
+ }
+ shadow_init = 1;
+ }
+
init = 1;
TFP_DRV_LOG(INFO,
"Table Type - initialized\n");
return 0;
+error:
+ for (d = 0; d < TF_DIR_MAX; d++) {
+ memset(&fparms, 0, sizeof(fparms));
+ fparms.dir = d;
+ fparms.rm_db = tbl_db[d];
+ /* Ignoring return here since we are in the error case */
+ (void)tf_rm_free_db(tfp, &fparms);
+
+ if (parms->shadow_copy) {
+ fshadow.shadow_db = shadow_tbl_db[d];
+ tf_shadow_tbl_free_db(&fshadow);
+ shadow_tbl_db[d] = NULL;
+ }
+
+ tbl_db[d] = NULL;
+ }
+
+ shadow_init = 0;
+ init = 0;
+
+ return rc;
}
int
@@ -86,6 +154,7 @@ tf_tbl_unbind(struct tf *tfp)
int rc;
int i;
struct tf_rm_free_db_parms fparms = { 0 };
+ struct tf_shadow_tbl_free_db_parms fshadow;
TF_CHECK_PARMS1(tfp);
@@ -104,9 +173,17 @@ tf_tbl_unbind(struct tf *tfp)
return rc;
tbl_db[i] = NULL;
+
+ if (shadow_init) {
+ memset(&fshadow, 0, sizeof(fshadow));
+ fshadow.shadow_db = shadow_tbl_db[i];
+ tf_shadow_tbl_free_db(&fshadow);
+ shadow_tbl_db[i] = NULL;
+ }
}
init = 0;
+ shadow_init = 0;
return 0;
}
@@ -153,6 +230,7 @@ tf_tbl_free(struct tf *tfp __rte_unused,
int rc;
struct tf_rm_is_allocated_parms aparms = { 0 };
struct tf_rm_free_parms fparms = { 0 };
+ struct tf_shadow_tbl_remove_parms shparms;
int allocated = 0;
TF_CHECK_PARMS2(tfp, parms);
@@ -182,6 +260,36 @@ tf_tbl_free(struct tf *tfp __rte_unused,
return -EINVAL;
}
+ /*
+ * The Shadow mgmt, if enabled, determines if the entry needs
+ * to be deleted.
+ */
+ if (shadow_init) {
+ memset(&shparms, 0, sizeof(shparms));
+ shparms.shadow_db = shadow_tbl_db[parms->dir];
+ shparms.fparms = parms;
+ rc = tf_shadow_tbl_remove(&shparms);
+ if (rc) {
+ /*
+ * Should not get here, log it and let the entry be
+ * deleted.
+ */
+ TFP_DRV_LOG(ERR, "%s: Shadow free fail, "
+ "type:%d index:%d deleting the entry.\n",
+ tf_dir_2_str(parms->dir),
+ parms->type,
+ parms->idx);
+ } else {
+ /*
+ * If the entry still has references, just return the
+ * ref count to the caller. No need to remove entry
+ * from rm.
+ */
+ if (parms->ref_cnt >= 1)
+ return rc;
+ }
+ }
+
/* Free requested element */
fparms.rm_db = tbl_db[parms->dir];
fparms.db_index = parms->type;
@@ -200,10 +308,124 @@ tf_tbl_free(struct tf *tfp __rte_unused,
}
int
-tf_tbl_alloc_search(struct tf *tfp __rte_unused,
- struct tf_tbl_alloc_search_parms *parms __rte_unused)
+tf_tbl_alloc_search(struct tf *tfp,
+ struct tf_tbl_alloc_search_parms *parms)
{
- return 0;
+ int rc, frc;
+ uint32_t idx;
+ struct tf_session *tfs;
+ struct tf_dev_info *dev;
+ struct tf_tbl_alloc_parms aparms;
+ struct tf_shadow_tbl_search_parms sparms;
+ struct tf_shadow_tbl_bind_index_parms bparms;
+ struct tf_tbl_free_parms fparms;
+
+ TF_CHECK_PARMS2(tfp, parms);
+
+ if (!shadow_init || !shadow_tbl_db[parms->dir]) {
+ TFP_DRV_LOG(ERR, "%s: Shadow TBL not initialized.\n",
+ tf_dir_2_str(parms->dir));
+ return -EINVAL;
+ }
+
+ memset(&sparms, 0, sizeof(sparms));
+ sparms.sparms = parms;
+ sparms.shadow_db = shadow_tbl_db[parms->dir];
+ rc = tf_shadow_tbl_search(&sparms);
+ if (rc)
+ return rc;
+
+ /*
+ * The app didn't request us to alloc the entry, so return now.
+ * The hit should have been updated in the original search parm.
+ */
+ if (!parms->alloc || parms->search_status != MISS)
+ return rc;
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session(tfp, &tfs);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup session, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Retrieve the device information */
+ rc = tf_session_get_device(tfs, &dev);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed to lookup device, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Allocate the index */
+ if (dev->ops->tf_dev_alloc_tbl == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return -EOPNOTSUPP;
+ }
+
+ memset(&aparms, 0, sizeof(aparms));
+ aparms.dir = parms->dir;
+ aparms.type = parms->type;
+ aparms.tbl_scope_id = parms->tbl_scope_id;
+ aparms.idx = &idx;
+ rc = dev->ops->tf_dev_alloc_tbl(tfp, &aparms);
+ if (rc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Table allocation failed, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ /* Bind the allocated index to the data */
+ memset(&bparms, 0, sizeof(bparms));
+ bparms.shadow_db = shadow_tbl_db[parms->dir];
+ bparms.dir = parms->dir;
+ bparms.type = parms->type;
+ bparms.idx = idx;
+ bparms.data = parms->result;
+ bparms.data_sz_in_bytes = parms->result_sz_in_bytes;
+ bparms.hb_handle = sparms.hb_handle;
+ rc = tf_shadow_tbl_bind_index(&bparms);
+ if (rc) {
+ /* Error binding entry, need to free the allocated idx */
+ if (dev->ops->tf_dev_free_tbl == NULL) {
+ rc = -EOPNOTSUPP;
+ TFP_DRV_LOG(ERR,
+ "%s: Operation not supported, rc:%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-rc));
+ return rc;
+ }
+
+ memset(&fparms, 0, sizeof(fparms));
+ fparms.dir = parms->dir;
+ fparms.type = parms->type;
+ fparms.idx = idx;
+ frc = dev->ops->tf_dev_free_tbl(tfp, &fparms);
+ if (frc) {
+ TFP_DRV_LOG(ERR,
+ "%s: Failed free index allocated during "
+ "search. rc=%s\n",
+ tf_dir_2_str(parms->dir),
+ strerror(-frc));
+ /* return the original failure. */
+ return rc;
+ }
+ }
+
+ parms->idx = idx;
+
+ return rc;
}
int
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.h b/drivers/net/bnxt/tf_core/tf_tbl.h
index f20e8d729..930fcc324 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl.h
+++ b/drivers/net/bnxt/tf_core/tf_tbl.h
@@ -144,29 +144,31 @@ struct tf_tbl_alloc_search_parms {
*/
uint32_t tbl_scope_id;
/**
- * [in] Enable search for matching entry. If the table type is
- * internal the shadow copy will be searched before
- * alloc. Session must be configured with shadow copy enabled.
- */
- uint8_t search_enable;
- /**
- * [in] Result data to search for (if search_enable)
+ * [in] Result data to search for
*/
uint8_t *result;
/**
- * [in] Result data size in bytes (if search_enable)
+ * [in] Result data size in bytes
*/
uint16_t result_sz_in_bytes;
+ /**
+ * [in] Whether or not to allocate on MISS, 1 is allocate.
+ */
+ uint8_t alloc;
/**
- * [out] If search_enable, set if matching entry found
+ * [out] Set if a matching entry is found
*/
uint8_t hit;
/**
- * [out] Current ref count after allocation (if search_enable)
+ * [out] The status of the search (REJECT, MISS, HIT)
+ */
+ enum tf_search_status search_status;
+ /**
+ * [out] Current ref count after allocation
*/
uint16_t ref_cnt;
/**
- * [out] Idx of allocated entry or found entry (if search_enable)
+ * [out] Idx of allocated entry or found entry
*/
uint32_t idx;
};
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.h b/drivers/net/bnxt/tf_core/tf_tcam.h
index 563b08c23..280f138dd 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.h
+++ b/drivers/net/bnxt/tf_core/tf_tcam.h
@@ -134,7 +134,7 @@ struct tf_tcam_alloc_search_parms {
/**
* [out] Search result status (hit, miss, reject)
*/
- enum tf_tcam_search_status search_status;
+ enum tf_search_status search_status;
/**
* [out] Current refcnt after allocation
*/
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 13/22] net/bnxt: modify ulp mapper to use tbl search
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (11 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 12/22] net/bnxt: add shadow table capability with search Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-27 10:36 ` Ferruh Yigit
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 14/22] net/bnxt: fix port default rule create and destroy Ajit Khaparde
` (9 subsequent siblings)
22 siblings, 1 reply; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Mike Baucom, Kishore Padmanabha
From: Mike Baucom <michael.baucom@broadcom.com>
Modified the ulp mapper to use the new tf_search_tbl_entry API.
When search before allocation is requested, the mapper calls
tf_search_tbl_entry with the alloc flag, as sketched below.
- On HIT, the result and table index are returned.
- On MISS, the table index is returned but the result is
created and the table entry is set.
- On REJECT, the flow request is rejected.
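A minimal sketch of the resulting call pattern (the helper name and its
arguments are illustrative; the real call site is
ulp_mapper_index_tbl_process() in the diff below):

#include <errno.h>
#include <stdint.h>
#include "tf_core.h"

static int
example_search_before_alloc(struct tf *tfp, enum tf_dir dir,
                            enum tf_tbl_type type, uint32_t tbl_scope_id,
                            uint8_t *result, uint16_t result_sz,
                            uint32_t *index, uint8_t *hit)
{
    struct tf_search_tbl_entry_parms sp = { 0 };
    int rc;

    sp.dir = dir;
    sp.type = type;
    sp.tbl_scope_id = tbl_scope_id;
    sp.alloc = 1;                   /* allocate the index on a MISS */
    sp.result = result;
    sp.result_sz_in_bytes = result_sz;

    rc = tf_search_tbl_entry(tfp, &sp);
    if (rc)
        return rc;
    if (sp.search_status == REJECT)
        return -ENOMEM;             /* no room, reject the flow */

    /* HIT: reuse sp.idx as-is; MISS: sp.idx was freshly allocated and
     * still has to be programmed with tf_set_tbl_entry().
     */
    *index = sp.idx;
    *hit = sp.hit;
    return 0;
}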
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 75 +++++++++++++++++++---------
1 file changed, 51 insertions(+), 24 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index a071c0750..4dee65971 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -1764,9 +1764,10 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct ulp_blob data;
uint64_t idx = 0;
uint16_t tmplen;
- uint32_t i, num_flds;
+ uint32_t i, num_flds, index, hit;
int32_t rc = 0, trc = 0;
struct tf_alloc_tbl_entry_parms aparms = { 0 };
+ struct tf_search_tbl_entry_parms srchparms = { 0 };
struct tf_set_tbl_entry_parms sparms = { 0 };
struct tf_free_tbl_entry_parms free_parms = { 0 };
uint32_t tbl_scope_id;
@@ -1868,33 +1869,59 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
return 0; /* success */
}
+ index = 0;
+ hit = 0;
/* Perform the tf table allocation by filling the alloc params */
- aparms.dir = tbl->direction;
- aparms.type = tbl->resource_type;
- aparms.search_enable = tbl->srch_b4_alloc;
- aparms.result = ulp_blob_data_get(&data, &tmplen);
- aparms.result_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
- aparms.tbl_scope_id = tbl_scope_id;
-
- /* All failures after the alloc succeeds require a free */
- rc = tf_alloc_tbl_entry(tfp, &aparms);
- if (rc) {
- BNXT_TF_DBG(ERR, "Alloc table[%d][%s] failed rc=%d\n",
- aparms.type,
- (aparms.dir == TF_DIR_RX) ? "RX" : "TX",
- rc);
- return rc;
- }
+ if (tbl->srch_b4_alloc) {
+ memset(&srchparms, 0, sizeof(srchparms));
+ srchparms.dir = tbl->direction;
+ srchparms.type = tbl->resource_type;
+ srchparms.alloc = 1;
+ srchparms.result = ulp_blob_data_get(&data, &tmplen);
+ srchparms.result_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
+ srchparms.tbl_scope_id = tbl_scope_id;
+ rc = tf_search_tbl_entry(tfp, &srchparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Alloc table[%s][%s] failed rc=%d\n",
+ tf_tbl_type_2_str(tbl->resource_type),
+ tf_dir_2_str(tbl->direction), rc);
+ return rc;
+ }
+ if (srchparms.search_status == REJECT) {
+ BNXT_TF_DBG(ERR, "Alloc table[%s][%s] rejected.\n",
+ tf_tbl_type_2_str(tbl->resource_type),
+ tf_dir_2_str(tbl->direction));
+ return -ENOMEM;
+ }
+ index = srchparms.idx;
+ hit = srchparms.hit;
+ } else {
+ aparms.dir = tbl->direction;
+ aparms.type = tbl->resource_type;
+ aparms.search_enable = tbl->srch_b4_alloc;
+ aparms.result = ulp_blob_data_get(&data, &tmplen);
+ aparms.result_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
+ aparms.tbl_scope_id = tbl_scope_id;
+ /* All failures after the alloc succeeds require a free */
+ rc = tf_alloc_tbl_entry(tfp, &aparms);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Alloc table[%s][%s] failed rc=%d\n",
+ tf_tbl_type_2_str(tbl->resource_type),
+ tf_dir_2_str(tbl->direction), rc);
+ return rc;
+ }
+ index = aparms.idx;
+ }
/*
* calculate the idx for the result record, for external EM the offset
* needs to be shifted accordingly. If external non-inline table types
* are used then need to revisit this logic.
*/
- if (aparms.type == TF_TBL_TYPE_EXT)
- idx = TF_ACT_REC_OFFSET_2_PTR(aparms.idx);
+ if (tbl->resource_type == TF_TBL_TYPE_EXT)
+ idx = TF_ACT_REC_OFFSET_2_PTR(index);
else
- idx = aparms.idx;
+ idx = index;
/* Always storing values in Regfile in BE */
idx = tfp_cpu_to_be_64(idx);
@@ -1908,12 +1935,12 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
}
/* Perform the tf table set by filling the set params */
- if (!tbl->srch_b4_alloc || !aparms.hit) {
+ if (!tbl->srch_b4_alloc || !hit) {
sparms.dir = tbl->direction;
sparms.type = tbl->resource_type;
sparms.data = ulp_blob_data_get(&data, &tmplen);
sparms.data_sz_in_bytes = ULP_BITS_2_BYTE(tmplen);
- sparms.idx = aparms.idx;
+ sparms.idx = index;
sparms.tbl_scope_id = tbl_scope_id;
rc = tf_set_tbl_entry(tfp, &sparms);
@@ -1933,7 +1960,7 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
fid_parms.resource_func = tbl->resource_func;
fid_parms.resource_type = tbl->resource_type;
fid_parms.resource_sub_type = tbl->resource_sub_type;
- fid_parms.resource_hndl = aparms.idx;
+ fid_parms.resource_hndl = index;
fid_parms.critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO;
rc = ulp_flow_db_resource_add(parms->ulp_ctx,
@@ -1960,7 +1987,7 @@ ulp_mapper_index_tbl_process(struct bnxt_ulp_mapper_parms *parms,
*/
free_parms.dir = tbl->direction;
free_parms.type = tbl->resource_type;
- free_parms.idx = aparms.idx;
+ free_parms.idx = index;
free_parms.tbl_scope_id = tbl_scope_id;
trc = tf_free_tbl_entry(tfp, &free_parms);
--
2.21.1 (Apple Git-122.3)
* Re: [dpdk-dev] [PATCH v3 13/22] net/bnxt: modify ulp mapper to use tbl search
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 13/22] net/bnxt: modify ulp mapper to use tbl search Ajit Khaparde
@ 2020-07-27 10:36 ` Ferruh Yigit
2020-07-27 10:50 ` Somnath Kotur
0 siblings, 1 reply; 102+ messages in thread
From: Ferruh Yigit @ 2020-07-27 10:36 UTC (permalink / raw)
To: Ajit Khaparde, dev; +Cc: Mike Baucom, Kishore Padmanabha
On 7/24/2020 6:32 AM, Ajit Khaparde wrote:
> From: Mike Baucom <michael.baucom@broadcom.com>
>
> modified ulp mappper to use the new tf_search_tbl_entry API.
> When search before allocation is requested, mapper calls
> tc_search_tbl_entry with the alloc flag.
>
> - On HIT, the result and table index is returned.
> - On MISS, the table index is returned but the result is
> created and the table entry is set.
> - On REJECT, the flow request is rejected.
>
> Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
> Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
> Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Is 'tbl' in patch title, short for 'table'?
* Re: [dpdk-dev] [PATCH v3 13/22] net/bnxt: modify ulp mapper to use tbl search
2020-07-27 10:36 ` Ferruh Yigit
@ 2020-07-27 10:50 ` Somnath Kotur
0 siblings, 0 replies; 102+ messages in thread
From: Somnath Kotur @ 2020-07-27 10:50 UTC (permalink / raw)
To: Ferruh Yigit; +Cc: Ajit Khaparde, dev, Mike Baucom, Kishore Padmanabha
On Mon, Jul 27, 2020 at 4:06 PM Ferruh Yigit <ferruh.yigit@intel.com> wrote:
>
> On 7/24/2020 6:32 AM, Ajit Khaparde wrote:
> > From: Mike Baucom <michael.baucom@broadcom.com>
> >
> > modified ulp mappper to use the new tf_search_tbl_entry API.
> > When search before allocation is requested, mapper calls
> > tc_search_tbl_entry with the alloc flag.
> >
> > - On HIT, the result and table index is returned.
> > - On MISS, the table index is returned but the result is
> > created and the table entry is set.
> > - On REJECT, the flow request is rejected.
> >
> > Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
> > Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
> > Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
>
> Is 'tbl' in patch title, short for 'table'?
>
I believe your guess is right Ferruh
Thanks
Som
* [dpdk-dev] [PATCH v3 14/22] net/bnxt: fix port default rule create and destroy
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (12 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 13/22] net/bnxt: modify ulp mapper to use tbl search Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 15/22] net/bnxt: delete VF FW rules on representor create Ajit Khaparde
` (8 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Venkat Duvvuru, Somnath Kotur, Kishore Padmanabha
From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Currently, the flow_ids of port_to_app/app_to_port & tx_cfa_action
for the first port get overwritten by the second port because
these fields are stored in the ulp context, which is common across the
ports.
This patch fixes the problem by using a per-port structure to store these
fields.
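A minimal sketch of the per-port lookup this introduces (names taken from
the diff below; the helper itself is illustrative only):

#include <stdint.h>
#include "bnxt.h"
#include "bnxt_ulp.h"

static struct bnxt_ulp_df_rule_info *
example_df_rule_info_get(struct bnxt *bp)
{
    uint8_t port_id = bp->eth_dev->data->port_id;

    /* One slot per port instead of a single copy shared via the ulp
     * context, so a second port no longer overwrites the first.
     */
    return &bp->ulp_ctx->cfg_data->df_rule_info[port_id];
}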
Fixes: 9f702636d7ba ("net/bnxt: add port default rules for ingress and egress")
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/bnxt.h | 5 +-
drivers/net/bnxt/bnxt_ethdev.c | 81 +----------------
drivers/net/bnxt/bnxt_txq.h | 1 -
drivers/net/bnxt/bnxt_txr.c | 6 +-
drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 12 ++-
drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 14 ++-
drivers/net/bnxt/tf_ulp/ulp_def_rules.c | 116 ++++++++++++++++++++++++
drivers/net/bnxt/tf_ulp/ulp_flow_db.c | 2 +-
drivers/net/bnxt/tf_ulp/ulp_flow_db.h | 2 +-
9 files changed, 151 insertions(+), 88 deletions(-)
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 50f93ff5b..41e7ae5bd 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -784,6 +784,7 @@ struct bnxt {
struct bnxt_flow_stat_info *flow_stat;
uint8_t flow_xstat;
uint16_t max_num_kflows;
+ uint16_t tx_cfa_action;
};
#define BNXT_FC_TIMER 1 /* Timer freq in Sec Flow Counters */
@@ -797,7 +798,7 @@ struct bnxt_vf_representor {
uint16_t fw_fid;
uint16_t dflt_vnic_id;
uint16_t svif;
- uint32_t vfr_tx_cfa_action;
+ uint16_t vfr_tx_cfa_action;
uint16_t rx_cfa_code;
uint32_t rep2vf_flow_id;
uint32_t vf2rep_flow_id;
@@ -872,6 +873,8 @@ extern int bnxt_logtype_driver;
extern const struct rte_flow_ops bnxt_ulp_rte_flow_ops;
int32_t bnxt_ulp_init(struct bnxt *bp);
void bnxt_ulp_deinit(struct bnxt *bp);
+int32_t bnxt_ulp_create_df_rules(struct bnxt *bp);
+void bnxt_ulp_destroy_df_rules(struct bnxt *bp, bool global);
uint16_t bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type);
uint16_t bnxt_get_svif(uint16_t port_id, bool func_svif,
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 0829493ea..dfc4b4190 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1168,73 +1168,6 @@ static int bnxt_handle_if_change_status(struct bnxt *bp)
return rc;
}
-static int32_t
-bnxt_create_port_app_df_rule(struct bnxt *bp, uint8_t flow_type,
- uint32_t *flow_id)
-{
- uint16_t port_id = bp->eth_dev->data->port_id;
- struct ulp_tlv_param param_list[] = {
- {
- .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
- .length = 2,
- .value = {(port_id >> 8) & 0xff, port_id & 0xff}
- },
- {
- .type = BNXT_ULP_DF_PARAM_TYPE_LAST,
- .length = 0,
- .value = {0}
- }
- };
-
- return ulp_default_flow_create(bp->eth_dev, param_list, flow_type,
- flow_id);
-}
-
-static int32_t
-bnxt_create_df_rules(struct bnxt *bp)
-{
- struct bnxt_ulp_data *cfg_data;
- int rc;
-
- cfg_data = bp->ulp_ctx->cfg_data;
- rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_PORT_TO_VS,
- &cfg_data->port_to_app_flow_id);
- if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to create port to app default rule\n");
- return rc;
- }
-
- BNXT_TF_DBG(DEBUG, "***** created port to app default rule ******\n");
- rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_VS_TO_PORT,
- &cfg_data->app_to_port_flow_id);
- if (!rc) {
- rc = ulp_default_flow_db_cfa_action_get(bp->ulp_ctx,
- cfg_data->app_to_port_flow_id,
- &cfg_data->tx_cfa_action);
- if (rc)
- goto err;
-
- BNXT_TF_DBG(DEBUG,
- "***** created app to port default rule *****\n");
- return 0;
- }
-
-err:
- BNXT_TF_DBG(DEBUG, "Failed to create app to port default rule\n");
- return rc;
-}
-
-static void
-bnxt_destroy_df_rules(struct bnxt *bp)
-{
- struct bnxt_ulp_data *cfg_data;
-
- cfg_data = bp->ulp_ctx->cfg_data;
- ulp_default_flow_destroy(bp->eth_dev, cfg_data->port_to_app_flow_id);
- ulp_default_flow_destroy(bp->eth_dev, cfg_data->app_to_port_flow_id);
-}
-
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
@@ -1296,8 +1229,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
bnxt_schedule_fw_health_check(bp);
pthread_mutex_unlock(&bp->def_cp_lock);
- if (BNXT_TRUFLOW_EN(bp))
- bnxt_ulp_init(bp);
+ bnxt_ulp_init(bp);
return 0;
@@ -1358,6 +1290,9 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
+ bnxt_ulp_destroy_df_rules(bp, false);
+ bnxt_ulp_deinit(bp);
+
bnxt_cancel_fw_health_check(bp);
bnxt_dev_set_link_down_op(eth_dev);
@@ -1403,11 +1338,6 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
bnxt_cancel_fc_thread(bp);
- if (BNXT_TRUFLOW_EN(bp)) {
- bnxt_destroy_df_rules(bp);
- bnxt_ulp_deinit(bp);
- }
-
if (eth_dev->data->dev_started)
bnxt_dev_stop_op(eth_dev);
@@ -1656,8 +1586,7 @@ static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
if (rc != 0)
vnic->flags = old_flags;
- if (BNXT_TRUFLOW_EN(bp))
- bnxt_create_df_rules(bp);
+ bnxt_ulp_create_df_rules(bp);
return rc;
}
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index a1ab3f39a..83a98536d 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -29,7 +29,6 @@ struct bnxt_tx_queue {
struct bnxt *bp;
int index;
int tx_wake_thresh;
- uint32_t tx_cfa_action;
uint32_t vfr_tx_cfa_action;
struct bnxt_tx_ring_info *tx_ring;
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index c741c7188..1113aca44 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -133,8 +133,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST |
PKT_TX_QINQ_PKT) ||
(BNXT_TRUFLOW_EN(txq->bp) &&
- (txq->bp->ulp_ctx->cfg_data->tx_cfa_action ||
- txq->vfr_tx_cfa_action)))
+ (txq->bp->tx_cfa_action || txq->vfr_tx_cfa_action)))
long_bd = true;
nr_bds = long_bd + tx_pkt->nb_segs;
@@ -192,8 +191,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
if (txq->vfr_tx_cfa_action)
cfa_action = txq->vfr_tx_cfa_action;
else
- cfa_action =
- txq->bp->ulp_ctx->cfg_data->tx_cfa_action;
+ cfa_action = txq->bp->tx_cfa_action;
}
/* HW can accelerate only outer vlan in QinQ mode */
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 7c65a4b1b..2febd5814 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -9,9 +9,9 @@
#include <rte_flow_driver.h>
#include <rte_tailq.h>
+#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
-#include "bnxt.h"
#include "tf_core.h"
#include "tf_ext_flow_handle.h"
@@ -381,6 +381,7 @@ ulp_ctx_init(struct bnxt *bp,
(void)ulp_ctx_deinit(bp, session);
return rc;
}
+
bnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, session->g_tfp);
return rc;
}
@@ -654,6 +655,9 @@ bnxt_ulp_init(struct bnxt *bp)
bool init;
int rc;
+ if (!BNXT_TRUFLOW_EN(bp))
+ return 0;
+
if (bp->ulp_ctx) {
BNXT_TF_DBG(DEBUG, "ulp ctx already allocated\n");
return -EINVAL;
@@ -822,6 +826,9 @@ bnxt_ulp_deinit(struct bnxt *bp)
struct rte_pci_device *pci_dev;
struct rte_pci_addr *pci_addr;
+ if (!BNXT_TRUFLOW_EN(bp))
+ return;
+
/* Get the session first */
pci_dev = RTE_DEV_TO_PCI(bp->eth_dev->device);
pci_addr = &pci_dev->addr;
@@ -833,6 +840,9 @@ bnxt_ulp_deinit(struct bnxt *bp)
if (!session)
return;
+ /* clean up default flows */
+ bnxt_ulp_destroy_df_rules(bp, true);
+
/* clean up regular flows */
ulp_flow_db_flush_flows(bp->ulp_ctx, BNXT_ULP_REGULAR_FLOW_TABLE);
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
index 7c95ead55..d53245215 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
@@ -22,6 +22,12 @@
#define BNXT_ULP_VF_REP_ENABLED 0x1
#define ULP_VF_REP_IS_ENABLED(flag) ((flag) & BNXT_ULP_VF_REP_ENABLED)
+struct bnxt_ulp_df_rule_info {
+ uint32_t port_to_app_flow_id;
+ uint32_t app_to_port_flow_id;
+ uint8_t valid;
+};
+
struct bnxt_ulp_data {
uint32_t tbl_scope_id;
struct bnxt_ulp_mark_tbl *mark_tbl;
@@ -32,9 +38,7 @@ struct bnxt_ulp_data {
struct bnxt_ulp_port_db *port_db;
struct bnxt_ulp_fc_info *fc_info;
uint32_t ulp_flags;
- uint32_t port_to_app_flow_id;
- uint32_t app_to_port_flow_id;
- uint32_t tx_cfa_action;
+ struct bnxt_ulp_df_rule_info df_rule_info[RTE_MAX_ETHPORTS];
};
struct bnxt_ulp_context {
@@ -175,4 +179,8 @@ int32_t
bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx,
uint32_t *flags);
+int32_t
+bnxt_ulp_get_df_rule_info(uint8_t port_id, struct bnxt_ulp_context *ulp_ctx,
+ struct bnxt_ulp_df_rule_info *info);
+
#endif /* _BNXT_ULP_H_ */
diff --git a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
index ddc6da8a8..9fb1a028f 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_def_rules.c
@@ -392,3 +392,119 @@ ulp_default_flow_destroy(struct rte_eth_dev *eth_dev, uint32_t flow_id)
return rc;
}
+
+void
+bnxt_ulp_destroy_df_rules(struct bnxt *bp, bool global)
+{
+ struct bnxt_ulp_df_rule_info *info;
+ uint8_t port_id;
+
+ if (!BNXT_TRUFLOW_EN(bp) ||
+ BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
+ return;
+
+ if (!bp->ulp_ctx || !bp->ulp_ctx->cfg_data)
+ return;
+
+ /* Delete default rules per port */
+ if (!global) {
+ port_id = bp->eth_dev->data->port_id;
+ info = &bp->ulp_ctx->cfg_data->df_rule_info[port_id];
+ if (!info->valid)
+ return;
+
+ ulp_default_flow_destroy(bp->eth_dev,
+ info->port_to_app_flow_id);
+ ulp_default_flow_destroy(bp->eth_dev,
+ info->app_to_port_flow_id);
+ info->valid = false;
+ return;
+ }
+
+ /* Delete default rules for all ports */
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ info = &bp->ulp_ctx->cfg_data->df_rule_info[port_id];
+ if (!info->valid)
+ continue;
+
+ ulp_default_flow_destroy(bp->eth_dev,
+ info->port_to_app_flow_id);
+ ulp_default_flow_destroy(bp->eth_dev,
+ info->app_to_port_flow_id);
+ info->valid = false;
+ }
+}
+
+static int32_t
+bnxt_create_port_app_df_rule(struct bnxt *bp, uint8_t flow_type,
+ uint32_t *flow_id)
+{
+ uint16_t port_id = bp->eth_dev->data->port_id;
+ struct ulp_tlv_param param_list[] = {
+ {
+ .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
+ .length = 2,
+ .value = {(port_id >> 8) & 0xff, port_id & 0xff}
+ },
+ {
+ .type = BNXT_ULP_DF_PARAM_TYPE_LAST,
+ .length = 0,
+ .value = {0}
+ }
+ };
+
+ return ulp_default_flow_create(bp->eth_dev, param_list, flow_type,
+ flow_id);
+}
+
+int32_t
+bnxt_ulp_create_df_rules(struct bnxt *bp)
+{
+ struct bnxt_ulp_df_rule_info *info;
+ uint8_t port_id;
+ int rc;
+
+ if (!BNXT_TRUFLOW_EN(bp) ||
+ BNXT_ETH_DEV_IS_REPRESENTOR(bp->eth_dev))
+ return 0;
+
+ port_id = bp->eth_dev->data->port_id;
+ info = &bp->ulp_ctx->cfg_data->df_rule_info[port_id];
+ BNXT_TF_DBG(INFO, "*** creating port to app default rule ***\n");
+ rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_PORT_TO_VS,
+ &info->port_to_app_flow_id);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to create port to app default rule\n");
+ return rc;
+ }
+ BNXT_TF_DBG(INFO, "*** created port to app default rule ***\n");
+
+ bp->tx_cfa_action = 0;
+ BNXT_TF_DBG(INFO, "*** creating app to port default rule ***\n");
+ rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_VS_TO_PORT,
+ &info->app_to_port_flow_id);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to create app to port default rule\n");
+ goto port_to_app_free;
+ }
+
+ rc = ulp_default_flow_db_cfa_action_get(bp->ulp_ctx,
+ info->app_to_port_flow_id,
+ &bp->tx_cfa_action);
+ if (rc)
+ goto app_to_port_free;
+
+ info->valid = true;
+ BNXT_TF_DBG(INFO, "*** created app to port default rule ***\n");
+ return 0;
+
+app_to_port_free:
+ ulp_default_flow_destroy(bp->eth_dev, info->app_to_port_flow_id);
+port_to_app_free:
+ ulp_default_flow_destroy(bp->eth_dev, info->port_to_app_flow_id);
+ info->valid = false;
+
+ return rc;
+}
diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
index a3cfe54bf..714451740 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.c
@@ -962,7 +962,7 @@ ulp_flow_db_resource_hndl_get(struct bnxt_ulp_context *ulp_ctx,
int32_t
ulp_default_flow_db_cfa_action_get(struct bnxt_ulp_context *ulp_ctx,
uint32_t flow_id,
- uint32_t *cfa_action)
+ uint16_t *cfa_action)
{
uint8_t sub_type = BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TYPE_VFR_CFA_ACTION;
uint64_t hndl;
diff --git a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
index 1fc06012f..117e250d6 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_flow_db.h
@@ -234,7 +234,7 @@ ulp_flow_db_validate_flow_func(struct bnxt_ulp_context *ulp_ctx,
int32_t
ulp_default_flow_db_cfa_action_get(struct bnxt_ulp_context *ulp_ctx,
uint32_t flow_id,
- uint32_t *cfa_action);
+ uint16_t *cfa_action);
#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
/*
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 15/22] net/bnxt: delete VF FW rules on representor create
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (13 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 14/22] net/bnxt: fix port default rule create and destroy Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 16/22] net/bnxt: modify shadow tcam and tbl reference count logic Ajit Khaparde
` (7 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Venkat Duvvuru, Somnath Kotur, Shahaji Bhosle
From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
The Truflow stack adds VFR-to-VF and VF-to-VFR conduits when a VF
representor is created. However, in the ingress direction the
VF's FW rules conflict with the Truflow rules, so the Truflow VFR
rules are never hit. To fix this, FW removes its
VF rules when a VF representor is created in Truflow mode and
restores the removed rules when the VF representor is destroyed.
This patch invokes the VF representor alloc and free HWRM commands,
as part of which FW performs the above actions.
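A minimal sketch of the pairing this adds (prototypes as declared in
bnxt_hwrm.h in the diff below; the helper name is illustrative only):

#include <stdint.h>
#include <stdbool.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"

static int
example_vfr_fw_rules_toggle(struct bnxt *parent_bp, uint16_t vf_id, bool create)
{
    /* On representor create, FW drops its own VF rules ... */
    if (create)
        return bnxt_hwrm_cfa_vfr_alloc(parent_bp, vf_id);

    /* ... and restores them when the representor is destroyed. */
    return bnxt_hwrm_cfa_vfr_free(parent_bp, vf_id);
}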
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Shahaji Bhosle <shahaji.bhosle@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
drivers/net/bnxt/bnxt_hwrm.c | 49 +++++++++
drivers/net/bnxt/bnxt_hwrm.h | 2 +
drivers/net/bnxt/bnxt_reps.c | 19 +++-
drivers/net/bnxt/hsi_struct_def_dpdk.h | 138 +++++++++++++++++++++++++
4 files changed, 205 insertions(+), 3 deletions(-)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 7ea13a8b2..f5f0dfe73 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -5486,6 +5486,55 @@ int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
return 0;
}
+int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx)
+{
+ struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_vfr_alloc_input req = {0};
+ int rc;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_VFR_ALLOC, BNXT_USE_CHIMP_MB);
+ req.vf_id = rte_cpu_to_le_16(vf_idx);
+ snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
+ bp->eth_dev->data->name, vf_idx);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+
+ HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "VFR %d allocated\n", vf_idx);
+ return rc;
+}
+
+int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx)
+{
+ struct hwrm_cfa_vfr_free_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_vfr_free_input req = {0};
+ int rc;
+
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+ PMD_DRV_LOG(DEBUG,
+ "Not a PF or trusted VF. Command not supported\n");
+ return 0;
+ }
+
+ HWRM_PREP(&req, HWRM_CFA_VFR_FREE, BNXT_USE_CHIMP_MB);
+ req.vf_id = rte_cpu_to_le_16(vf_idx);
+ snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
+ bp->eth_dev->data->name, vf_idx);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "VFR %d freed\n", vf_idx);
+ return rc;
+}
+
#ifdef RTE_LIBRTE_BNXT_PMD_SYSTEM
int
bnxt_hwrm_oem_cmd(struct bnxt *bp, uint32_t entry_num)
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 01201a7a4..4a2af13c9 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -278,4 +278,6 @@ int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp);
int bnxt_hwrm_oem_cmd(struct bnxt *bp, uint32_t entry_num);
int bnxt_clear_one_vnic_filter(struct bnxt *bp,
struct bnxt_filter_info *filter);
+int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx);
+int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx);
#endif
diff --git a/drivers/net/bnxt/bnxt_reps.c b/drivers/net/bnxt/bnxt_reps.c
index c425e69aa..2f775e0c0 100644
--- a/drivers/net/bnxt/bnxt_reps.c
+++ b/drivers/net/bnxt/bnxt_reps.c
@@ -272,7 +272,7 @@ static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
if (rc) {
BNXT_TF_DBG(DEBUG,
"Default flow rule creation for VFR->VF failed!\n");
- return -EIO;
+ goto err;
}
BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VFR->VF! ***\n");
@@ -283,7 +283,7 @@ static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
if (rc) {
BNXT_TF_DBG(DEBUG,
"Failed to get action_ptr for VFR->VF dflt rule\n");
- return -EIO;
+ goto rep2vf_free;
}
BNXT_TF_DBG(DEBUG, "tx_cfa_action = %d\n", vfr->vfr_tx_cfa_action);
rc = ulp_default_flow_create(parent_dev, param_list,
@@ -292,13 +292,24 @@ static int bnxt_tf_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
if (rc) {
BNXT_TF_DBG(DEBUG,
"Default flow rule creation for VF->VFR failed!\n");
- return -EIO;
+ goto rep2vf_free;
}
BNXT_TF_DBG(DEBUG, "*** Default flow rule created for VF->VFR! ***\n");
BNXT_TF_DBG(DEBUG, "vfr2rep_flow_id = %d\n", vfr->vf2rep_flow_id);
+ rc = bnxt_hwrm_cfa_vfr_alloc(parent_bp, vfr->vf_id);
+ if (rc)
+ goto vf2rep_free;
+
return 0;
+
+vf2rep_free:
+ ulp_default_flow_destroy(vfr->parent_dev, vfr->vf2rep_flow_id);
+rep2vf_free:
+ ulp_default_flow_destroy(vfr->parent_dev, vfr->rep2vf_flow_id);
+err:
+ return -EIO;
}
static int bnxt_vfr_alloc(struct rte_eth_dev *vfr_ethdev)
@@ -414,6 +425,8 @@ static int bnxt_vfr_free(struct bnxt_vf_representor *vfr)
vfr->vfr_tx_cfa_action = 0;
vfr->rx_cfa_code = 0;
+ rc = bnxt_hwrm_cfa_vfr_free(parent_bp, vfr->vf_id);
+
return rc;
}
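The bnxt_tf_vfr_alloc() rework above replaces the early returns with a
goto-based unwind so that the REP->VF and VF->REP default flows are torn
down in reverse order when a later step fails. A minimal, self-contained
sketch of the same idiom (hypothetical names, not the actual driver
functions):

/* Hedged sketch of the unwind ordering used in the hunk above. */
#include <errno.h>
#include <stdio.h>

static int  create_rep2vf(void)  { return 0; }            /* step 1 */
static int  create_vf2rep(void)  { return 0; }            /* step 2 */
static int  notify_fw(void)      { return -1; }           /* step 3, fails here */
static void destroy_vf2rep(void) { puts("undo vf2rep"); }
static void destroy_rep2vf(void) { puts("undo rep2vf"); }

static int vfr_setup_example(void)
{
	if (create_rep2vf())
		goto err;
	if (create_vf2rep())
		goto rep2vf_free;   /* undo step 1 only */
	if (notify_fw())
		goto vf2rep_free;   /* undo step 2, then step 1 */
	return 0;

vf2rep_free:
	destroy_vf2rep();
rep2vf_free:
	destroy_rep2vf();
err:
	return -EIO;
}

int main(void) { return vfr_setup_example() ? 1 : 0; }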
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index 598da7153..fb4f712ce 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -35127,6 +35127,144 @@ struct hwrm_cfa_pair_info_output {
uint8_t valid;
} __rte_packed;
+/**********************
+ * hwrm_cfa_vfr_alloc *
+ **********************/
+
+
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vfr_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_id;
+ /*
+ * This field is reserved for future use.
+ * It shall be set to 0.
+ */
+ uint16_t reserved;
+ uint8_t unused_0[4];
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+} __rte_packed;
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+/*********************
+ * hwrm_cfa_vfr_free *
+ *********************/
+
+
+/* hwrm_cfa_vfr_free_input (size:448b/56B) */
+struct hwrm_cfa_vfr_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+ /* Logical VF number (range: 0 -> MAX_VFS -1). */
+ uint16_t vf_id;
+ uint16_t reserved;
+ uint8_t unused_0[4];
+} __rte_packed;
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __rte_packed;
+
+
+
/***************************************
* hwrm_cfa_redirect_query_tunnel_type *
***************************************/
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 16/22] net/bnxt: modify shadow tcam and tbl reference count logic
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (14 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 15/22] net/bnxt: delete VF FW rules on representor create Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 17/22] net/bnxt: add tcam table processing for search and alloc Ajit Khaparde
` (6 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Mike Baucom, Kishore Padmanabha
From: Mike Baucom <michael.baucom@broadcom.com>
Moved setting of the refcnt for shadow tcam and table entries to the
allocation path only. The insert can be called multiple times for
updates and was resetting the refcnt to 1 on each call; with this
change, repeated insertions and modifications no longer alter the
reference count.
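As a rough, self-contained sketch of that rule (simplified, hypothetical
structures; not the bnxt shadow-table code), the reference count is
initialized exactly once on the bind/allocation path, while the
insert/update path only refreshes the stored result:

/* Hedged sketch of the refcnt rule described above; the struct and
 * helper names are illustrative stand-ins, not driver code. */
#include <stdint.h>
#include <string.h>

struct shadow_entry {
	uint32_t refcnt;
	uint8_t  result[16];
	uint16_t result_size;
};

/* Allocation/bind path: the only place the refcnt is initialized. */
static void shadow_bind(struct shadow_entry *e)
{
	e->refcnt = 1;
}

/* Insert/update path: may run many times and must not touch refcnt. */
static void shadow_insert(struct shadow_entry *e,
			  const uint8_t *result, uint16_t size)
{
	if (size > sizeof(e->result))
		size = sizeof(e->result);
	memcpy(e->result, result, size);
	e->result_size = size;
	/* refcnt is deliberately left unchanged here. */
}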
Signed-off-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
---
drivers/net/bnxt/tf_core/tf_shadow_tbl.c | 2 --
drivers/net/bnxt/tf_core/tf_shadow_tcam.c | 2 +-
2 files changed, 1 insertion(+), 3 deletions(-)
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
index 019a26eba..a4207eb3a 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
@@ -687,8 +687,6 @@ tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms)
if (!TF_SHADOW_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
return 0;
- sr_entry->refcnt = 1;
-
return 0;
}
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
index a0130d6a8..e2c347a1e 100644
--- a/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_shadow_tcam.c
@@ -472,6 +472,7 @@ tf_shadow_tcam_bind_index(struct tf_shadow_tcam_bind_index_parms *parms)
/* Write the result table */
sr_entry->key_size = parms->key_size;
sr_entry->hb_handle = parms->hb_handle;
+ sr_entry->refcnt = 1;
return 0;
}
@@ -738,7 +739,6 @@ tf_shadow_tcam_insert(struct tf_shadow_tcam_insert_parms *parms)
memcpy(sr_entry->result, sparms->result, sparms->result_size);
sr_entry->result_size = sparms->result_size;
- sr_entry->refcnt = 1;
return 0;
}
--
2.21.1 (Apple Git-122.3)
* [dpdk-dev] [PATCH v3 17/22] net/bnxt: add tcam table processing for search and alloc
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 00/22] bnxt patches Ajit Khaparde
` (15 preceding siblings ...)
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 16/22] net/bnxt: modify shadow tcam and tbl reference count logic Ajit Khaparde
@ 2020-07-24 5:32 ` Ajit Khaparde
2020-07-24 5:32 ` [dpdk-dev] [PATCH v3 18/22] net/bnxt: add templates for search before alloc Ajit Khaparde
` (5 subsequent siblings)
22 siblings, 0 replies; 102+ messages in thread
From: Ajit Khaparde @ 2020-07-24 5:32 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, Kishore Padmanabha, Michael Baucom
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Added tcam table processing to enable the search-and-allocate support,
including support for updating tcam entries.
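At a high level the mapper now searches the tcam first when the table
requests it, reuses the returned index on a hit, and otherwise allocates
an index and writes the result blob. A minimal, self-contained sketch of
that flow (hypothetical helpers, not the actual ULP/TruFlow APIs):

/* Hedged sketch of the search-before-alloc flow wired into the mapper
 * by this patch; every name below is a simplified stand-in. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Trivial stubs so the sketch compiles on its own. */
static bool tcam_search(uint16_t *idx, bool *hit) { *idx = 7; *hit = true; return true; }
static bool tcam_alloc(uint16_t *idx) { *idx = 42; return true; }
static bool tcam_write(uint16_t idx) { printf("write @%u\n", idx); return true; }

static int tcam_process(bool search_before_alloc)
{
	uint16_t idx = 0;
	bool hit = false;

	if (!search_before_alloc) {
		/* No reuse requested: simply allocate a new index. */
		if (!tcam_alloc(&idx))
			return -1;
	} else {
		/* Search first; a hit yields an index that can be reused. */
		if (!tcam_search(&idx, &hit))
			return -1;
	}

	/* The result blob is built and written only when the entry is
	 * new; a hit reuses the existing entry as-is. */
	if (!search_before_alloc || !hit)
		return tcam_write(idx) ? 0 : -1;

	return 0;
}

int main(void) { return tcam_process(true); }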
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_ulp/ulp_mapper.c | 317 ++++++++++++------
.../net/bnxt/tf_ulp/ulp_template_db_enum.h | 5 +-
drivers/net/bnxt/tf_ulp/ulp_template_struct.h | 8 +-
3 files changed, 213 insertions(+), 117 deletions(-)
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 4dee65971..6ac4b0f83 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -1317,20 +1317,177 @@ ulp_mapper_mark_vfr_idx_process(struct bnxt_ulp_mapper_parms *parms,
return rc;
}
+/*
+ * Tcam table - create the result blob.
+ * data [out] - the result blob data
+ */
+static int32_t
+ulp_mapper_tcam_tbl_result_create(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl,
+ struct ulp_blob *data)
+{
+ struct bnxt_ulp_mapper_result_field_info *dflds;
+ uint32_t num_dflds;
+ uint32_t encap_flds = 0;
+ uint32_t i;
+ int32_t rc = 0;
+
+ /* Create the result data blob */
+ dflds = ulp_mapper_result_fields_get(tbl, &num_dflds,
+ &encap_flds);
+ if (!dflds || !num_dflds || encap_flds) {
+ BNXT_TF_DBG(ERR, "Failed to get data fields.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_dflds; i++) {
+ rc = ulp_mapper_result_field_process(parms,
+ tbl->direction,
+ &dflds[i],
+ data,
+ "TCAM Result");
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Failed to set data fields\n");
+ return -EINVAL;
+ }
+ }
+ return rc;
+}
+
+/* Tcam table scan the identifier list and allocate each identifier */
+static int32_t
+ulp_mapper_tcam_tbl_scan_ident_alloc(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl)
+{
+ struct bnxt_ulp_mapper_ident_info *idents;
+ uint32_t num_idents;
+ uint32_t i;
+
+ /*
+ * Since the cache entry is responsible for allocating
+ * identifiers when in use, allocate the identifiers only
+ * during normal processing.
+ */
+ if (parms->tcam_tbl_opc ==
+ BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL) {
+ idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
+
+ for (i = 0; i < num_idents; i++) {
+ if (ulp_mapper_ident_process(parms, tbl,
+ &idents[i], NULL))
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Tcam table scan the identifier list and extract the identifier from
+ * the result blob.
+ */
+static int32_t
+ulp_mapper_tcam_tbl_scan_ident_extract(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl,
+ struct ulp_blob *data)
+{
+ struct bnxt_ulp_mapper_ident_info *idents;
+ uint32_t num_idents = 0, i;
+ int32_t rc = 0;
+
+ /*
+ * Extract the listed identifiers from the result field,
+ * no need to allocate them.
+ */
+ idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
+ for (i = 0; i < num_idents; i++) {
+ rc = ulp_mapper_ident_extract(parms, tbl, &idents[i], data);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "Error in identifier extraction\n");
+ return rc;
+ }
+ }
+ return rc;
+}
+
+/* Internal function to write the tcam entry */
+static int32_t
+ulp_mapper_tcam_tbl_entry_write(struct bnxt_ulp_mapper_parms *parms,
+ struct bnxt_ulp_mapper_tbl_info *tbl,
+ struct ulp_blob *key,
+ struct ulp_blob *mask,
+ struct ulp_blob *data,
+ uint16_t idx)
+{
+ struct tf_set_tcam_entry_parms sparms = { 0 };
+ struct tf *tfp;
+ uint16_t tmplen;
+ int32_t rc;
+
+ tfp = bnxt_ulp_cntxt_tfp_get(parms->ulp_ctx);
+ if (!tfp) {
+ BNXT_TF_DBG(ERR, "Failed to get truflow pointer\n");
+ return -EINVAL;
+ }
+
+ sparms.dir = tbl->direction;
+ sparms.tcam_tbl_type = tbl->resource_type;
+ sparms.idx = idx;
+ /* Already verified the key/mask lengths */
+ sparms.key = ulp_blob_data_get(key, &tmplen);
+ sparms.mask = ulp_blob_data_get(mask, &tmplen);
+ sparms.key_sz_in_bits = tbl->key_bit_size;
+ sparms.result = ulp_blob_data_get(data, &tmplen);
+
+ if (tbl->result_bit_size != tmplen) {
+ BNXT_TF_DBG(ERR, "Result len (%d) != Expected (%d)\n",
+ tmplen, tbl->result_bit_size);
+ return -EINVAL;
+ }
+ sparms.result_sz_in_bits = tbl->result_bit_size;
+ if (tf_set_tcam_entry(tfp, &sparms)) {
+ BNXT_TF_DBG(ERR, "tcam[%s][%s][%x] write failed.\n",
+ tf_tcam_tbl_2_str(sparms.tcam_tbl_type),
+ tf_dir_2_str(sparms.dir), sparms.idx);
+ return -EIO;
+ }
+ BNXT_TF_DBG(INFO, "tcam[%s][%s][%x] write success.\n",
+ tf_tcam_tbl_2_str(sparms.tcam_tbl_type),
+ tf_dir_2_str(sparms.dir), sparms.idx);
+
+ /* Update cache with TCAM index if the cache was allocated. */
+ if (parms->tcam_tbl_opc ==
+ BNXT_ULP_MAPPER_TCAM_TBL_OPC_CACHE_ALLOC) {
+ if (!parms->cache_ptr) {
+ BNXT_TF_DBG(ERR, "Unable to update cache");
+ return -EINVAL;
+ }
+ parms->cache_ptr->tcam_idx = idx;
+ }
+
+ /* Mark action */
+ rc = ulp_mapper_mark_act_ptr_process(parms, tbl);
+ if (rc) {
+ BNXT_TF_DBG(ERR, "failed mark action processing\n");
+ return rc;
+ }
+
+ return rc;
+}
+
static int32_t
ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct bnxt_ulp_mapper_tbl_info *tbl)
{
struct bnxt_ulp_mapper_class_key_field_info *kflds;
- struct ulp_blob key, mask, data;
+ struct ulp_blob key, mask, data, update_data;
uint32_t i, num_kflds;
struct tf *tfp;
int32_t rc, trc;
struct tf_alloc_tcam_entry_parms aparms = { 0 };
struct tf_search_tcam_entry_parms searchparms = { 0 };
- struct tf_set_tcam_entry_parms sparms = { 0 };
struct ulp_flow_db_res_params fid_parms = { 0 };
struct tf_free_tcam_entry_parms free_parms = { 0 };
+ enum bnxt_ulp_search_before_alloc search_flag;
uint32_t hit = 0;
uint16_t tmplen = 0;
uint16_t idx;
@@ -1358,6 +1515,8 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
!ulp_blob_init(&mask, tbl->key_bit_size,
parms->device_params->byte_order) ||
!ulp_blob_init(&data, tbl->result_bit_size,
+ parms->device_params->byte_order) ||
+ !ulp_blob_init(&update_data, tbl->result_bit_size,
parms->device_params->byte_order)) {
BNXT_TF_DBG(ERR, "blob inits failed.\n");
return -EINVAL;
@@ -1388,7 +1547,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
}
}
- if (!tbl->srch_b4_alloc) {
+ if (tbl->srch_b4_alloc == BNXT_ULP_SEARCH_BEFORE_ALLOC_NO) {
/*
* No search for re-use is requested, so simply allocate the
* tcam index.
@@ -1455,113 +1614,49 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
hit = searchparms.hit;
}
- /* Build the result */
- if (!tbl->srch_b4_alloc || !hit) {
- struct bnxt_ulp_mapper_result_field_info *dflds;
- struct bnxt_ulp_mapper_ident_info *idents;
- uint32_t num_dflds, num_idents;
- uint32_t encap_flds = 0;
-
- /*
- * Since the cache entry is responsible for allocating
- * identifiers when in use, allocate the identifiers only
- * during normal processing.
- */
- if (parms->tcam_tbl_opc ==
- BNXT_ULP_MAPPER_TCAM_TBL_OPC_NORMAL) {
- idents = ulp_mapper_ident_fields_get(tbl, &num_idents);
-
- for (i = 0; i < num_idents; i++) {
- rc = ulp_mapper_ident_process(parms, tbl,
- &idents[i], NULL);
- /* Already logged the error, just return */
- if (rc)
- goto error;
- }
- }
-
- /* Create the result data blob */
- dflds = ulp_mapper_result_fields_get(tbl, &num_dflds,
- &encap_flds);
- if (!dflds || !num_dflds || encap_flds) {
- BNXT_TF_DBG(ERR, "Failed to get data fields.\n");
- rc = -EINVAL;
- goto error;
- }
-
- for (i = 0; i < num_dflds; i++) {
- rc = ulp_mapper_result_field_process(parms,
- tbl->direction,
- &dflds[i],
- &data,
- "TCAM Result");
- if (rc) {
- BNXT_TF_DBG(ERR, "Failed to set data fields\n");
- goto error;
- }
- }
-
- sparms.dir = tbl->direction;
- sparms.tcam_tbl_type = tbl->resource_type;
- sparms.idx = idx;
- /* Already verified the key/mask lengths */
- sparms.key = ulp_blob_data_get(&key, &tmplen);
- sparms.mask = ulp_blob_data_get(&mask, &tmplen);
- sparms.key_sz_in_bits = tbl->key_bit_size;
- sparms.result = ulp_blob_data_get(&data, &tmplen);
-
- if (tbl->result_bit_size != tmplen) {
- BNXT_TF_DBG(ERR, "Result len (%d) != Expected (%d)\n",
- tmplen, tbl->result_bit_size);
- rc = -EINVAL;
- goto error;
- }
- sparms.result_sz_in_bits = tbl->result_bit_size;
-
- rc = tf_set_tcam_entry(tfp, &sparms);
- if (