DPDK patches and discussions
 help / color / mirror / Atom feed
From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
To: dev@dpdk.org
Cc: Peter Spreadborough <peter.spreadborough@broadcom.com>,
	Randy Schacher <stuart.schacher@broadcom.com>,
	Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Subject: [dpdk-dev] [PATCH 15/58] net/bnxt: add dpool allocator for EM allocation
Date: Sun, 30 May 2021 14:28:46 +0530	[thread overview]
Message-ID: <20210530085929.29695-16-venkatkumar.duvvuru@broadcom.com> (raw)
In-Reply-To: <20210530085929.29695-1-venkatkumar.duvvuru@broadcom.com>

From: Peter Spreadborough <peter.spreadborough@broadcom.com>

The dpool allocator supports variable size entries and
also supports defragmentation of the allocation space.
EM will by default use the fixed size stack allocator.
The dynamic allocator may be selected at build time.

Signed-off-by: Peter Spreadborough <peter.spreadborough@broadcom.com>
Signed-off-by: Randy Schacher <stuart.schacher@broadcom.com>
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Farah Smith <farah.smith@broadcom.com>
---
 drivers/net/bnxt/tf_core/dpool.c              | 373 ++++++++++++++++++
 drivers/net/bnxt/tf_core/dpool.h              | 309 +++++++++++++++
 drivers/net/bnxt/tf_core/meson.build          |   1 +
 drivers/net/bnxt/tf_core/tf_core.h            |  42 ++
 drivers/net/bnxt/tf_core/tf_device.h          |  34 ++
 drivers/net/bnxt/tf_core/tf_device_p58.c      |   5 +
 drivers/net/bnxt/tf_core/tf_em.h              |  26 ++
 .../net/bnxt/tf_core/tf_em_hash_internal.c    | 102 ++++-
 drivers/net/bnxt/tf_core/tf_em_internal.c     | 215 ++++++++--
 drivers/net/bnxt/tf_core/tf_msg.c             |  69 ++++
 drivers/net/bnxt/tf_core/tf_msg.h             |  15 +
 drivers/net/bnxt/tf_core/tf_session.h         |   5 +
 12 files changed, 1156 insertions(+), 40 deletions(-)
 create mode 100644 drivers/net/bnxt/tf_core/dpool.c
 create mode 100644 drivers/net/bnxt/tf_core/dpool.h

diff --git a/drivers/net/bnxt/tf_core/dpool.c b/drivers/net/bnxt/tf_core/dpool.c
new file mode 100644
index 0000000000..a5f9f866b7
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/dpool.c
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+
+#include "tfp.h"
+#include "dpool.h"
+
+int dpool_init(struct dpool *dpool,
+	       uint32_t start_index,
+	       uint32_t size,
+	       uint8_t max_alloc_size,
+	       void *user_data,
+	       int (*move_callback)(void *, uint64_t, uint32_t))
+{
+	struct tfp_calloc_parms parms;
+	uint32_t i;
+	int rc;
+
+	/* Allocate the entry array that backs this pool. */
+	parms.nitems = size;
+	parms.size = sizeof(struct dpool_entry);
+	parms.alignment = 0;
+
+	rc = tfp_calloc(&parms);
+	if (rc)
+		return rc;
+
+	dpool->entry = parms.mem_va;
+	dpool->start_index = start_index;
+	dpool->size = size;
+	dpool->max_alloc_size = max_alloc_size;
+	dpool->user_data = user_data;
+	dpool->move_callback = move_callback;
+
+	/* Mark every entry free and record its FW index. */
+	for (i = 0; i < size; i++) {
+		dpool->entry[i].flags = 0;
+		dpool->entry[i].index = start_index + i;
+		dpool->entry[i].entry_data = 0UL;
+	}
+
+	return 0;
+}
+
+/*
+ * Move the allocation starting at src_index into the free slot at
+ * dst_index, notifying the owner through move_callback so the FW-side
+ * record can be relocated as well.
+ *
+ * Returns 0 on success, -1 if entry[dst_index] is not free.
+ *
+ * NOTE(review): only entry[dst_index] is checked for being free; this
+ * assumes the caller guarantees 'size' free entries at dst_index and
+ * that the dst and src runs do not overlap — confirm with callers.
+ */
+static int dpool_move(struct dpool *dpool,
+		      uint32_t dst_index,
+		      uint32_t src_index)
+{
+	uint32_t size;
+	uint32_t i;
+	if (DP_IS_FREE(dpool->entry[dst_index].flags)) {
+		size = DP_FLAGS_SIZE(dpool->entry[src_index].flags);
+
+		/* Copy flags (START bit + size) and owner data to the new start. */
+		dpool->entry[dst_index].flags = dpool->entry[src_index].flags;
+		dpool->entry[dst_index].entry_data = dpool->entry[src_index].entry_data;
+
+		if (dpool->move_callback != NULL) {
+			dpool->move_callback(dpool->user_data,
+					     dpool->entry[src_index].entry_data,
+					     dst_index + dpool->start_index);
+		}
+
+		/* Release the old start entry. */
+		dpool->entry[src_index].flags = 0;
+		dpool->entry[src_index].entry_data = 0UL;
+
+		/* Mark the tail of the new run used and the old run free. */
+		for (i = 1; i < size; i++) {
+			dpool->entry[dst_index + i].flags = size;
+			dpool->entry[src_index + i].flags = 0;
+		}
+	} else {
+		return -1;
+	}
+
+	return 0;
+}
+
+
+/*
+ * De-fragment the pool by repeatedly moving used runs into free runs
+ * until either no candidate move remains or (for DP_DEFRAG_TO_FIT) a
+ * free run of at least entry_size entries exists.
+ *
+ * Returns the size of the largest free run on success, or a negative
+ * errno on allocation/move failure.
+ */
+int dpool_defrag(struct dpool *dpool,
+		 uint32_t entry_size,
+		 uint8_t defrag)
+{
+	struct dpool_free_list *free_list;
+	struct dpool_adj_list *adj_list;
+	uint32_t count;
+	uint32_t index;
+	uint32_t used;
+	uint32_t i;
+	uint32_t size;
+	uint32_t largest_free_index = 0;
+	uint32_t largest_free_size;
+	uint32_t max;
+	uint32_t max_index;
+	uint32_t max_size = 0;
+	int rc;
+
+	free_list = rte_zmalloc("dpool_free_list",
+				sizeof(struct dpool_free_list), 0);
+	if (free_list == NULL) {
+		TFP_DRV_LOG(ERR, "dpool free list allocation failed\n");
+		return -ENOMEM;
+	}
+
+	adj_list = rte_zmalloc("dpool_adjacent_list",
+				sizeof(struct dpool_adj_list), 0);
+	if (adj_list == NULL) {
+		TFP_DRV_LOG(ERR, "dpool adjacent list allocation failed\n");
+		/* Do not leak the free list on this error path. */
+		rte_free(free_list);
+		return -ENOMEM;
+	}
+
+	while (1) {
+		/*
+		 * Create list of free entries
+		 */
+		free_list->size = 0;
+		largest_free_size = 0;
+		largest_free_index = 0;
+		count = 0;
+
+		for (i = 0; i < dpool->size; i++) {
+			if (DP_IS_FREE(dpool->entry[i].flags)) {
+				if (count == 0)
+					index = i;
+				count++;
+			} else if (count > 0) {
+				free_list->entry[free_list->size].index = index;
+				free_list->entry[free_list->size].size = count;
+
+				if (count > largest_free_size) {
+					largest_free_index = free_list->size;
+					largest_free_size = count;
+				}
+
+				free_list->size++;
+				count = 0;
+			}
+		}
+
+		if (free_list->size == 0)
+			largest_free_size = count;
+
+		/*
+		 * If using defrag to fit and there's a large enough
+		 * space then we are done.
+		 */
+		if (defrag == DP_DEFRAG_TO_FIT &&
+		    largest_free_size >= entry_size)
+			goto done;
+
+		/*
+		 * Create list of entries adjacent to free entries
+		 */
+		count = 0;
+		adj_list->size = 0;
+		used = 0;
+
+		for (i = 0; i < dpool->size; ) {
+			if (DP_IS_USED(dpool->entry[i].flags)) {
+				used++;
+
+				if (count > 0) {
+					adj_list->entry[adj_list->size].index = i;
+					adj_list->entry[adj_list->size].size =
+						DP_FLAGS_SIZE(dpool->entry[i].flags);
+					adj_list->entry[adj_list->size].left = count;
+
+					if (adj_list->size > 0 && used == 1)
+						adj_list->entry[adj_list->size - 1].right = count;
+
+					adj_list->size++;
+				}
+
+				count = 0;
+				i += DP_FLAGS_SIZE(dpool->entry[i].flags);
+			} else {
+				used = 0;
+				count++;
+				i++;
+			}
+		}
+
+		/*
+		 * Using the size of the largest free space available
+		 * select the adjacency list entry of that size with
+		 * the largest left + right + size count. If there
+		 * are no entries of that size then decrement the size
+		 * and try again.
+		 */
+		max = 0;
+		max_index = 0;
+		max_size = 0;
+
+		for (size = largest_free_size; size > 0; size--) {
+			for (i = 0; i < adj_list->size; i++) {
+				if (adj_list->entry[i].size == size &&
+				    ((size +
+				      adj_list->entry[i].left +
+				      adj_list->entry[i].right) > max)) {
+					max = size +
+						adj_list->entry[i].left +
+						adj_list->entry[i].right;
+					max_size = size;
+					max_index = adj_list->entry[i].index;
+				}
+			}
+
+			if (max)
+				break;
+		}
+
+		/*
+		 * If the max entry is smaller than the largest_free_size
+		 * find the first entry in the free list that it can fit in to.
+		 */
+		if (max_size < largest_free_size) {
+			for (i = 0; i < free_list->size; i++) {
+				if (free_list->entry[i].size >= max_size) {
+					largest_free_index = i;
+					break;
+				}
+			}
+		}
+
+		/*
+		 * If we have a contender then move it to the new spot.
+		 */
+		if (max) {
+			rc = dpool_move(dpool,
+					free_list->entry[largest_free_index].index,
+					max_index);
+			if (rc) {
+				rte_free(free_list);
+				rte_free(adj_list);
+				return rc;
+			}
+		} else {
+			break;
+		}
+	}
+
+done:
+	rte_free(free_list);
+	rte_free(adj_list);
+	return largest_free_size;
+}
+
+
+uint32_t dpool_alloc(struct dpool *dpool,
+		     uint32_t size,
+		     uint8_t defrag)
+{
+	uint32_t i;
+	uint32_t j;
+	uint32_t count = 0;
+	uint32_t first_entry_index;
+	int rc;
+
+	/* Reject zero-sized and oversized requests up front. */
+	if (size > dpool->max_alloc_size || size == 0)
+		return DP_INVALID_INDEX;
+
+	/*
+	 * Defrag requires EM move support.
+	 */
+	if (defrag != DP_DEFRAG_NONE &&
+	    dpool->move_callback == NULL)
+		return DP_INVALID_INDEX;
+
+	while (1) {
+		/*
+		 * find <size> consecutive free entries
+		 */
+		for (i = 0; i < dpool->size; i++) {
+			if (DP_IS_FREE(dpool->entry[i].flags)) {
+				if (count == 0)
+					first_entry_index = i;
+
+				count++;
+
+				if (count == size) {
+					/* Stamp the whole run with its size; only
+					 * the first entry carries DP_FLAGS_START.
+					 */
+					for (j = 0; j < size; j++) {
+						dpool->entry[j + first_entry_index].flags = size;
+						if (j == 0)
+							dpool->entry[j + first_entry_index].flags |= DP_FLAGS_START;
+					}
+
+					/* NOTE(review): entry_data is cleared on the
+					 * LAST entry of the run (i), while
+					 * dpool_set_entry_data() stores it on the
+					 * first. Equivalent only when size == 1 —
+					 * verify intent for multi-entry runs.
+					 */
+					dpool->entry[i].entry_data = 0UL;
+					/* Return the FW index of the first entry. */
+					return (first_entry_index + dpool->start_index);
+				}
+			} else {
+				count = 0;
+			}
+		}
+
+		/*
+		 * If defragging then do it to it
+		 */
+		if (defrag != DP_DEFRAG_NONE) {
+			rc = dpool_defrag(dpool, size, defrag);
+
+			if (rc < 0)
+				return DP_INVALID_INDEX;
+		} else {
+			break;
+		}
+
+		/*
+		 * If the defrag created enough space then try the
+		 * alloc again else quit.
+		 */
+		if ((uint32_t)rc < size)
+			break;
+	}
+
+	return DP_INVALID_INDEX;
+}
+
+int dpool_free(struct dpool *dpool,
+	       uint32_t index)
+{
+	int start = (index - dpool->start_index);
+	uint32_t size;
+	uint32_t i;
+
+	if (start < 0)
+		return -1;
+
+	/* Only the first entry of an allocation carries the START flag. */
+	if (!DP_IS_START(dpool->entry[start].flags))
+		return -1;
+
+	size = DP_FLAGS_SIZE(dpool->entry[start].flags);
+	if (size > dpool->max_alloc_size || size == 0)
+		return -1;
+
+	/* Release every entry that belonged to this allocation. */
+	for (i = start; i < (start + size); i++)
+		dpool->entry[i].flags = 0;
+
+	return 0;
+}
+
+void dpool_free_all(struct dpool *dpool)
+{
+	uint32_t idx;
+
+	/* Try a free on every FW index; dpool_free() rejects non-start
+	 * entries, so each allocation is released exactly once.
+	 */
+	for (idx = 0; idx < dpool->size; idx++)
+		dpool_free(dpool, dpool->entry[idx].index);
+}
+
+int dpool_set_entry_data(struct dpool *dpool,
+			 uint32_t index,
+			 uint64_t entry_data)
+{
+	int start = (index - dpool->start_index);
+
+	if (start < 0)
+		return -1;
+
+	/* entry_data lives on the first entry of an allocation only. */
+	if (!DP_IS_START(dpool->entry[start].flags))
+		return -1;
+
+	dpool->entry[start].entry_data = entry_data;
+	return 0;
+}
diff --git a/drivers/net/bnxt/tf_core/dpool.h b/drivers/net/bnxt/tf_core/dpool.h
new file mode 100644
index 0000000000..db9d53f01f
--- /dev/null
+++ b/drivers/net/bnxt/tf_core/dpool.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _DPOOL_H_
+#define _DPOOL_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define DP_MAX_FREE_SIZE 0x8000 /* 32K */
+
+#define DP_INVALID_INDEX 0xffffffff
+
+#define DP_FLAGS_START   0x80000000
+#define DP_IS_START(flags) ((flags) & DP_FLAGS_START)
+
+#define DP_FLAGS_SIZE_SHIFT 0
+#define DP_FLAGS_SIZE_MASK  0x07
+
+#define DP_FLAGS_SIZE(flags) (((flags) >> DP_FLAGS_SIZE_SHIFT) & DP_FLAGS_SIZE_MASK)
+
+#define DP_IS_FREE(flags) (((flags) & DP_FLAGS_SIZE_MASK) == 0)
+#define DP_IS_USED(flags) ((flags) & DP_FLAGS_SIZE_MASK)
+
+#define DP_DEFRAG_NONE   0x0
+#define DP_DEFRAG_ALL    0x1
+#define DP_DEFRAG_TO_FIT 0x2
+
+/**
+ * Free list entry
+ *
+ * Each entry includes an index in to the dpool entry array
+ * and the size of dpool array entry.
+ */
+struct dpool_free_list_entry {
+	/*
+	 * Index in to dpool entry array
+	 */
+	uint32_t index;
+	/*
+	 * The size of the entry in the dpool entry array
+	 */
+	uint32_t size;
+};
+
+/**
+ * Free list
+ *
+ * Used internally to record free entries in the dpool entry array.
+ * Each entry represents a single or multiple contiguous entries
+ * in the dpool entry array.
+ *
+ * Used only during the defrag operation.
+ */
+struct dpool_free_list {
+	/*
+	 * Number of entries in the free list
+	 */
+	uint32_t size;
+	/*
+	 * List of unused entries in the dpool entry array
+	 */
+	struct dpool_free_list_entry entry[DP_MAX_FREE_SIZE];
+};
+
+/**
+ * Adjacent list entry
+ *
+ * Each entry includes an index in to the dpool entry array,
+ * the size of the entry and the counts of free entries to the
+ * right and left of that entry.
+ */
+struct dpool_adj_list_entry {
+	/*
+	 * Index in to dpool entry array
+	 */
+	uint32_t index;
+	/*
+	 * The size of the entry in the dpool entry array
+	 */
+	uint32_t size;
+	/*
+	 * Number of free entries directly to the  left of
+	 * this entry
+	 */
+	uint32_t left;
+	/*
+	 * Number of free entries directly to the right of
+	 * this entry
+	 */
+	uint32_t right;
+};
+
+/**
+ * Adjacent list
+ *
+ * A list of references to entries in the dpool entry array that
+ * have free entries to the left and right. Since we pack to the
+ * left, entries will always have a non-zero left count.
+ *
+ * Used only during the defrag operation.
+ */
+struct dpool_adj_list {
+	/*
+	 * Number of entries in the adj list
+	 */
+	uint32_t size;
+	/*
+	 * List of entries in the dpool entry array that have
+	 * free entries directly to their left and right.
+	 */
+	struct dpool_adj_list_entry entry[DP_MAX_FREE_SIZE];
+};
+
+/**
+ * Dpool entry
+ *
+ * Each entry includes flags and the FW index.
+ */
+struct dpool_entry {
+	uint32_t flags;
+	uint32_t index;
+	uint64_t entry_data;
+};
+
+/**
+ * Dpool
+ *
+ * Used to manage resource pool. Includes the start FW index, the
+ * size of the entry array and the entry array itself.
+ */
+struct dpool {
+	uint32_t start_index;
+	uint32_t size;
+	uint8_t  max_alloc_size;
+	void *user_data;
+	int (*move_callback)(void *user_data,
+			     uint64_t entry_data,
+			     uint32_t new_index);
+	struct dpool_entry *entry;
+};
+
+/**
+ * dpool_init
+ *
+ * Initialize the dpool
+ *
+ * [in] dpool
+ *      Pointer to a dpool structure that includes an entry field
+ *      that points to the entry array. The user is responsible for
+ *      allocating memory for the dpool struct and the entry array.
+ *
+ * [in] start_index
+ *      The base index to use.
+ *
+ * [in] size
+ *      The number of entries
+ *
+ * [in] max_alloc_size
+ *      The maximum size, in entries, of a single allocation
+ *
+ * [in] user_data
+ *      Pointer to user data. Will be passed in callbacks.
+ *
+ * [in] move_callback
+ *      Pointer to move EM entry callback.
+ *
+ * Return
+ *      -  0 on success
+ *      - -1 on failure
+ *
+ */
+int dpool_init(struct dpool *dpool,
+	       uint32_t start_index,
+	       uint32_t size,
+	       uint8_t max_alloc_size,
+	       void *user_data,
+	       int (*move_callback)(void *, uint64_t, uint32_t));
+
+/**
+ * dpool_alloc
+ *
+ * Request a FW index of size and if necessary de-fragment the dpool
+ * array.
+ *
+ * [i] dpool
+ *     The dpool
+ *
+ * [i] size
+ *     The size of the requested allocation.
+ *
+ * [i] defrag
+ *     Operation to apply when there is insufficient space:
+ *
+ *     DP_DEFRAG_NONE   (0x0) - Don't do anything.
+ *     DP_DEFRAG_ALL    (0x1) - Defrag until there is nothing left
+ *                              to defrag.
+ *     DP_DEFRAG_TO_FIT (0x2) - Defrag until there is just enough space
+ *                              to insert the requested allocation.
+ *
+ * Return
+ *      - FW index on success
+ *      - DP_INVALID_INDEX on failure
+ *
+ */
+uint32_t dpool_alloc(struct dpool *dpool,
+		     uint32_t size,
+		     uint8_t defrag);
+
+/**
+ * dpool_set_entry_data
+ *
+ * Set the entry data field. This will be passed to callbacks.
+ *
+ * [i] dpool
+ *     The dpool
+ *
+ * [i] index
+ *     FW index
+ *
+ * [i] entry_data
+ *     Entry data value
+ *
+ * Return
+ *      - FW index on success
+ *      - DP_INVALID_INDEX on failure
+ *
+ */
+int dpool_set_entry_data(struct dpool *dpool,
+			 uint32_t index,
+			 uint64_t entry_data);
+
+/**
+ * dpool_free
+ *
+ * Free an allocated entry. The caller is responsible for the dpool
+ * and dpool entry array memory.
+ *
+ * [in] dpool
+ *      The pool
+ *
+ * [in] index
+ *      FW index to free up.
+ *
+ * Result
+ *      - 0  on success
+ *      - -1 on failure
+ *
+ */
+int dpool_free(struct dpool *dpool,
+	       uint32_t index);
+
+/**
+ * dpool_free_all
+ *
+ * Free all entries.
+ *
+ * [in] dpool
+ *      The pool
+ *
+ * Result
+ *      None (void function)
+ *
+ */
+void dpool_free_all(struct dpool *dpool);
+
+/**
+ * dpool_dump
+ *
+ * Debug/util function to dump the dpool array.
+ *
+ * [in] dpool
+ *      The pool
+ *
+ */
+void dpool_dump(struct dpool *dpool);
+
+/**
+ * dpool_defrag
+ *
+ * De-fragment the dpool array and apply the specified defrag strategy.
+ *
+ * [in] dpool
+ *      The dpool
+ *
+ * [in] entry_size
+ *      If using the DP_DEFRAG_TO_FIT strategy defrag will stop when there's
+ *      at least entry_size space available.
+ *
+ * [i] defrag
+ *     Defrag strategy:
+ *
+ *     DP_DEFRAG_ALL    (0x1) - Defrag until there is nothing left
+ *                              to defrag.
+ *     DP_DEFRAG_TO_FIT (0x2) - Defrag until there is just enough space
+ *                              to insert the requested allocation.
+ *
+ * Return
+ *      < 0 - on failure
+ *      > 0 - The size of the largest free space
+ */
+int dpool_defrag(struct dpool *dpool,
+		 uint32_t entry_size,
+		 uint8_t defrag);
+
+#endif /* _DPOOL_H_ */
diff --git a/drivers/net/bnxt/tf_core/meson.build b/drivers/net/bnxt/tf_core/meson.build
index 2c02214d83..3a91f04bc0 100644
--- a/drivers/net/bnxt/tf_core/meson.build
+++ b/drivers/net/bnxt/tf_core/meson.build
@@ -10,6 +10,7 @@ sources += files(
         'tf_core.c',
         'bitalloc.c',
         'tf_msg.c',
+        'dpool.c',
         'rand.c',
         'stack.c',
         'tf_em_common.c',
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index 4440d60fe5..08a083077c 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -1953,6 +1953,48 @@ struct tf_delete_em_entry_parms {
 	 */
 	uint64_t flow_handle;
 };
+/**
+ * tf_move_em_entry parameter definition
+ */
+struct tf_move_em_entry_parms {
+	/**
+	 * [in] receive or transmit direction
+	 */
+	enum tf_dir dir;
+	/**
+	 * [in] internal or external
+	 */
+	enum tf_mem mem;
+	/**
+	 * [in] ID of table scope to use (external only)
+	 */
+	uint32_t tbl_scope_id;
+	/**
+	 * [in] ID of table interface to use (SR2 only)
+	 */
+	uint32_t tbl_if_id;
+	/**
+	 * [in] epoch group IDs of entry to delete
+	 * 2 element array with 2 ids. (SR2 only)
+	 */
+	uint16_t *epochs;
+	/**
+	 * [out] The index of the entry
+	 */
+	uint16_t index;
+	/**
+	 * [in] External memory channel type to use
+	 */
+	enum tf_ext_mem_chan_type chan_type;
+	/**
+	 * [in] The index of the new EM record
+	 */
+	uint32_t new_index;
+	/**
+	 * [in] structure containing flow delete handle information
+	 */
+	uint64_t flow_handle;
+};
 /**
  * tf_search_em_entry parameter definition
  */
diff --git a/drivers/net/bnxt/tf_core/tf_device.h b/drivers/net/bnxt/tf_core/tf_device.h
index 16c2fe0f64..31806bb289 100644
--- a/drivers/net/bnxt/tf_core/tf_device.h
+++ b/drivers/net/bnxt/tf_core/tf_device.h
@@ -611,6 +611,22 @@ struct tf_dev_ops {
 	int (*tf_dev_delete_int_em_entry)(struct tf *tfp,
 					  struct tf_delete_em_entry_parms *parms);
 
+	/**
+	 * Move EM hash entry API
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] parms
+	 *   Pointer to E/EM move parameters
+	 *
+	 *    returns:
+	 *    0       - Success
+	 *    -EINVAL - Error
+	 */
+	int (*tf_dev_move_int_em_entry)(struct tf *tfp,
+					struct tf_move_em_entry_parms *parms);
+
 	/**
 	 * Insert EEM hash entry API
 	 *
@@ -661,6 +677,24 @@ struct tf_dev_ops {
 	int (*tf_dev_get_em_resc_info)(struct tf *tfp,
 				       struct tf_em_resource_info *parms);
 
+	/**
+	 * Move EEM hash entry API
+	 *
+	 *   Pointer to E/EM move parameters
+	 *
+	 * [in] tfp
+	 *   Pointer to TF handle
+	 *
+	 * [in] parms
+	 *   Pointer to em info
+	 *
+	 *    returns:
+	 *    0       - Success
+	 *    -EINVAL - Error
+	 */
+	int (*tf_dev_move_ext_em_entry)(struct tf *tfp,
+					struct tf_move_em_entry_parms *parms);
+
 	/**
 	 * Allocate EEM table scope
 	 *
diff --git a/drivers/net/bnxt/tf_core/tf_device_p58.c b/drivers/net/bnxt/tf_core/tf_device_p58.c
index c2bc283220..7917c9613a 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p58.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p58.c
@@ -295,6 +295,11 @@ const struct tf_dev_ops tf_dev_ops_p58 = {
 	.tf_dev_get_tcam_resc_info = tf_tcam_get_resc_info,
 	.tf_dev_insert_int_em_entry = tf_em_hash_insert_int_entry,
 	.tf_dev_delete_int_em_entry = tf_em_hash_delete_int_entry,
+#if (TF_EM_ALLOC == 1)
+	.tf_dev_move_int_em_entry = tf_em_move_int_entry,
+#else
+	.tf_dev_move_int_em_entry = NULL,
+#endif
 	.tf_dev_insert_ext_em_entry = NULL,
 	.tf_dev_delete_ext_em_entry = NULL,
 	.tf_dev_get_em_resc_info = tf_em_get_resc_info,
diff --git a/drivers/net/bnxt/tf_core/tf_em.h b/drivers/net/bnxt/tf_core/tf_em.h
index 60d90e28de..9d168c3c7f 100644
--- a/drivers/net/bnxt/tf_core/tf_em.h
+++ b/drivers/net/bnxt/tf_core/tf_em.h
@@ -13,6 +13,16 @@
 
 #include "hcapi_cfa_defs.h"
 
+/**
+ * TF_EM_ALLOC
+ *
+ * 0: Use stack allocator with fixed sized entries
+ *    (default).
+ * 1: Use dpool allocator with variable size
+ *    entries.
+ */
+#define TF_EM_ALLOC 0
+
 #define TF_EM_MIN_ENTRIES     (1 << 15) /* 32K */
 #define TF_EM_MAX_ENTRIES     (1 << 27) /* 128M */
 
@@ -243,6 +253,22 @@ int tf_em_hash_insert_int_entry(struct tf *tfp,
 int tf_em_hash_delete_int_entry(struct tf *tfp,
 				struct tf_delete_em_entry_parms *parms);
 
+/**
+ * Move record from internal EM table
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_move_int_entry(struct tf *tfp,
+			 struct tf_move_em_entry_parms *parms);
+
 /**
  * Insert record in to external EEM table
  *
diff --git a/drivers/net/bnxt/tf_core/tf_em_hash_internal.c b/drivers/net/bnxt/tf_core/tf_em_hash_internal.c
index f6c9772b44..098e8af07e 100644
--- a/drivers/net/bnxt/tf_core/tf_em_hash_internal.c
+++ b/drivers/net/bnxt/tf_core/tf_em_hash_internal.c
@@ -22,7 +22,9 @@
 /**
  * EM Pool
  */
-extern struct stack em_pool[TF_DIR_MAX];
+#if (TF_EM_ALLOC == 1)
+#include "dpool.h"
+#endif
 
 /**
  * Insert EM internal entry API
@@ -39,7 +41,11 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
 	uint16_t rptr_index = 0;
 	uint8_t rptr_entry = 0;
 	uint8_t num_of_entries = 0;
-	struct stack *pool = &em_pool[parms->dir];
+#if (TF_EM_ALLOC == 1)
+	struct dpool *pool;
+#else
+	struct stack *pool;
+#endif
 	uint32_t index;
 	uint32_t key0_hash;
 	uint32_t key1_hash;
@@ -56,7 +62,20 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
 	rc = tf_session_get_device(tfs, &dev);
 	if (rc)
 		return rc;
+#if (TF_EM_ALLOC == 1)
+	pool = (struct dpool *)tfs->em_pool[parms->dir];
+	index = dpool_alloc(pool,
+			    parms->em_record_sz_in_bits / 128,
+			    DP_DEFRAG_TO_FIT);
 
+	if (index == DP_INVALID_INDEX) {
+		PMD_DRV_LOG(ERR,
+			    "%s, EM entry index allocation failed\n",
+			    tf_dir_2_str(parms->dir));
+		return -1;
+	}
+#else
+	pool = (struct stack *)tfs->em_pool[parms->dir];
 	rc = stack_pop(pool, &index);
 	if (rc) {
 		PMD_DRV_LOG(ERR,
@@ -64,6 +83,7 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
 			    tf_dir_2_str(parms->dir));
 		return rc;
 	}
+#endif
 
 	if (dev->ops->tf_dev_cfa_key_hash == NULL)
 		return -EINVAL;
@@ -83,19 +103,14 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
 						  &num_of_entries);
 	if (rc) {
 		/* Free the allocated index before returning */
+#if (TF_EM_ALLOC == 1)
+		dpool_free(pool, index);
+#else
 		stack_push(pool, index);
+#endif
 		return -1;
 	}
 
-	PMD_DRV_LOG
-		  (DEBUG,
-		   "%s, Internal entry @ Index:%d rptr_index:0x%x rptr_entry:0x%x num_of_entries:%d\n",
-		   tf_dir_2_str(parms->dir),
-		   index,
-		   rptr_index,
-		   rptr_entry,
-		   num_of_entries);
-
 	TF_SET_GFID(gfid,
 		    ((rptr_index << TF_EM_INTERNAL_INDEX_SHIFT) |
 		     rptr_entry),
@@ -113,6 +128,9 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
 				     rptr_index,
 				     rptr_entry,
 				     0);
+#if (TF_EM_ALLOC == 1)
+	dpool_set_entry_data(pool, index, parms->flow_handle);
+#endif
 	return 0;
 }
 
@@ -127,13 +145,71 @@ tf_em_hash_delete_int_entry(struct tf *tfp,
 			    struct tf_delete_em_entry_parms *parms)
 {
 	int rc = 0;
-	struct stack *pool = &em_pool[parms->dir];
+	struct tf_session *tfs;
+#if (TF_EM_ALLOC == 1)
+	struct dpool *pool;
+#else
+	struct stack *pool;
+#endif
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup session, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    strerror(-rc));
+		return rc;
+	}
 
 	rc = tf_msg_delete_em_entry(tfp, parms);
 
 	/* Return resource to pool */
-	if (rc == 0)
+	if (rc == 0) {
+#if (TF_EM_ALLOC == 1)
+		pool = (struct dpool *)tfs->em_pool[parms->dir];
+		dpool_free(pool, parms->index);
+#else
+		pool = (struct stack *)tfs->em_pool[parms->dir];
 		stack_push(pool, parms->index);
+#endif
+	}
+
+	return rc;
+}
+
+#if (TF_EM_ALLOC == 1)
+/** Move EM internal entry API
+ *
+ * returns:
+ * 0
+ * -EINVAL
+ */
+int
+tf_em_move_int_entry(struct tf *tfp,
+		     struct tf_move_em_entry_parms *parms)
+{
+	int rc = 0;
+	struct dpool *pool;
+	struct tf_session *tfs;
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup session, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Issue the EM move message to FW. */
+	rc = tf_msg_move_em_entry(tfp, parms);
+
+	/* On success release the old index back to the dpool. */
+	if (rc == 0) {
+		pool = (struct dpool *)tfs->em_pool[parms->dir];
+		dpool_free(pool, parms->index);
+	}
+
+	return rc;
+}
+#endif
diff --git a/drivers/net/bnxt/tf_core/tf_em_internal.c b/drivers/net/bnxt/tf_core/tf_em_internal.c
index e373a9b029..eec15b89bc 100644
--- a/drivers/net/bnxt/tf_core/tf_em_internal.c
+++ b/drivers/net/bnxt/tf_core/tf_em_internal.c
@@ -15,7 +15,6 @@
 #include "tf_msg.h"
 #include "tfp.h"
 #include "tf_ext_flow_handle.h"
-
 #include "bnxt.h"
 
 #define TF_EM_DB_EM_REC 0
@@ -23,7 +22,9 @@
 /**
  * EM Pool
  */
-struct stack em_pool[TF_DIR_MAX];
+#if (TF_EM_ALLOC == 1)
+#include "dpool.h"
+#else
 
 /**
  * Create EM Tbl pool of memory indexes.
@@ -41,14 +42,35 @@ struct stack em_pool[TF_DIR_MAX];
  *          - Failure, entry not allocated, out of resources
  */
 static int
-tf_create_em_pool(enum tf_dir dir,
+tf_create_em_pool(struct tf_session *tfs,
+		  enum tf_dir dir,
 		  uint32_t num_entries,
 		  uint32_t start)
 {
 	struct tfp_calloc_parms parms;
 	uint32_t i, j;
 	int rc = 0;
-	struct stack *pool = &em_pool[dir];
+	struct stack *pool;
+
+	/*
+	 * Allocate stack pool
+	 */
+	parms.nitems = 1;
+	parms.size = sizeof(struct stack);
+	parms.alignment = 0;
+
+	rc = tfp_calloc(&parms);
+
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s, EM stack allocation failure %s\n",
+			    tf_dir_2_str(dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	pool = (struct stack *)parms.mem_va;
+	tfs->em_pool[dir] = (void *)pool;
 
 	/* Assumes that num_entries has been checked before we get here */
 	parms.nitems = num_entries / TF_SESSION_EM_ENTRY_SIZE;
@@ -108,6 +130,8 @@ tf_create_em_pool(enum tf_dir dir,
 	return 0;
 cleanup:
 	tfp_free((void *)parms.mem_va);
+	tfp_free((void *)tfs->em_pool[dir]);
+	tfs->em_pool[dir] = NULL;
 	return rc;
 }
 
@@ -120,16 +144,23 @@ tf_create_em_pool(enum tf_dir dir,
  * Return:
  */
 static void
-tf_free_em_pool(enum tf_dir dir)
+tf_free_em_pool(struct tf_session *tfs,
+		enum tf_dir dir)
 {
-	struct stack *pool = &em_pool[dir];
+	struct stack *pool = (struct stack *)tfs->em_pool[dir];
 	uint32_t *ptr;
 
-	ptr = stack_items(pool);
+	if (pool != NULL) {
+		ptr = stack_items(pool);
+
+		if (ptr != NULL)
+			tfp_free(ptr);
 
-	if (ptr != NULL)
-		tfp_free(ptr);
+		tfp_free(pool);
+		tfs->em_pool[dir] = NULL;
+	}
 }
+#endif /* TF_EM_ALLOC != 1 */
 
 /**
  * Insert EM internal entry API
@@ -146,17 +177,44 @@ tf_em_insert_int_entry(struct tf *tfp,
 	uint16_t rptr_index = 0;
 	uint8_t rptr_entry = 0;
 	uint8_t num_of_entries = 0;
-	struct stack *pool = &em_pool[parms->dir];
+	struct tf_session *tfs;
+#if (TF_EM_ALLOC == 1)
+	struct dpool *pool;
+#else
+	struct stack *pool;
+#endif
 	uint32_t index;
 
-	rc = stack_pop(pool, &index);
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup session, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    strerror(-rc));
+		return rc;
+	}
 
+#if (TF_EM_ALLOC == 1)
+	pool = (struct dpool *)tfs->em_pool[parms->dir];
+	index = dpool_alloc(pool, TF_SESSION_EM_ENTRY_SIZE, 0);
+	if (index == DP_INVALID_INDEX) {
+		PMD_DRV_LOG(ERR,
+			    "%s, EM entry index allocation failed\n",
+			    tf_dir_2_str(parms->dir));
+		return -1;
+	}
+#else
+	pool = (struct stack *)tfs->em_pool[parms->dir];
+	rc = stack_pop(pool, &index);
 	if (rc) {
 		PMD_DRV_LOG(ERR,
 			    "%s, EM entry index allocation failed\n",
 			    tf_dir_2_str(parms->dir));
 		return rc;
 	}
+#endif
+
 
 	rptr_index = index;
 	rc = tf_msg_insert_em_internal_entry(tfp,
@@ -166,19 +224,13 @@ tf_em_insert_int_entry(struct tf *tfp,
 					     &num_of_entries);
 	if (rc) {
 		/* Free the allocated index before returning */
+#if (TF_EM_ALLOC == 1)
+		dpool_free(pool, index);
+#else
 		stack_push(pool, index);
+#endif
 		return -1;
 	}
-
-	PMD_DRV_LOG
-		  (DEBUG,
-		   "%s, Internal entry @ Index:%d rptr_index:0x%x rptr_entry:0x%x num_of_entries:%d\n",
-		   tf_dir_2_str(parms->dir),
-		   index,
-		   rptr_index,
-		   rptr_entry,
-		   num_of_entries);
-
 	TF_SET_GFID(gfid,
 		    ((rptr_index << TF_EM_INTERNAL_INDEX_SHIFT) |
 		     rptr_entry),
@@ -211,16 +263,86 @@ tf_em_delete_int_entry(struct tf *tfp,
 		       struct tf_delete_em_entry_parms *parms)
 {
 	int rc = 0;
-	struct stack *pool = &em_pool[parms->dir];
+	struct tf_session *tfs;
+#if (TF_EM_ALLOC == 1)
+	struct dpool *pool;
+#else
+	struct stack *pool;
+#endif
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup session, rc:%s\n",
+			    tf_dir_2_str(parms->dir),
+			    strerror(-rc));
+		return rc;
+	}
 
 	rc = tf_msg_delete_em_entry(tfp, parms);
 
 	/* Return resource to pool */
-	if (rc == 0)
+	if (rc == 0) {
+#if (TF_EM_ALLOC == 1)
+		pool = (struct dpool *)tfs->em_pool[parms->dir];
+		dpool_free(pool, parms->index);
+#else
+		pool = (struct stack *)tfs->em_pool[parms->dir];
 		stack_push(pool, parms->index);
+#endif
+	}
+
+	return rc;
+}
+
+#if (TF_EM_ALLOC == 1)
+/*
+ * dpool move callback, invoked during defrag when an EM entry must be
+ * relocated. entry_data is the value stored via dpool_set_entry_data()
+ * (the flow handle) and new_index is the destination FW index. The
+ * direction is recovered from the flow handle.
+ */
+static int
+tf_em_move_callback(void *user_data,
+		    uint64_t entry_data,
+		    uint32_t new_index)
+{
+	int rc;
+	struct tf *tfp = (struct tf *)user_data;
+	struct tf_move_em_entry_parms parms;
+	struct tf_dev_info     *dev;
+	struct tf_session      *tfs;
+
+	memset(&parms, 0, sizeof(parms));
+
+	parms.tbl_scope_id = 0;
+	parms.flow_handle  = entry_data;
+	parms.new_index    = new_index;
+	TF_GET_DIR_FROM_FLOW_ID(entry_data, parms.dir);
+	parms.mem          = TF_MEM_INTERNAL;
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup session, rc:%s\n",
+			    tf_dir_2_str(parms.dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Retrieve the device information */
+	rc = tf_session_get_device(tfs, &dev);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup device, rc:%s\n",
+			    tf_dir_2_str(parms.dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Delegate the move to the device; not all devices support it. */
+	if (dev->ops->tf_dev_move_int_em_entry != NULL)
+		rc = dev->ops->tf_dev_move_int_em_entry(tfp, &parms);
+	else
+		rc = -EOPNOTSUPP;
+
+	return rc;
+}
+#endif
 
 int
 tf_em_int_bind(struct tf *tfp,
@@ -311,14 +433,49 @@ tf_em_int_bind(struct tf *tfp,
 					    tf_dir_2_str(i));
 				return rc;
 			}
+#if (TF_EM_ALLOC == 1)
+			/*
+			 * Allocate dpool allocator for EM
+			 */
+			cparms.nitems = 1;
+			cparms.size = sizeof(struct dpool);
+			cparms.alignment = 0;
+
+			rc = tfp_calloc(&cparms);
 
-			rc = tf_create_em_pool(i,
-					       iparms.info->entry.stride,
-					       iparms.info->entry.start);
+			if (rc) {
+				TFP_DRV_LOG(ERR,
+					 "%s, EM stack allocation failure %s\n",
+					 tf_dir_2_str(i),
+					 strerror(-rc));
+				return rc;
+			}
+
+			tfs->em_pool[i] = (struct dpool *)cparms.mem_va;
+
+			rc = dpool_init(tfs->em_pool[i],
+					iparms.info->entry.start,
+					iparms.info->entry.stride,
+					7,
+					(void *)tfp,
+					tf_em_move_callback);
+#else
+			rc = tf_create_em_pool(tfs,
+				       i,
+				       iparms.info->entry.stride,
+				       iparms.info->entry.start);
+#endif
 			/* Logging handled in tf_create_em_pool */
 			if (rc)
 				return rc;
 		}
+
+		if (rc) {
+			TFP_DRV_LOG(ERR,
+				    "%s: EM pool init failed\n",
+				    tf_dir_2_str(i));
+			return rc;
+		}
 	}
 
 	return 0;
@@ -343,7 +500,11 @@ tf_em_int_unbind(struct tf *tfp)
 
 	if (!tf_session_is_shared_session(tfs)) {
 		for (i = 0; i < TF_DIR_MAX; i++)
-			tf_free_em_pool(i);
+#if (TF_EM_ALLOC == 1)
+			dpool_free_all(tfs->em_pool[i]);
+#else
+		tf_free_em_pool(tfs, i);
+#endif
 	}
 
 	rc = tf_session_get_db(tfp, TF_MODULE_TYPE_EM, &em_db_ptr);
diff --git a/drivers/net/bnxt/tf_core/tf_msg.c b/drivers/net/bnxt/tf_core/tf_msg.c
index 4a840f3473..2ee8a1e8a9 100644
--- a/drivers/net/bnxt/tf_core/tf_msg.c
+++ b/drivers/net/bnxt/tf_core/tf_msg.c
@@ -1004,6 +1004,75 @@ tf_msg_delete_em_entry(struct tf *tfp,
 	return 0;
 }
 
+int
+tf_msg_move_em_entry(struct tf *tfp,
+		     struct tf_move_em_entry_parms *em_parms)
+{
+	int rc;
+	struct tfp_send_msg_parms parms = { 0 };
+	struct hwrm_tf_em_move_input req = { 0 };
+	struct hwrm_tf_em_move_output resp = { 0 };
+	uint16_t flags;
+	uint8_t fw_session_id;
+	struct tf_dev_info *dev;
+	struct tf_session *tfs;
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session_internal(tfp, &tfs);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup session, rc:%s\n",
+			    tf_dir_2_str(em_parms->dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Retrieve the device information */
+	rc = tf_session_get_device(tfs, &dev);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Failed to lookup device, rc:%s\n",
+			    tf_dir_2_str(em_parms->dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
+	if (rc) {
+		TFP_DRV_LOG(ERR,
+			    "%s: Unable to lookup FW id, rc:%s\n",
+			    tf_dir_2_str(em_parms->dir),
+			    strerror(-rc));
+		return rc;
+	}
+
+	/* Populate the request */
+	req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
+
+	flags = (em_parms->dir == TF_DIR_TX ?
+		 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX :
+		 HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX);
+	req.flags = tfp_cpu_to_le_16(flags);
+	req.flow_handle = tfp_cpu_to_le_64(em_parms->flow_handle);
+	req.new_index = tfp_cpu_to_le_32(em_parms->new_index);
+
+	parms.tf_type = HWRM_TF_EM_MOVE;
+	parms.req_data = (uint32_t *)&req;
+	parms.req_size = sizeof(req);
+	parms.resp_data = (uint32_t *)&resp;
+	parms.resp_size = sizeof(resp);
+	parms.mailbox = dev->ops->tf_dev_get_mailbox();
+
+	rc = tfp_send_msg_direct(tf_session_get_bp(tfs),
+				 &parms);
+	if (rc)
+		return rc;
+
+	em_parms->index = tfp_le_to_cpu_16(resp.em_index);
+
+	return 0;
+}
+
 int tf_msg_ext_em_ctxt_mem_alloc(struct tf *tfp,
 				struct hcapi_cfa_em_table *tbl,
 				uint64_t *dma_addr,
diff --git a/drivers/net/bnxt/tf_core/tf_msg.h b/drivers/net/bnxt/tf_core/tf_msg.h
index 5ecaf9e7e7..e8662fef0e 100644
--- a/drivers/net/bnxt/tf_core/tf_msg.h
+++ b/drivers/net/bnxt/tf_core/tf_msg.h
@@ -315,6 +315,21 @@ tf_msg_hash_insert_em_internal_entry(struct tf *tfp,
 int tf_msg_delete_em_entry(struct tf *tfp,
 			   struct tf_delete_em_entry_parms *em_parms);
 
+/**
+ * Sends EM internal move request to Firmware
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] em_parms
+ *   Pointer to em move parameters
+ *
+ * Returns:
+ *   0 on Success else internal Truflow error
+ */
+int tf_msg_move_em_entry(struct tf *tfp,
+			 struct tf_move_em_entry_parms *em_parms);
+
 /**
  * Sends Ext EM mem allocation request to Firmware
  *
diff --git a/drivers/net/bnxt/tf_core/tf_session.h b/drivers/net/bnxt/tf_core/tf_session.h
index 0b8f63c374..e2cebd20a1 100644
--- a/drivers/net/bnxt/tf_core/tf_session.h
+++ b/drivers/net/bnxt/tf_core/tf_session.h
@@ -159,6 +159,11 @@ struct tf_session {
 	 * the pointer to the parent bp struct
 	 */
 	void *bp;
+
+	/**
+	 * EM allocator for session
+	 */
+	void *em_pool[TF_DIR_MAX];
 };
 
 /**
-- 
2.17.1


  parent reply	other threads:[~2021-05-30  9:02 UTC|newest]

Thread overview: 129+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-05-30  8:58 [dpdk-dev] [PATCH 00/58] enhancements to host based flow table management Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 01/58] net/bnxt: add CFA folder to HCAPI directory Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 02/58] net/bnxt: add base TRUFLOW support for Thor Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 03/58] net/bnxt: add mailbox selection via dev op Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 04/58] net/bnxt: check resource reservation in TRUFLOW Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 05/58] net/bnxt: update TRUFLOW resources Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 06/58] net/bnxt: add support for EM with FKB Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 07/58] net/bnxt: add L2 Context TCAM get support Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 08/58] net/bnxt: add action SRAM Translation Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 09/58] net/bnxt: add Thor WC TCAM support Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 10/58] net/bnxt: add 64B SRAM record management with RM Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 11/58] net/bnxt: add hashing changes for Thor Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 12/58] net/bnxt: modify TRUFLOW HWRM messages Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 13/58] net/bnxt: change RM database type Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 14/58] net/bnxt: add shared session support Venkat Duvvuru
2021-05-30  8:58 ` Venkat Duvvuru [this message]
2021-05-30  8:58 ` [dpdk-dev] [PATCH 16/58] net/bnxt: update shared session functionality Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 17/58] net/bnxt: modify resource reservation strategy Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 18/58] net/bnxt: shared TCAM region support Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 19/58] net/bnxt: cleanup session open/close messages Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 20/58] net/bnxt: add WC TCAM hi/lo move support Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 21/58] net/bnxt: add API to get shared table increments Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 22/58] net/bnxt: modify host session failure cleanup Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 23/58] net/bnxt: cleanup of WC TCAM shared unbind Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 24/58] net/bnxt: add support for WC TCAM shared session Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 25/58] net/bnxt: add API to clear hi/lo WC region Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 26/58] net/bnxt: check FW capability to support TRUFLOW Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 27/58] net/bnxt: add support for generic table processing Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 28/58] net/bnxt: add support for mapper flow database opcodes Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 29/58] net/bnxt: add conditional execution and rejection Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 30/58] net/bnxt: modify TCAM opcode processing Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 31/58] net/bnxt: modify VXLAN decap for multichannel mode Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 32/58] net/bnxt: modify table processing Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 33/58] net/bnxt: modify ULP priority opcode processing Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 34/58] net/bnxt: add support for conflict resolution Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 35/58] net/bnxt: add support for conditional goto processing Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 36/58] net/bnxt: set shared handle for generic table Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 37/58] net/bnxt: modify ULP template Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 38/58] net/bnxt: add conditional opcode and L4 port fields Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 39/58] net/bnxt: refactor TF ULP Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 40/58] net/bnxt: add partial header field processing Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 41/58] net/bnxt: add support for wild card pattern match Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 42/58] net/bnxt: add support for GRE flows Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 43/58] net/bnxt: enable extended exact match support Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 44/58] net/bnxt: refactor ULP mapper and parser Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 45/58] net/bnxt: add support for generic hash table Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 46/58] net/bnxt: add support for Thor platform Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 47/58] net/bnxt: refactor flow parser in ULP Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 48/58] net/bnxt: add shared session support to ULP Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 49/58] net/bnxt: add field opcodes in ULP Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 50/58] net/bnxt: add support for application ID in ULP matcher Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 51/58] net/bnxt: process resource lists before session open Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 52/58] net/bnxt: add support for shared sessions in ULP Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 53/58] net/bnxt: add HA support " Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 54/58] net/bnxt: add support for icmp6 ULP parsing Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 55/58] net/bnxt: add support for ULP context list for timers Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 56/58] net/bnxt: cleanup ULP parser and mapper Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 57/58] net/bnxt: reorganize ULP template directory structure Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 58/58] net/bnxt: add Thor template support Venkat Duvvuru
2021-06-13  0:05 ` [dpdk-dev] [PATCH v2 00/58] enhancements to host based flow table management Ajit Khaparde
2021-06-13  0:05   ` [dpdk-dev] [PATCH v2 01/58] net/bnxt: add CFA folder to HCAPI directory Ajit Khaparde
2021-06-13  0:05   ` [dpdk-dev] [PATCH v2 02/58] net/bnxt: add base TRUFLOW support for Thor Ajit Khaparde
2021-06-13  0:05   ` [dpdk-dev] [PATCH v2 03/58] net/bnxt: add mailbox selection via dev op Ajit Khaparde
2021-06-13  0:05   ` [dpdk-dev] [PATCH v2 04/58] net/bnxt: check resource reservation in TRUFLOW Ajit Khaparde
2021-06-13  0:05   ` [dpdk-dev] [PATCH v2 05/58] net/bnxt: update TRUFLOW resources Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 06/58] net/bnxt: add support for EM with FKB Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 07/58] net/bnxt: support L2 Context TCAM ops Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 08/58] net/bnxt: add action SRAM translation Ajit Khaparde
2021-07-05 21:23     ` Thomas Monjalon
2021-07-06 22:37       ` [dpdk-dev] [PATCH v3] " Ajit Khaparde
2021-07-06 22:58       ` [dpdk-dev] [PATCH v2 08/58] " Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 09/58] net/bnxt: add Thor WC TCAM support Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 10/58] net/bnxt: add 64B SRAM record management with RM Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 11/58] net/bnxt: add hashing changes for Thor Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 12/58] net/bnxt: modify TRUFLOW HWRM messages Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 13/58] net/bnxt: change RM database type Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 14/58] net/bnxt: add shared session support Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 15/58] net/bnxt: add dpool allocator for EM allocation Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 16/58] net/bnxt: update shared session functionality Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 17/58] net/bnxt: modify resource reservation strategy Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 18/58] net/bnxt: shared TCAM region support Ajit Khaparde
2021-07-05 21:27     ` Thomas Monjalon
2021-07-06 22:39       ` [dpdk-dev] [PATCH v3] " Ajit Khaparde
2021-07-06 22:57       ` [dpdk-dev] [PATCH v2 18/58] " Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 19/58] net/bnxt: cleanup logs in session handling paths Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 20/58] net/bnxt: add WC TCAM management support Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 21/58] net/bnxt: add API to get shared table increments Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 22/58] net/bnxt: refactor host session failure cleanup Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 23/58] net/bnxt: cleanup WC TCAM shared pool Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 24/58] net/bnxt: add support for WC TCAM shared session Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 25/58] net/bnxt: add API to clear TCAM regions Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 26/58] net/bnxt: check FW capability to support TRUFLOW Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 27/58] net/bnxt: add support for generic table processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 28/58] net/bnxt: add support for mapper flow database opcodes Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 29/58] net/bnxt: add conditional processing of templates Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 30/58] net/bnxt: modify TCAM opcode processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 31/58] net/bnxt: modify VXLAN decap for multichannel mode Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 32/58] net/bnxt: modify table processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 33/58] net/bnxt: add ULP priority opcode processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 34/58] net/bnxt: add support to identify duplicate flows Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 35/58] net/bnxt: add conditional goto processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 36/58] net/bnxt: set shared handle for generic table Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 37/58] net/bnxt: modify ULP template Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 38/58] net/bnxt: add conditional opcode and L4 port fields Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 39/58] net/bnxt: refactor TRUFLOW processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 40/58] net/bnxt: add partial header field processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 41/58] net/bnxt: add support for wild card pattern match Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 42/58] net/bnxt: add support for GRE flows Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 43/58] net/bnxt: enable extended exact match support Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 44/58] net/bnxt: refactor ULP mapper Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 45/58] net/bnxt: add support for generic hash table Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 46/58] net/bnxt: add support for Thor platform Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 47/58] net/bnxt: refactor flow parser in ULP Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 48/58] net/bnxt: add shared session support to ULP Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 49/58] net/bnxt: add field opcodes in ULP Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 50/58] net/bnxt: add support for application ID in ULP matcher Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 51/58] net/bnxt: process resource lists before session open Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 52/58] net/bnxt: add templates for shared sessions Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 53/58] net/bnxt: add HA support in ULP Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 54/58] net/bnxt: add ICMPv6 parser to ULP Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 55/58] net/bnxt: add context list for timers Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 56/58] net/bnxt: cleanup ULP parser and mapper Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 57/58] net/bnxt: reorganize ULP template directory structure Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 58/58] net/bnxt: add Thor template support Ajit Khaparde
2021-06-15 19:33   ` [dpdk-dev] [PATCH v2 00/58] enhancements to host based flow table management Ajit Khaparde
2021-07-07  8:43     ` Thomas Monjalon
2021-07-08  3:57       ` Ajit Khaparde
2021-07-08 12:51         ` Thomas Monjalon
2021-07-08 14:37           ` Ajit Khaparde

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210530085929.29695-16-venkatkumar.duvvuru@broadcom.com \
    --to=venkatkumar.duvvuru@broadcom.com \
    --cc=dev@dpdk.org \
    --cc=peter.spreadborough@broadcom.com \
    --cc=stuart.schacher@broadcom.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).