From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
To: dev@dpdk.org
Cc: Kishore Padmanabha <kishore.padmanabha@broadcom.com>,
Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Subject: [PATCH 3/4] net/bnxt: fix multi adapter support
Date: Tue, 16 Nov 2021 18:34:36 +0530
Message-ID: <20211116130437.2022-4-venkatkumar.duvvuru@broadcom.com>
In-Reply-To: <20211116130437.2022-1-venkatkumar.duvvuru@broadcom.com>
From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
1. Removed the global flag used for TruFlow global config initialization.
2. Changed the TruFlow context lock to a single global lock instead of a
   per-context lock.
3. Modified the ULP context list lookup to match on the ULP configuration
   data, so that alarm handlers operate on the correct ULP context.

These changes allow multiple network cards to be driven by a single DPDK
application.
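The per-port alarm cookie works roughly as sketched below. This is an
illustrative sketch only, not the driver code verbatim; it assumes the
driver-internal helpers bnxt_ulp_cntxt_entry_acquire()/
bnxt_ulp_cntxt_entry_release() introduced by this patch, and uses the
standard rte_eal_alarm_set()/rte_eal_alarm_cancel() API.

    /* Illustrative sketch: pass the port's cfg_data pointer as the alarm
     * cookie so the callback re-acquires the owning ULP context on a host
     * with several adapters, instead of grabbing "any" context.
     */
    #include <rte_alarm.h>

    #define DEMO_TIMER_US 1000000ULL  /* 1 second, hypothetical interval */

    static void
    demo_alarm_cb(void *arg)
    {
            /* 'arg' is the cfg_data of the port that armed the timer. */
            struct bnxt_ulp_context *ctxt = bnxt_ulp_cntxt_entry_acquire(arg);

            if (ctxt == NULL) {
                    /* Context list lock not available; re-arm with the
                     * same cookie so the retry still targets this port.
                     */
                    rte_eal_alarm_set(DEMO_TIMER_US, demo_alarm_cb, arg);
                    return;
            }

            /* ... per-context work (e.g. flow counter refresh) ... */

            bnxt_ulp_cntxt_entry_release();
            rte_eal_alarm_set(DEMO_TIMER_US, demo_alarm_cb, arg);
    }

Arming and cancelling use the same (callback, cookie) pair, e.g.
rte_eal_alarm_set(DEMO_TIMER_US, demo_alarm_cb, ctxt->cfg_data) and
rte_eal_alarm_cancel(demo_alarm_cb, ctxt->cfg_data), so stopping one port
removes only that port's pending alarm.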
Fixes: d75b55121bcd ("net/bnxt: add context list for timers")
Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Randy Schacher <stuart.schacher@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
---
drivers/net/bnxt/tf_core/tf_device_p4.c | 2 +-
drivers/net/bnxt/tf_core/tf_device_p58.c | 4 +--
drivers/net/bnxt/tf_core/tf_global_cfg.c | 35 ------------------------
drivers/net/bnxt/tf_core/tf_global_cfg.h | 2 +-
drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 12 ++++----
drivers/net/bnxt/tf_ulp/bnxt_ulp.h | 2 +-
drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c | 12 ++++----
drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c | 16 +++++------
8 files changed, 26 insertions(+), 59 deletions(-)
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.c b/drivers/net/bnxt/tf_core/tf_device_p4.c
index aa55587ba8..b8b3dcbb3f 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.c
@@ -243,7 +243,7 @@ tf_dev_p4_set_tcam_slice_info(struct tf *tfp,
enum tf_wc_num_slice num_slices_per_row)
{
int rc;
- struct tf_session *tfs;
+ struct tf_session *tfs = NULL;
/* Retrieve the session information */
rc = tf_session_get_session_internal(tfp, &tfs);
diff --git a/drivers/net/bnxt/tf_core/tf_device_p58.c b/drivers/net/bnxt/tf_core/tf_device_p58.c
index 987ad2a564..8179287e46 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p58.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p58.c
@@ -354,7 +354,7 @@ tf_dev_p58_set_tcam_slice_info(struct tf *tfp,
enum tf_wc_num_slice num_slices_per_row)
{
int rc;
- struct tf_session *tfs;
+ struct tf_session *tfs = NULL;
/* Retrieve the session information */
rc = tf_session_get_session_internal(tfp, &tfs);
@@ -401,7 +401,7 @@ tf_dev_p58_get_tcam_slice_info(struct tf *tfp,
uint16_t *num_slices_per_row)
{
int rc;
- struct tf_session *tfs;
+ struct tf_session *tfs = NULL;
/* Retrieve the session information */
rc = tf_session_get_session_internal(tfp, &tfs);
diff --git a/drivers/net/bnxt/tf_core/tf_global_cfg.c b/drivers/net/bnxt/tf_core/tf_global_cfg.c
index 98a42b2fe6..d83e7db315 100644
--- a/drivers/net/bnxt/tf_core/tf_global_cfg.c
+++ b/drivers/net/bnxt/tf_core/tf_global_cfg.c
@@ -20,11 +20,6 @@ struct tf_global_cfg_db {
struct tf_global_cfg_cfg *global_cfg_db[TF_DIR_MAX];
};
-/**
- * Init flag, set on bind and cleared on unbind
- */
-static uint8_t init;
-
/**
* Get HCAPI type parameters for a single element
*/
@@ -83,11 +78,6 @@ tf_global_cfg_bind(struct tf *tfp,
TF_CHECK_PARMS2(tfp, parms);
- if (init) {
- TFP_DRV_LOG(ERR, "Global Cfg DB already initialized\n");
- return -EINVAL;
- }
-
cparms.nitems = 1;
cparms.size = sizeof(struct tf_global_cfg_db);
cparms.alignment = 0;
@@ -100,13 +90,9 @@ tf_global_cfg_bind(struct tf *tfp,
global_cfg_db = cparms.mem_va;
global_cfg_db->global_cfg_db[TF_DIR_RX] = parms->cfg;
global_cfg_db->global_cfg_db[TF_DIR_TX] = parms->cfg;
-
tf_session_set_global_db(tfp, (void *)global_cfg_db);
- init = 1;
-
TFP_DRV_LOG(INFO, "Global Cfg - initialized\n");
-
return 0;
}
@@ -118,12 +104,6 @@ tf_global_cfg_unbind(struct tf *tfp)
TF_CHECK_PARMS1(tfp);
- /* Bail if nothing has been initialized */
- if (!init) {
- TFP_DRV_LOG(INFO, "No Global Cfg DBs created\n");
- return 0;
- }
-
rc = tf_session_get_global_db(tfp, (void **)&global_cfg_db_ptr);
if (rc) {
TFP_DRV_LOG(INFO, "global_cfg_db is not initialized\n");
@@ -131,8 +111,6 @@ tf_global_cfg_unbind(struct tf *tfp)
}
tfp_free((void *)global_cfg_db_ptr);
- init = 0;
-
return 0;
}
@@ -147,12 +125,6 @@ tf_global_cfg_set(struct tf *tfp,
TF_CHECK_PARMS3(tfp, parms, parms->config);
- if (!init) {
- TFP_DRV_LOG(ERR, "%s: No Global Cfg DBs created\n",
- tf_dir_2_str(parms->dir));
- return -EINVAL;
- }
-
rc = tf_session_get_global_db(tfp, (void **)&global_cfg_db_ptr);
if (rc) {
TFP_DRV_LOG(INFO, "No global cfg DBs initialized\n");
@@ -197,13 +169,6 @@ tf_global_cfg_get(struct tf *tfp,
TF_CHECK_PARMS3(tfp, parms, parms->config);
- if (!init) {
- TFP_DRV_LOG(ERR,
- "%s: No Global Cfg DBs created\n",
- tf_dir_2_str(parms->dir));
- return -EINVAL;
- }
-
rc = tf_session_get_global_db(tfp, (void **)&global_cfg_db_ptr);
if (rc) {
TFP_DRV_LOG(INFO, "No Global cfg DBs initialized\n");
diff --git a/drivers/net/bnxt/tf_core/tf_global_cfg.h b/drivers/net/bnxt/tf_core/tf_global_cfg.h
index 3522bcc07e..c14e5e9109 100644
--- a/drivers/net/bnxt/tf_core/tf_global_cfg.h
+++ b/drivers/net/bnxt/tf_core/tf_global_cfg.h
@@ -93,7 +93,7 @@ struct tf_global_cfg_cfg_parms {
*
* Returns
* - (0) if successful.
- * - (-EINVAL) on failure.
+ * - (-ENOMEM) on failure.
*/
int
tf_global_cfg_bind(struct tf *tfp,
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index fd211bbc3f..b1b8679aa6 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -35,6 +35,7 @@ STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =
static pthread_mutex_t bnxt_ulp_global_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Spin lock to protect context global list */
+uint32_t bnxt_ulp_ctxt_lock_created;
rte_spinlock_t bnxt_ulp_ctxt_lock;
TAILQ_HEAD(cntx_list_entry_list, ulp_context_list_entry);
static struct cntx_list_entry_list ulp_cntx_list =
@@ -2010,9 +2011,10 @@ bnxt_ulp_cntxt_ha_enabled(struct bnxt_ulp_context *ulp_ctx)
static int32_t
bnxt_ulp_cntxt_list_init(void)
{
- /* Create the cntxt spin lock */
- rte_spinlock_init(&bnxt_ulp_ctxt_lock);
-
+ /* Create the cntxt spin lock only once*/
+ if (!bnxt_ulp_ctxt_lock_created)
+ rte_spinlock_init(&bnxt_ulp_ctxt_lock);
+ bnxt_ulp_ctxt_lock_created = 1;
return 0;
}
@@ -2051,14 +2053,14 @@ bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx)
}
struct bnxt_ulp_context *
-bnxt_ulp_cntxt_entry_acquire(void)
+bnxt_ulp_cntxt_entry_acquire(void *arg)
{
struct ulp_context_list_entry *entry;
/* take a lock and get the first ulp context available */
if (rte_spinlock_trylock(&bnxt_ulp_ctxt_lock)) {
TAILQ_FOREACH(entry, &ulp_cntx_list, next)
- if (entry->ulp_ctx)
+ if (entry->ulp_ctx->cfg_data == arg)
return entry->ulp_ctx;
}
return NULL;
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
index 17c6898196..05a98b14e6 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.h
@@ -307,7 +307,7 @@ bool
bnxt_ulp_cntxt_ha_enabled(struct bnxt_ulp_context *ulp_ctx);
struct bnxt_ulp_context *
-bnxt_ulp_cntxt_entry_acquire(void);
+bnxt_ulp_cntxt_entry_acquire(void *arg);
void
bnxt_ulp_cntxt_entry_release(void);
diff --git a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
index 92243083b5..85c9cbb7f2 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c
@@ -203,7 +203,7 @@ ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt)
if (ulp_fc_info && !(ulp_fc_info->flags & ULP_FLAG_FC_THREAD)) {
rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
- ulp_fc_mgr_alarm_cb, NULL);
+ ulp_fc_mgr_alarm_cb, (void *)ctxt->cfg_data);
ulp_fc_info->flags |= ULP_FLAG_FC_THREAD;
}
@@ -225,7 +225,7 @@ void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt)
return;
ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD;
- rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, NULL);
+ rte_eal_alarm_cancel(ulp_fc_mgr_alarm_cb, ctxt->cfg_data);
}
/*
@@ -434,7 +434,7 @@ static int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt,
*/
void
-ulp_fc_mgr_alarm_cb(void *arg __rte_unused)
+ulp_fc_mgr_alarm_cb(void *arg)
{
int rc = 0;
unsigned int j;
@@ -445,11 +445,11 @@ ulp_fc_mgr_alarm_cb(void *arg __rte_unused)
struct tf *tfp;
uint32_t dev_id, hw_cntr_id = 0, num_entries = 0;
- ctxt = bnxt_ulp_cntxt_entry_acquire();
+ ctxt = bnxt_ulp_cntxt_entry_acquire(arg);
if (ctxt == NULL) {
BNXT_TF_DBG(INFO, "could not get the ulp context lock\n");
rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
- ulp_fc_mgr_alarm_cb, NULL);
+ ulp_fc_mgr_alarm_cb, arg);
return;
}
@@ -534,7 +534,7 @@ ulp_fc_mgr_alarm_cb(void *arg __rte_unused)
out:
bnxt_ulp_cntxt_entry_release();
rte_eal_alarm_set(US_PER_S * ULP_FC_TIMER,
- ulp_fc_mgr_alarm_cb, NULL);
+ ulp_fc_mgr_alarm_cb, arg);
}
/*
diff --git a/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c b/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c
index 5f5b5d639e..1325986aba 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c
@@ -27,7 +27,7 @@
#define ULP_HA_CLIENT_CNT_IF_TBL_IDX 9
static void ulp_ha_mgr_timer_cancel(void);
-static int32_t ulp_ha_mgr_timer_start(void);
+static int32_t ulp_ha_mgr_timer_start(void *arg);
static void ulp_ha_mgr_timer_cb(void *arg);
static int32_t ulp_ha_mgr_app_type_set(struct bnxt_ulp_context *ulp_ctx,
enum ulp_ha_mgr_app_type app_type);
@@ -151,7 +151,7 @@ ulp_ha_mgr_app_type_set(struct bnxt_ulp_context *ulp_ctx,
}
static void
-ulp_ha_mgr_timer_cb(void *arg __rte_unused)
+ulp_ha_mgr_timer_cb(void *arg)
{
struct tf_move_tcam_shared_entries_parms mparms = { 0 };
struct tf_clear_tcam_shared_entries_parms cparms = { 0 };
@@ -163,9 +163,9 @@ ulp_ha_mgr_timer_cb(void *arg __rte_unused)
struct tf *tfp;
int32_t rc;
- ulp_ctx = bnxt_ulp_cntxt_entry_acquire();
+ ulp_ctx = bnxt_ulp_cntxt_entry_acquire(arg);
if (ulp_ctx == NULL) {
- ulp_ha_mgr_timer_start();
+ ulp_ha_mgr_timer_start(arg);
return;
}
@@ -299,14 +299,14 @@ ulp_ha_mgr_timer_cb(void *arg __rte_unused)
bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
cb_restart:
bnxt_ulp_cntxt_entry_release();
- ulp_ha_mgr_timer_start();
+ ulp_ha_mgr_timer_start(arg);
}
static int32_t
-ulp_ha_mgr_timer_start(void)
+ulp_ha_mgr_timer_start(void *arg)
{
rte_eal_alarm_set(US_PER_S * ULP_HA_TIMER_SEC,
- ulp_ha_mgr_timer_cb, NULL);
+ ulp_ha_mgr_timer_cb, arg);
return 0;
}
@@ -333,7 +333,7 @@ ulp_ha_mgr_init(struct bnxt_ulp_context *ulp_ctx)
PMD_DRV_LOG(ERR, "Failed to initialize ha mutex\n");
goto cleanup;
}
- rc = ulp_ha_mgr_timer_start();
+ rc = ulp_ha_mgr_timer_start(ulp_ctx->cfg_data);
if (rc) {
BNXT_TF_DBG(ERR, "Unable to start timer CB.\n");
goto cleanup;
--
2.17.1