From: Manish Chopra <manishc@marvell.com>
To: <jerinjacobk@gmail.com>, <jerinj@marvell.com>,
<ferruh.yigit@intel.com>, <grive@u256.net>
Cc: <dev@dpdk.org>, <irusskikh@marvell.com>, <rmody@marvell.com>,
<GR-Everest-DPDK-Dev@marvell.com>, <rosen.xu@intel.com>,
<tianfei.zhang@intel.com>, <heinrich.kuhn@netronome.com>,
<qiming.yang@intel.com>, <qi.z.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v6 4/6] net/qede: add infrastructure support for VF load
Date: Fri, 25 Sep 2020 04:55:06 -0700
Message-ID: <20200925115508.4179-5-manishc@marvell.com>
In-Reply-To: <20200925115508.4179-1-manishc@marvell.com>

This patch adds the infrastructure needed to start/load the VF-PMD driver
instance on top of the PF-PMD driver instance: handling messages received
from a VF and sending ramrods on behalf of the VF's configuration requests
from the alarm handler context.

Signed-off-by: Manish Chopra <manishc@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Rasesh Mody <rmody@marvell.com>
---
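
Note for reviewers (not part of the commit message): the patch defers VF
mailbox handling out of the slowpath DPC path by setting a flag bit and
arming a DPDK alarm; the alarm callback then processes the mailbox from the
EAL interrupt thread. Below is a minimal, self-contained sketch of that
pattern only -- the demo_* names and the plain flags word are illustrative
stand-ins, not driver code:

#include <stdio.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

/* Stands in for QED_IOV_WQ_MSG_FLAG / p_hwfn->iov_task_flags. */
#define DEMO_IOV_WQ_MSG_FLAG 0
static volatile unsigned long demo_iov_task_flags;

/* Alarm callback: runs later on the EAL interrupt thread. */
static void demo_iov_pf_task(void *arg)
{
        (void)arg;
        if (demo_iov_task_flags & (1UL << DEMO_IOV_WQ_MSG_FLAG)) {
                demo_iov_task_flags &= ~(1UL << DEMO_IOV_WQ_MSG_FLAG);
                /* Real driver: acquire a PTT, copy the VF message and
                 * process it via ecore_iov_process_mbx_req().
                 */
                printf("handling VF mailbox in alarm context\n");
        }
}

/* Called from the notification path; only records the pending work. */
static int demo_schedule_iov(unsigned int flag)
{
        demo_iov_task_flags |= 1UL << flag;
        return rte_eal_alarm_set(1, demo_iov_pf_task, NULL); /* ~1us later */
}

int main(int argc, char **argv)
{
        if (rte_eal_init(argc, argv) < 0)
                return -1;
        demo_schedule_iov(DEMO_IOV_WQ_MSG_FLAG); /* as OSAL_PF_VF_MSG() would */
        rte_delay_us_sleep(10 * 1000);           /* let the alarm fire */
        rte_eal_alarm_cancel(demo_iov_pf_task, NULL);
        return rte_eal_cleanup();
}

In the patch itself, OSAL_PF_VF_MSG() -> osal_pf_vf_msg() -> qed_schedule_iov()
plays the demo_schedule_iov() role, and qed_iov_pf_task()/qed_handle_vf_msg()
do the real work under a PTT.
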
drivers/net/qede/base/bcm_osal.c | 26 ++++++++++++
drivers/net/qede/base/bcm_osal.h | 11 +++--
drivers/net/qede/base/ecore.h | 4 ++
drivers/net/qede/base/ecore_iov_api.h | 3 ++
drivers/net/qede/qede_ethdev.c | 2 +
drivers/net/qede/qede_main.c | 4 +-
drivers/net/qede/qede_sriov.c | 61 +++++++++++++++++++++++++++
drivers/net/qede/qede_sriov.h | 16 ++++++-
8 files changed, 121 insertions(+), 6 deletions(-)
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 65837b53d..ef47339df 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -14,6 +14,32 @@
#include "ecore_iov_api.h"
#include "ecore_mcp_api.h"
#include "ecore_l2_api.h"
+#include "../qede_sriov.h"
+
+int osal_pf_vf_msg(struct ecore_hwfn *p_hwfn)
+{
+ int rc;
+
+ rc = qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Failed to schedule alarm handler rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
+{
+ struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
+
+ if (!p_hwfn)
+ return;
+
+ OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
+ ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+ OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
+}
/* Array of memzone pointers */
static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 681d2d5b6..e7212f40c 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -177,9 +177,12 @@ typedef pthread_mutex_t osal_mutex_t;
/* DPC */
+void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie);
#define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t))
-#define OSAL_DPC_INIT(dpc, hwfn) nothing
-#define OSAL_POLL_MODE_DPC(hwfn) nothing
+#define OSAL_DPC_INIT(dpc, hwfn) \
+ OSAL_SPIN_LOCK_INIT(&(hwfn)->spq_lock)
+#define OSAL_POLL_MODE_DPC(hwfn) \
+ osal_poll_mode_dpc((osal_int_ptr_t)(p_hwfn))
#define OSAL_DPC_SYNC(hwfn) nothing
/* Lists */
@@ -344,10 +347,12 @@ u32 qede_find_first_zero_bit(u32 *bitmap, u32 length);
/* SR-IOV channel */
+int osal_pf_vf_msg(struct ecore_hwfn *p_hwfn);
#define OSAL_VF_FLR_UPDATE(hwfn) nothing
#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0
#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0)
-#define OSAL_PF_VF_MSG(hwfn, vfid) 0
+#define OSAL_PF_VF_MSG(hwfn, vfid) \
+ osal_pf_vf_msg(hwfn)
#define OSAL_PF_VF_MALICIOUS(hwfn, vfid) nothing
#define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0
#define OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 750e99a8f..6c8e6d407 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -714,6 +714,10 @@ struct ecore_hwfn {
/* @DPDK */
struct ecore_ptt *p_arfs_ptt;
+
+ /* DPDK specific, not part of vanilla ecore */
+ osal_spinlock_t spq_lock;
+ u32 iov_task_flags;
};
enum ecore_mf_mode {
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index 545001812..bd7c5703f 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -14,6 +14,9 @@
#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
#define ECORE_VF_ARRAY_LENGTH (3)
+#define ECORE_VF_ARRAY_GET_VFID(arr, vfid) \
+ (((arr)[(vfid) / 64]) & (1ULL << ((vfid) % 64)))
+
#define IS_VF(p_dev) ((p_dev)->b_is_vf)
#define IS_PF(p_dev) (!((p_dev)->b_is_vf))
#ifdef CONFIG_ECORE_SRIOV
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 27ec42e2c..3cc141954 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -281,7 +281,9 @@ qede_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
+ OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+ OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
}
static void
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index c1135822b..6a7bfc188 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -221,7 +221,9 @@ static void qed_stop_iov_task(struct ecore_dev *edev)
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
- if (!IS_PF(edev))
+ if (IS_PF(edev))
+ rte_eal_alarm_cancel(qed_iov_pf_task, p_hwfn);
+ else
rte_eal_alarm_cancel(qede_vf_task, p_hwfn);
}
}
diff --git a/drivers/net/qede/qede_sriov.c b/drivers/net/qede/qede_sriov.c
index ba4384e90..6d620dde8 100644
--- a/drivers/net/qede/qede_sriov.c
+++ b/drivers/net/qede/qede_sriov.c
@@ -4,6 +4,14 @@
* www.marvell.com
*/
+#include <rte_alarm.h>
+
+#include "base/bcm_osal.h"
+#include "base/ecore.h"
+#include "base/ecore_sriov.h"
+#include "base/ecore_mcp.h"
+#include "base/ecore_vf.h"
+
#include "qede_sriov.h"
static void qed_sriov_enable_qid_config(struct ecore_hwfn *hwfn,
@@ -83,3 +91,56 @@ void qed_sriov_configure(struct ecore_dev *edev, int num_vfs_param)
if (num_vfs_param)
qed_sriov_enable(edev, num_vfs_param);
}
+
+static void qed_handle_vf_msg(struct ecore_hwfn *hwfn)
+{
+ u64 events[ECORE_VF_ARRAY_LENGTH];
+ struct ecore_ptt *ptt;
+ int i;
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, true, "PTT acquire failed\n");
+ qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
+ return;
+ }
+
+ ecore_iov_pf_get_pending_events(hwfn, events);
+
+ ecore_for_each_vf(hwfn, i) {
+ /* Skip VFs with no pending messages */
+ if (!ECORE_VF_ARRAY_GET_VFID(events, i))
+ continue;
+
+ DP_VERBOSE(hwfn, ECORE_MSG_IOV,
+ "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
+ i, hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
+
+ /* Copy VF's message to PF's request buffer for that VF */
+ if (ecore_iov_copy_vf_msg(hwfn, ptt, i))
+ continue;
+
+ ecore_iov_process_mbx_req(hwfn, ptt, i);
+ }
+
+ ecore_ptt_release(hwfn, ptt);
+}
+
+void qed_iov_pf_task(void *arg)
+{
+ struct ecore_hwfn *p_hwfn = arg;
+
+ if (OSAL_GET_BIT(QED_IOV_WQ_MSG_FLAG, &p_hwfn->iov_task_flags)) {
+ OSAL_CLEAR_BIT(QED_IOV_WQ_MSG_FLAG, &p_hwfn->iov_task_flags);
+ qed_handle_vf_msg(p_hwfn);
+ }
+}
+
+int qed_schedule_iov(struct ecore_hwfn *p_hwfn, enum qed_iov_wq_flag flag)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Scheduling iov task [Flag: %d]\n",
+ flag);
+
+ OSAL_SET_BIT(flag, &p_hwfn->iov_task_flags);
+ return rte_eal_alarm_set(1, qed_iov_pf_task, p_hwfn);
+}
diff --git a/drivers/net/qede/qede_sriov.h b/drivers/net/qede/qede_sriov.h
index 6c85b1dd5..8b7fa7daa 100644
--- a/drivers/net/qede/qede_sriov.h
+++ b/drivers/net/qede/qede_sriov.h
@@ -4,6 +4,18 @@
* www.marvell.com
*/
-#include "qede_ethdev.h"
-
void qed_sriov_configure(struct ecore_dev *edev, int num_vfs_param);
+
+enum qed_iov_wq_flag {
+ QED_IOV_WQ_MSG_FLAG,
+ QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+ QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ QED_IOV_WQ_STOP_WQ_FLAG,
+ QED_IOV_WQ_FLR_FLAG,
+ QED_IOV_WQ_TRUST_FLAG,
+ QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
+ QED_IOV_WQ_DB_REC_HANDLER,
+};
+
+int qed_schedule_iov(struct ecore_hwfn *p_hwfn, enum qed_iov_wq_flag flag);
+void qed_iov_pf_task(void *arg);
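
Note for reviewers (not part of the commit message): the pending-events
array walked in qed_handle_vf_msg() holds ECORE_VF_ARRAY_LENGTH (3) u64
words, so ECORE_VF_ARRAY_GET_VFID() picks word vfid/64 and tests bit
vfid%64 for a VF's relative id. A tiny stand-alone check of that bit
selection (illustration only; the DEMO_* names are not driver code):

#include <assert.h>
#include <stdint.h>

#define DEMO_VF_ARRAY_LENGTH 3 /* mirrors ECORE_VF_ARRAY_LENGTH */
#define DEMO_VF_ARRAY_GET_VFID(arr, vfid) \
        (((arr)[(vfid) / 64]) & (1ULL << ((vfid) % 64)))

int main(void)
{
        uint64_t events[DEMO_VF_ARRAY_LENGTH] = { 0 };

        /* Mark relative VF id 70 as having a pending mailbox message. */
        events[70 / 64] |= 1ULL << (70 % 64);

        assert(DEMO_VF_ARRAY_GET_VFID(events, 70));  /* would be handled */
        assert(!DEMO_VF_ARRAY_GET_VFID(events, 69)); /* would be skipped */
        return 0;
}
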
--
2.17.1