* [PATCH v2 01/10] net/octeon_ep: support cnf95n and cnf95o SoC
2023-04-05 14:25 ` [PATCH v2 00/10] extend octeon ep driver functionality Sathesh Edara
@ 2023-04-05 14:25 ` Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
2023-04-05 14:25 ` [PATCH v2 02/10] net/octeon_ep: support CNX10K series SoC Sathesh Edara
` (9 subsequent siblings)
10 siblings, 1 reply; 50+ messages in thread
From: Sathesh Edara @ 2023-04-05 14:25 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
This patch adds the required functionality in the Octeon endpoint
driver to support the cnf95n and cnf95o endpoint device.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx2_ep_vf.h | 2 ++
drivers/net/octeon_ep/otx_ep_ethdev.c | 13 +++++++++++--
2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.h b/drivers/net/octeon_ep/otx2_ep_vf.h
index 757eeae9f0..8f00acd737 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.h
+++ b/drivers/net/octeon_ep/otx2_ep_vf.h
@@ -115,6 +115,8 @@
#define PCI_DEVID_CN9K_EP_NET_VF 0xB203 /* OCTEON 9 EP mode */
#define PCI_DEVID_CN98XX_EP_NET_VF 0xB103
+#define PCI_DEVID_CNF95N_EP_NET_VF 0xB403
+#define PCI_DEVID_CNF95O_EP_NET_VF 0xB603
int
otx2_ep_vf_setup_device(struct otx_ep_device *sdpvf);
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index f43db1e398..24f62c3e49 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -105,6 +105,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
break;
case PCI_DEVID_CN9K_EP_NET_VF:
case PCI_DEVID_CN98XX_EP_NET_VF:
+ case PCI_DEVID_CNF95N_EP_NET_VF:
+ case PCI_DEVID_CNF95O_EP_NET_VF:
otx_epvf->chip_id = dev_id;
ret = otx2_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
@@ -144,7 +146,9 @@ otx_epdev_init(struct otx_ep_device *otx_epvf)
if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx_ep_xmit_pkts;
else if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF)
+ otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
else if (otx_epvf->chip_id == PCI_DEVID_CNXK_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
@@ -494,7 +498,10 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
otx_epvf->pdev = pdev;
otx_epdev_init(otx_epvf);
- if (pdev->id.device_id == PCI_DEVID_CN9K_EP_NET_VF)
+ if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
otx_epvf->pkind = SDP_OTX2_PKIND_FS0;
else
otx_epvf->pkind = SDP_PKIND;
@@ -524,6 +531,8 @@ static const struct rte_pci_id pci_id_otx_ep_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95N_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95O_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_EP_NET_VF) },
{ .vendor_id = 0, /* sentinel */ }
};
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 00/11] extend octeon ep driver functionality
2023-04-05 14:25 ` [PATCH v2 01/10] net/octeon_ep: support cnf95n and cnf95o SoC Sathesh Edara
@ 2023-04-24 12:28 ` Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 01/11] net/octeon_ep: support cnf95n and cnf95o SoC Sathesh Edara
` (10 more replies)
0 siblings, 11 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:28 UTC (permalink / raw)
To: sburla, jerinj, sedara; +Cc: dev
This patch set adds the following support to the
octeon_ep driver:
- extends support for newer SoCs
- supports new features such as IQ/OQ reset, ISM,
and a mailbox between VF and PF, and sets the
watermark level for output queues.
V3 changes:
- Addresses V2 review comments
Sathesh Edara (11):
net/octeon_ep: support cnf95n and cnf95o SoC
net/octeon_ep: support CNX10K series SoC
net/octeon_ep: support error propagation
net/octeon_ep: support IQ/OQ reset
devtools: add acronym in dictionary for commit checks
net/octeon_ep: support ISM
net/octeon_ep: flush pending DMA operations
net/octeon_ep: update queue size checks
net/octeon_ep: support mailbox between VF and PF
net/octeon_ep: set watermark for output queues
net/octeon_ep: set secondary process dev ops
devtools/words-case.txt | 1 +
doc/guides/nics/features/octeon_ep.ini | 2 +
drivers/net/octeon_ep/cnxk_ep_vf.c | 92 ++++++--
drivers/net/octeon_ep/cnxk_ep_vf.h | 29 ++-
drivers/net/octeon_ep/meson.build | 1 +
drivers/net/octeon_ep/otx2_ep_vf.c | 279 ++++++++++++++++++++--
drivers/net/octeon_ep/otx2_ep_vf.h | 77 +++++-
drivers/net/octeon_ep/otx_ep_common.h | 71 +++++-
drivers/net/octeon_ep/otx_ep_ethdev.c | 264 ++++++++++++++++++---
drivers/net/octeon_ep/otx_ep_mbox.c | 309 +++++++++++++++++++++++++
drivers/net/octeon_ep/otx_ep_mbox.h | 163 +++++++++++++
drivers/net/octeon_ep/otx_ep_rxtx.c | 111 +++++----
drivers/net/octeon_ep/otx_ep_rxtx.h | 4 +-
drivers/net/octeon_ep/otx_ep_vf.c | 71 ++++--
14 files changed, 1319 insertions(+), 155 deletions(-)
create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.c
create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.h
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 01/11] net/octeon_ep: support cnf95n and cnf95o SoC
2023-04-24 12:28 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
@ 2023-04-24 12:28 ` Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 02/11] net/octeon_ep: support CNX10K series SoC Sathesh Edara
` (9 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:28 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Adds the required functionality in the Octeon endpoint
driver to support the cnf95n and cnf95o endpoint device.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx2_ep_vf.h | 2 ++
drivers/net/octeon_ep/otx_ep_ethdev.c | 13 +++++++++++--
2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.h b/drivers/net/octeon_ep/otx2_ep_vf.h
index 757eeae9f0..8f00acd737 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.h
+++ b/drivers/net/octeon_ep/otx2_ep_vf.h
@@ -115,6 +115,8 @@
#define PCI_DEVID_CN9K_EP_NET_VF 0xB203 /* OCTEON 9 EP mode */
#define PCI_DEVID_CN98XX_EP_NET_VF 0xB103
+#define PCI_DEVID_CNF95N_EP_NET_VF 0xB403
+#define PCI_DEVID_CNF95O_EP_NET_VF 0xB603
int
otx2_ep_vf_setup_device(struct otx_ep_device *sdpvf);
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index f43db1e398..24f62c3e49 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -105,6 +105,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
break;
case PCI_DEVID_CN9K_EP_NET_VF:
case PCI_DEVID_CN98XX_EP_NET_VF:
+ case PCI_DEVID_CNF95N_EP_NET_VF:
+ case PCI_DEVID_CNF95O_EP_NET_VF:
otx_epvf->chip_id = dev_id;
ret = otx2_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
@@ -144,7 +146,9 @@ otx_epdev_init(struct otx_ep_device *otx_epvf)
if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx_ep_xmit_pkts;
else if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF)
+ otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
else if (otx_epvf->chip_id == PCI_DEVID_CNXK_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
@@ -494,7 +498,10 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
otx_epvf->pdev = pdev;
otx_epdev_init(otx_epvf);
- if (pdev->id.device_id == PCI_DEVID_CN9K_EP_NET_VF)
+ if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
otx_epvf->pkind = SDP_OTX2_PKIND_FS0;
else
otx_epvf->pkind = SDP_PKIND;
@@ -524,6 +531,8 @@ static const struct rte_pci_id pci_id_otx_ep_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95N_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95O_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_EP_NET_VF) },
{ .vendor_id = 0, /* sentinel */ }
};
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 02/11] net/octeon_ep: support CNX10K series SoC
2023-04-24 12:28 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 01/11] net/octeon_ep: support cnf95n and cnf95o SoC Sathesh Edara
@ 2023-04-24 12:28 ` Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 03/11] net/octeon_ep: support error propagation Sathesh Edara
` (8 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:28 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Adds the required functionality in the Octeon endpoint driver
to support the following CNX10K series endpoint devices.
- CN10KA
- CN10KB
- CNF10KA
- CNF10KB
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.h | 5 ++++-
drivers/net/octeon_ep/otx_ep_ethdev.c | 21 +++++++++++++++++----
2 files changed, 21 insertions(+), 5 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
index 7162461dd9..aaa5774552 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.h
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
@@ -134,7 +134,10 @@
#define CNXK_EP_R_OUT_CTL_ROR_P (1ULL << 24)
#define CNXK_EP_R_OUT_CTL_IMODE (1ULL << 23)
-#define PCI_DEVID_CNXK_EP_NET_VF 0xB903
+#define PCI_DEVID_CN10KA_EP_NET_VF 0xB903
+#define PCI_DEVID_CNF10KA_EP_NET_VF 0xBA03
+#define PCI_DEVID_CNF10KB_EP_NET_VF 0xBC03
+#define PCI_DEVID_CN10KB_EP_NET_VF 0xBD03
int
cnxk_ep_vf_setup_device(struct otx_ep_device *sdpvf);
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 24f62c3e49..b23d52ff84 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -111,7 +111,10 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
ret = otx2_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
break;
- case PCI_DEVID_CNXK_EP_NET_VF:
+ case PCI_DEVID_CN10KA_EP_NET_VF:
+ case PCI_DEVID_CN10KB_EP_NET_VF:
+ case PCI_DEVID_CNF10KA_EP_NET_VF:
+ case PCI_DEVID_CNF10KB_EP_NET_VF:
otx_epvf->chip_id = dev_id;
ret = cnxk_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
@@ -150,7 +153,10 @@ otx_epdev_init(struct otx_ep_device *otx_epvf)
otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
- else if (otx_epvf->chip_id == PCI_DEVID_CNXK_EP_NET_VF)
+ else if (otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
otx_epvf->max_rx_queues = ethdev_queues;
@@ -501,7 +507,11 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
+ otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF)
otx_epvf->pkind = SDP_OTX2_PKIND_FS0;
else
otx_epvf->pkind = SDP_PKIND;
@@ -533,7 +543,10 @@ static const struct rte_pci_id pci_id_otx_ep_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95N_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95O_EP_NET_VF) },
- { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KA_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF10KA_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF10KB_EP_NET_VF) },
{ .vendor_id = 0, /* sentinel */ }
};
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 03/11] net/octeon_ep: support error propagation
2023-04-24 12:28 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 01/11] net/octeon_ep: support cnf95n and cnf95o SoC Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 02/11] net/octeon_ep: support CNX10K series SoC Sathesh Edara
@ 2023-04-24 12:28 ` Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 04/11] net/octeon_ep: support IQ/OQ reset Sathesh Edara
` (7 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:28 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Adds detection of loop limits being hit,
and propagates errors up the call chain
when this happens.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.c | 51 +++++++++++--------
drivers/net/octeon_ep/otx2_ep_vf.c | 49 ++++++++++--------
drivers/net/octeon_ep/otx_ep_common.h | 6 +--
drivers/net/octeon_ep/otx_ep_ethdev.c | 27 +++++++---
drivers/net/octeon_ep/otx_ep_rxtx.c | 51 +++++++++----------
drivers/net/octeon_ep/otx_ep_vf.c | 71 +++++++++++++++++++--------
6 files changed, 155 insertions(+), 100 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 3427fb213b..1a92887109 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -47,36 +47,43 @@ cnxk_ep_vf_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
oct_ep_write64(reg_val, otx_ep->hw_addr + CNXK_EP_R_OUT_CONTROL(q_no));
}
-static void
+static int
cnxk_ep_vf_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
cnxk_ep_vf_setup_global_iq_reg(otx_ep, q_no);
+ return 0;
}
-static void
+static int
cnxk_ep_vf_setup_global_output_regs(struct otx_ep_device *otx_ep)
{
uint32_t q_no;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
cnxk_ep_vf_setup_global_oq_reg(otx_ep, q_no);
+ return 0;
}
-static void
+static int
cnxk_ep_vf_setup_device_regs(struct otx_ep_device *otx_ep)
{
- cnxk_ep_vf_setup_global_input_regs(otx_ep);
- cnxk_ep_vf_setup_global_output_regs(otx_ep);
+ int ret;
+
+ ret = cnxk_ep_vf_setup_global_input_regs(otx_ep);
+ if (ret)
+ return ret;
+ ret = cnxk_ep_vf_setup_global_output_regs(otx_ep);
+ return ret;
}
-static void
+static int
cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
volatile uint64_t reg_val = 0ull;
reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(iq_no));
@@ -91,9 +98,9 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
} while ((!(reg_val & CNXK_EP_R_IN_CTL_IDLE)) && loop--);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("IDLE bit is not set\n");
- return;
+ return -EIO;
}
/* Write the start of the input queue's ring and its size */
@@ -115,9 +122,9 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
rte_delay_ms(1);
} while (reg_val != 0 && loop--);
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("INST CNT REGISTER is not zero\n");
- return;
+ return -EIO;
}
/* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR
@@ -125,14 +132,15 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + CNXK_EP_R_IN_INT_LEVELS(iq_no));
+ return 0;
}
-static void
+static int
cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
/* Wait on IDLE to set to 1, supposed to configure BADDR
@@ -145,9 +153,9 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("OUT CNT REGISTER value is zero\n");
- return;
+ return -EIO;
}
oct_ep_write64(droq->desc_ring_dma, otx_ep->hw_addr + CNXK_EP_R_OUT_SLIST_BADDR(oq_no));
@@ -181,9 +189,9 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("Packets credit register value is not cleared\n");
- return;
+ return -EIO;
}
otx_ep_dbg("SDP_R[%d]_credit:%x", oq_no, rte_read32(droq->pkts_credit_reg));
@@ -201,18 +209,19 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("Packets sent register value is not cleared\n");
- return;
+ return -EIO;
}
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
+ return 0;
}
static int
cnxk_ep_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
uint64_t reg_val = 0ull;
/* Resetting doorbells during IQ enabling also to handle abrupt
@@ -225,7 +234,7 @@ cnxk_ep_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("INSTR DBELL not coming back to 0\n");
return -EIO;
}
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 3c9a70157e..3ffc7275c7 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -49,32 +49,39 @@ otx2_vf_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
}
-static void
+static int
otx2_vf_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
otx2_vf_setup_global_iq_reg(otx_ep, q_no);
+ return 0;
}
-static void
+static int
otx2_vf_setup_global_output_regs(struct otx_ep_device *otx_ep)
{
uint32_t q_no;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
otx2_vf_setup_global_oq_reg(otx_ep, q_no);
+ return 0;
}
-static void
+static int
otx2_vf_setup_device_regs(struct otx_ep_device *otx_ep)
{
- otx2_vf_setup_global_input_regs(otx_ep);
- otx2_vf_setup_global_output_regs(otx_ep);
+ int ret;
+
+ ret = otx2_vf_setup_global_input_regs(otx_ep);
+ if (ret)
+ return ret;
+ ret = otx2_vf_setup_global_output_regs(otx_ep);
+ return ret;
}
-static void
+static int
otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
@@ -92,9 +99,9 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
} while ((!(reg_val & SDP_VF_R_IN_CTL_IDLE)) && loop--);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("IDLE bit is not set\n");
- return;
+ return -EIO;
}
/* Write the start of the input queue's ring and its size */
@@ -115,9 +122,9 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
rte_write32(reg_val, iq->inst_cnt_reg);
} while (reg_val != 0 && loop--);
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("INST CNT REGISTER is not zero\n");
- return;
+ return -EIO;
}
/* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR
@@ -125,14 +132,15 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
+ return 0;
}
-static void
+static int
otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
/* Wait on IDLE to set to 1, supposed to configure BADDR
@@ -145,9 +153,9 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("OUT CNT REGISTER value is zero\n");
- return;
+ return -EIO;
}
oct_ep_write64(droq->desc_ring_dma, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_BADDR(oq_no));
@@ -181,9 +189,9 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("Packets credit register value is not cleared\n");
- return;
+ return -EIO;
}
otx_ep_dbg("SDP_R[%d]_credit:%x", oq_no, rte_read32(droq->pkts_credit_reg));
@@ -200,17 +208,18 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("Packets sent register value is not cleared\n");
- return;
+ return -EIO;
}
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
+ return 0;
}
static int
otx2_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
- uint64_t loop = SDP_VF_BUSY_LOOP_COUNT;
+ int loop = SDP_VF_BUSY_LOOP_COUNT;
uint64_t reg_val = 0ull;
/* Resetting doorbells during IQ enabling also to handle abrupt
@@ -223,7 +232,7 @@ otx2_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("INSTR DBELL not coming back to 0\n");
return -EIO;
}
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index e4c92270d4..479bb1a1a0 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -394,11 +394,11 @@ struct otx_ep_sriov_info {
/* Required functions for each VF device */
struct otx_ep_fn_list {
- void (*setup_iq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
+ int (*setup_iq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
- void (*setup_oq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
+ int (*setup_oq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
- void (*setup_device_regs)(struct otx_ep_device *otx_ep);
+ int (*setup_device_regs)(struct otx_ep_device *otx_ep);
int (*enable_io_queues)(struct otx_ep_device *otx_ep);
void (*disable_io_queues)(struct otx_ep_device *otx_ep);
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index b23d52ff84..5677a2d6a6 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -151,13 +151,17 @@ otx_epdev_init(struct otx_ep_device *otx_epvf)
else if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
- otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
- else if (otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF)
+ otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF) {
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
+ } else {
+ otx_ep_err("Invalid chip_id\n");
+ ret = -EINVAL;
+ goto setup_fail;
+ }
ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
otx_epvf->max_rx_queues = ethdev_queues;
otx_epvf->max_tx_queues = ethdev_queues;
@@ -489,6 +493,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ rte_eth_copy_pci_info(eth_dev, pdev);
otx_epvf->eth_dev = eth_dev;
otx_epvf->port_id = eth_dev->data->port_id;
eth_dev->dev_ops = &otx_ep_eth_dev_ops;
@@ -503,7 +508,8 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
otx_epvf->hw_addr = pdev->mem_resource[0].addr;
otx_epvf->pdev = pdev;
- otx_epdev_init(otx_epvf);
+ if (otx_epdev_init(otx_epvf))
+ return -ENOMEM;
if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
@@ -511,11 +517,16 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF)
+ otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF) {
otx_epvf->pkind = SDP_OTX2_PKIND_FS0;
- else
+ otx_ep_info("using pkind %d\n", otx_epvf->pkind);
+ } else if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF) {
otx_epvf->pkind = SDP_PKIND;
- otx_ep_info("using pkind %d\n", otx_epvf->pkind);
+ otx_ep_info("Using pkind %d.\n", otx_epvf->pkind);
+ } else {
+ otx_ep_err("Invalid chip id\n");
+ return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 6912ca2401..9712e6cce6 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -3,7 +3,7 @@
*/
#include <unistd.h>
-
+#include <assert.h>
#include <rte_eal.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
@@ -81,6 +81,7 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
const struct otx_ep_config *conf;
struct otx_ep_instr_queue *iq;
uint32_t q_size;
+ int ret;
conf = otx_ep->conf;
iq = otx_ep->instr_queue[iq_no];
@@ -140,7 +141,9 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
iq->iqcmd_64B = (conf->iq.instr_type == 64);
/* Set up IQ registers */
- otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);
+ ret = otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);
+ if (ret)
+ return ret;
return 0;
@@ -271,6 +274,7 @@ otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
uint32_t c_refill_threshold;
struct otx_ep_droq *droq;
uint32_t desc_ring_size;
+ int ret;
otx_ep_info("OQ[%d] Init start\n", q_no);
@@ -318,7 +322,9 @@ otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
droq->refill_threshold = c_refill_threshold;
/* Set up OQ registers */
- otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
+ ret = otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
+ if (ret)
+ return ret;
otx_ep->io_qmask.oq |= (1ull << q_no);
@@ -852,19 +858,15 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
* droq->pkts_pending);
*/
droq->stats.pkts_delayed_data++;
- while (retry && !info->length)
+ while (retry && !info->length) {
retry--;
+ rte_delay_us_block(50);
+ }
if (!retry && !info->length) {
otx_ep_err("OCTEON DROQ[%d]: read_idx: %d; Retry failed !!\n",
droq->q_no, droq->read_idx);
/* May be zero length packet; drop it */
- rte_pktmbuf_free(droq_pkt);
- droq->recv_buf_list[droq->read_idx] = NULL;
- droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
- droq->nb_desc);
- droq->stats.dropped_zlp++;
- droq->refill_count++;
- goto oq_read_fail;
+ assert(0);
}
}
if (next_fetch) {
@@ -938,6 +940,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
last_buf = droq_pkt;
} else {
otx_ep_err("no buf\n");
+ assert(0);
}
pkt_len += cpy_len;
@@ -953,16 +956,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
droq_pkt->l3_len = hdr_lens.l3_len;
droq_pkt->l4_len = hdr_lens.l4_len;
- if (droq_pkt->nb_segs > 1 &&
- !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
- rte_pktmbuf_free(droq_pkt);
- goto oq_read_fail;
- }
-
return droq_pkt;
-
-oq_read_fail:
- return NULL;
}
static inline uint32_t
@@ -992,6 +986,7 @@ otx_ep_recv_pkts(void *rx_queue,
struct rte_mbuf *oq_pkt;
uint32_t pkts = 0;
+ uint32_t valid_pkts = 0;
uint32_t new_pkts = 0;
int next_fetch;
@@ -1019,14 +1014,15 @@ otx_ep_recv_pkts(void *rx_queue,
"last_pkt_count %" PRIu64 "new_pkts %d.\n",
droq->pkts_pending, droq->last_pkt_count,
new_pkts);
- droq->pkts_pending -= pkts;
droq->stats.rx_err++;
- goto finish;
+ continue;
+ } else {
+ rx_pkts[valid_pkts] = oq_pkt;
+ valid_pkts++;
+ /* Stats */
+ droq->stats.pkts_received++;
+ droq->stats.bytes_received += oq_pkt->pkt_len;
}
- rx_pkts[pkts] = oq_pkt;
- /* Stats */
- droq->stats.pkts_received++;
- droq->stats.bytes_received += oq_pkt->pkt_len;
}
droq->pkts_pending -= pkts;
@@ -1053,6 +1049,5 @@ otx_ep_recv_pkts(void *rx_queue,
rte_write32(0, droq->pkts_credit_reg);
}
-finish:
- return pkts;
+ return valid_pkts;
}
diff --git a/drivers/net/octeon_ep/otx_ep_vf.c b/drivers/net/octeon_ep/otx_ep_vf.c
index 96366b2a7f..4f3538146b 100644
--- a/drivers/net/octeon_ep/otx_ep_vf.c
+++ b/drivers/net/octeon_ep/otx_ep_vf.c
@@ -12,10 +12,11 @@
#include "otx_ep_vf.h"
-static void
+static int
otx_ep_setup_global_iq_reg(struct otx_ep_device *otx_ep, int q_no)
{
volatile uint64_t reg_val = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
/* Select ES, RO, NS, RDSIZE,DPTR Format#0 for IQs
* IS_64B is by default enabled.
@@ -33,8 +34,11 @@ otx_ep_setup_global_iq_reg(struct otx_ep_device *otx_ep, int q_no)
do {
reg_val = rte_read64(otx_ep->hw_addr +
OTX_EP_R_IN_CONTROL(q_no));
- } while (!(reg_val & OTX_EP_R_IN_CTL_IDLE));
+ } while (!(reg_val & OTX_EP_R_IN_CTL_IDLE) && loop--);
+ if (loop < 0)
+ return -EIO;
}
+ return 0;
}
static void
@@ -60,13 +64,18 @@ otx_ep_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
otx_ep_write64(reg_val, otx_ep->hw_addr, OTX_EP_R_OUT_CONTROL(q_no));
}
-static void
+static int
otx_ep_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
+ int ret = 0;
- for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
- otx_ep_setup_global_iq_reg(otx_ep, q_no);
+ for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++) {
+ ret = otx_ep_setup_global_iq_reg(otx_ep, q_no);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static void
@@ -78,18 +87,24 @@ otx_ep_setup_global_output_regs(struct otx_ep_device *otx_ep)
otx_ep_setup_global_oq_reg(otx_ep, q_no);
}
-static void
+static int
otx_ep_setup_device_regs(struct otx_ep_device *otx_ep)
{
- otx_ep_setup_global_input_regs(otx_ep);
+ int ret;
+
+ ret = otx_ep_setup_global_input_regs(otx_ep);
+ if (ret)
+ return ret;
otx_ep_setup_global_output_regs(otx_ep);
+ return 0;
}
-static void
+static int
otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
volatile uint64_t reg_val = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_IN_CONTROL(iq_no));
@@ -100,7 +115,9 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
do {
reg_val = rte_read64(otx_ep->hw_addr +
OTX_EP_R_IN_CONTROL(iq_no));
- } while (!(reg_val & OTX_EP_R_IN_CTL_IDLE));
+ } while (!(reg_val & OTX_EP_R_IN_CTL_IDLE) && loop--);
+ if (loop < 0)
+ return -EIO;
}
/* Write the start of the input queue's ring and its size */
@@ -120,10 +137,13 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p\n",
iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+ loop = OTX_EP_BUSY_LOOP_COUNT;
do {
reg_val = rte_read32(iq->inst_cnt_reg);
rte_write32(reg_val, iq->inst_cnt_reg);
- } while (reg_val != 0);
+ } while ((reg_val != 0) && loop--);
+ if (loop < 0)
+ return -EIO;
/* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR
* to raise
@@ -133,13 +153,15 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
otx_ep_write64(OTX_EP_CLEAR_IN_INT_LVLS, otx_ep->hw_addr,
OTX_EP_R_IN_INT_LEVELS(iq_no));
+ return 0;
}
-static void
+static int
otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
@@ -150,10 +172,12 @@ otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_OUT_CONTROL(oq_no));
- while (!(reg_val & OTX_EP_R_OUT_CTL_IDLE)) {
+ while (!(reg_val & OTX_EP_R_OUT_CTL_IDLE) && loop--) {
reg_val = rte_read64(otx_ep->hw_addr +
OTX_EP_R_OUT_CONTROL(oq_no));
}
+ if (loop < 0)
+ return -EIO;
otx_ep_write64(droq->desc_ring_dma, otx_ep->hw_addr,
OTX_EP_R_OUT_SLIST_BADDR(oq_no));
@@ -180,11 +204,14 @@ otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
OTX_EP_R_OUT_INT_LEVELS(oq_no));
/* Clear the OQ doorbell */
+ loop = OTX_EP_BUSY_LOOP_COUNT;
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
- while ((rte_read32(droq->pkts_credit_reg) != 0ull)) {
+ while ((rte_read32(droq->pkts_credit_reg) != 0ull) && loop--) {
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
rte_delay_ms(1);
}
+ if (loop < 0)
+ return -EIO;
otx_ep_dbg("OTX_EP_R[%d]_credit:%x\n", oq_no,
rte_read32(droq->pkts_credit_reg));
@@ -195,18 +222,22 @@ otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
otx_ep_dbg("OTX_EP_R[%d]_sent: %x\n", oq_no,
rte_read32(droq->pkts_sent_reg));
- while (((rte_read32(droq->pkts_sent_reg)) != 0ull)) {
+ loop = OTX_EP_BUSY_LOOP_COUNT;
+ while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
rte_delay_ms(1);
}
+ if (loop < 0)
+ return -EIO;
+ return 0;
}
static int
otx_ep_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
- uint64_t reg_val = 0ull;
+ volatile uint64_t reg_val = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
/* Resetting doorbells during IQ enabling also to handle abrupt
* guest reboot. IQ reset does not clear the doorbells.
@@ -219,7 +250,7 @@ otx_ep_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
rte_delay_ms(1);
}
- if (loop == 0) {
+ if (loop < 0) {
otx_ep_err("dbell reset failed\n");
return -EIO;
}
@@ -238,8 +269,8 @@ otx_ep_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
static int
otx_ep_enable_oq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
- uint64_t reg_val = 0ull;
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ volatile uint64_t reg_val = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
/* Resetting doorbells during IQ enabling also to handle abrupt
* guest reboot. IQ reset does not clear the doorbells.
@@ -250,7 +281,7 @@ otx_ep_enable_oq(struct otx_ep_device *otx_ep, uint32_t q_no)
OTX_EP_R_OUT_SLIST_DBELL(q_no))) != 0ull) && loop--) {
rte_delay_ms(1);
}
- if (loop == 0) {
+ if (loop < 0) {
otx_ep_err("dbell reset failed\n");
return -EIO;
}
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 04/11] net/octeon_ep: support IQ/OQ reset
2023-04-24 12:28 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (2 preceding siblings ...)
2023-04-24 12:28 ` [PATCH v3 03/11] net/octeon_ep: support error propagation Sathesh Edara
@ 2023-04-24 12:28 ` Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 05/11] devtools: add acronym in dictionary for commit checks Sathesh Edara
` (6 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:28 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Adds input and output queue reset functionality,
as well as receive queue interrupt enable and disable
functionality.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx2_ep_vf.c | 193 +++++++++++++++++++++++++-
drivers/net/octeon_ep/otx2_ep_vf.h | 61 ++++++--
drivers/net/octeon_ep/otx_ep_common.h | 5 +-
3 files changed, 244 insertions(+), 15 deletions(-)
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 3ffc7275c7..3e4895862b 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -9,6 +9,117 @@
#include "otx_ep_common.h"
#include "otx2_ep_vf.h"
+static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no);
+
+static int
+otx2_vf_reset_iq(struct otx_ep_device *otx_ep, int q_no)
+{
+ int loop = SDP_VF_BUSY_LOOP_COUNT;
+ volatile uint64_t d64 = 0ull;
+
+ /* There is no RST for a ring.
+ * Clear all registers one by one after disabling the ring
+ */
+
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_BADDR(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_RSIZE(q_no));
+
+ d64 = 0xFFFFFFFF; /* ~0ull */
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no));
+ d64 = otx2_read64(otx_ep->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no));
+
+ while ((d64 != 0) && loop--) {
+ rte_delay_ms(1);
+ d64 = otx2_read64(otx_ep->hw_addr +
+ SDP_VF_R_IN_INSTR_DBELL(q_no));
+ }
+ if (loop < 0) {
+ otx_ep_err("%s: doorbell init retry limit exceeded.\n", __func__);
+ return -EIO;
+ }
+
+ loop = SDP_VF_BUSY_LOOP_COUNT;
+ do {
+ d64 = otx2_read64(otx_ep->hw_addr + SDP_VF_R_IN_CNTS(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_CNTS(q_no));
+ rte_delay_ms(1);
+ } while ((d64 & ~SDP_VF_R_IN_CNTS_OUT_INT) != 0 && loop--);
+ if (loop < 0) {
+ otx_ep_err("%s: in_cnts init retry limit exceeded.\n", __func__);
+ return -EIO;
+ }
+
+ d64 = 0ull;
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_PKT_CNT(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_BYTE_CNT(q_no));
+
+ return 0;
+}
+
+static int
+otx2_vf_reset_oq(struct otx_ep_device *otx_ep, int q_no)
+{
+ int loop = SDP_VF_BUSY_LOOP_COUNT;
+ volatile uint64_t d64 = 0ull;
+
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
+
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_BADDR(q_no));
+
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_RSIZE(q_no));
+
+ d64 = 0xFFFFFFFF;
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(q_no));
+ d64 = otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(q_no));
+ while ((d64 != 0) && loop--) {
+ rte_delay_ms(1);
+ d64 = otx2_read64(otx_ep->hw_addr +
+ SDP_VF_R_OUT_SLIST_DBELL(q_no));
+ }
+ if (loop < 0) {
+ otx_ep_err("%s: doorbell init retry limit exceeded.\n", __func__);
+ return -EIO;
+ }
+
+ if (otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CNTS(q_no))
+ & SDP_VF_R_OUT_CNTS_OUT_INT) {
+ /*
+ * The OUT_INT bit is set. This interrupt must be enabled in
+ * order to clear the interrupt. Interrupts are disabled
+ * at the end of this function.
+ */
+ union out_int_lvl_t out_int_lvl;
+
+ out_int_lvl.d64 = otx2_read64(otx_ep->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.time_cnt_en = 1;
+ out_int_lvl.s.cnt = 0;
+ otx2_write64(out_int_lvl.d64, otx_ep->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ }
+
+ loop = SDP_VF_BUSY_LOOP_COUNT;
+ do {
+ d64 = otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+ rte_delay_ms(1);
+ } while ((d64 & ~SDP_VF_R_OUT_CNTS_IN_INT) != 0 && loop--);
+ if (loop < 0) {
+ otx_ep_err("%s: out_cnts init retry limit exceeded.\n", __func__);
+ return -EIO;
+ }
+
+ d64 = 0ull;
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_INT_LEVELS(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_PKT_CNT(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_BYTE_CNT(q_no));
+
+ return 0;
+}
+
static void
otx2_vf_setup_global_iq_reg(struct otx_ep_device *otx_ep, int q_no)
{
@@ -49,24 +160,63 @@ otx2_vf_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
}
+static int
+otx2_vf_reset_input_queues(struct otx_ep_device *otx_ep)
+{
+ uint32_t q_no = 0;
+ int ret = 0;
+
+ for (q_no = 0; q_no < otx_ep->sriov_info.rings_per_vf; q_no++) {
+ ret = otx2_vf_reset_iq(otx_ep, q_no);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+otx2_vf_reset_output_queues(struct otx_ep_device *otx_ep)
+{
+ uint64_t q_no = 0ull;
+ int ret = 0;
+
+ for (q_no = 0; q_no < otx_ep->sriov_info.rings_per_vf; q_no++) {
+ ret = otx2_vf_reset_oq(otx_ep, q_no);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static int
otx2_vf_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
+ int ret = 0;
+
+ ret = otx2_vf_reset_input_queues(otx_ep);
+ if (ret)
+ return ret;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
otx2_vf_setup_global_iq_reg(otx_ep, q_no);
- return 0;
+ return ret;
}
static int
otx2_vf_setup_global_output_regs(struct otx_ep_device *otx_ep)
{
uint32_t q_no;
+ int ret = 0;
+ ret = otx2_vf_reset_output_queues(otx_ep);
+ if (ret)
+ return ret;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
otx2_vf_setup_global_oq_reg(otx_ep, q_no);
- return 0;
+ return ret;
}
static int
@@ -181,8 +331,8 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_write64(OTX_EP_CLEAR_SDP_OUT_PKT_CNT, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_OUT_PKT_CNT(oq_no));
- loop = OTX_EP_BUSY_LOOP_COUNT;
/* Clear the OQ doorbell */
+ loop = OTX_EP_BUSY_LOOP_COUNT;
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
while ((rte_read32(droq->pkts_credit_reg) != 0ull) && loop--) {
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
@@ -344,6 +494,40 @@ otx2_ep_get_defconf(struct otx_ep_device *otx_ep_dev __rte_unused)
return default_conf;
}
+static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no)
+{
+ union out_int_lvl_t out_int_lvl;
+ union out_cnts_t out_cnts;
+
+ out_int_lvl.d64 = otx2_read64(otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.time_cnt_en = 1;
+ out_int_lvl.s.cnt = 0;
+ otx2_write64(out_int_lvl.d64, otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_cnts.d64 = 0;
+ out_cnts.s.resend = 1;
+ otx2_write64(out_cnts.d64, otx_epvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+ return 0;
+}
+
+static int otx2_vf_disable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no)
+{
+ union out_int_lvl_t out_int_lvl;
+
+ /* Disable the interrupt for this queue */
+ out_int_lvl.d64 = otx2_read64(otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.time_cnt_en = 0;
+ out_int_lvl.s.cnt = 0;
+ otx2_write64(out_int_lvl.d64, otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+
+ return 0;
+}
+
int
otx2_ep_vf_setup_device(struct otx_ep_device *otx_ep)
{
@@ -381,5 +565,8 @@ otx2_ep_vf_setup_device(struct otx_ep_device *otx_ep)
otx_ep->fn_list.enable_oq = otx2_vf_enable_oq;
otx_ep->fn_list.disable_oq = otx2_vf_disable_oq;
+ otx_ep->fn_list.enable_rxq_intr = otx2_vf_enable_rxq_intr;
+ otx_ep->fn_list.disable_rxq_intr = otx2_vf_disable_rxq_intr;
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.h b/drivers/net/octeon_ep/otx2_ep_vf.h
index 8f00acd737..36c0b25dea 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.h
+++ b/drivers/net/octeon_ep/otx2_ep_vf.h
@@ -14,17 +14,20 @@
#define SDP_VF_BUSY_LOOP_COUNT (10000)
/* SDP VF OQ Masks */
-#define SDP_VF_R_OUT_CTL_IDLE (1ull << 40)
-#define SDP_VF_R_OUT_CTL_ES_I (1ull << 34)
-#define SDP_VF_R_OUT_CTL_NSR_I (1ull << 33)
-#define SDP_VF_R_OUT_CTL_ROR_I (1ull << 32)
-#define SDP_VF_R_OUT_CTL_ES_D (1ull << 30)
-#define SDP_VF_R_OUT_CTL_NSR_D (1ull << 29)
-#define SDP_VF_R_OUT_CTL_ROR_D (1ull << 28)
-#define SDP_VF_R_OUT_CTL_ES_P (1ull << 26)
-#define SDP_VF_R_OUT_CTL_NSR_P (1ull << 25)
-#define SDP_VF_R_OUT_CTL_ROR_P (1ull << 24)
-#define SDP_VF_R_OUT_CTL_IMODE (1ull << 23)
+#define SDP_VF_R_OUT_CTL_IDLE (0x1ull << 40)
+#define SDP_VF_R_OUT_CTL_ES_I (0x1ull << 34)
+#define SDP_VF_R_OUT_CTL_NSR_I (0x1ull << 33)
+#define SDP_VF_R_OUT_CTL_ROR_I (0x1ull << 32)
+#define SDP_VF_R_OUT_CTL_ES_D (0x1ull << 30)
+#define SDP_VF_R_OUT_CTL_NSR_D (0x1ull << 29)
+#define SDP_VF_R_OUT_CTL_ROR_D (0x1ull << 28)
+#define SDP_VF_R_OUT_CTL_ES_P (0x1ull << 26)
+#define SDP_VF_R_OUT_CTL_NSR_P (0x1ull << 25)
+#define SDP_VF_R_OUT_CTL_ROR_P (0x1ull << 24)
+#define SDP_VF_R_OUT_CTL_IMODE (0x1ull << 23)
+#define SDP_VF_R_OUT_CNTS_OUT_INT (0x1ull << 62)
+#define SDP_VF_R_OUT_CNTS_IN_INT (0x1ull << 61)
+#define SDP_VF_R_IN_CNTS_OUT_INT (0x1ull << 62)
/* SDP VF Register definitions */
#define SDP_VF_RING_OFFSET (0x1ull << 17)
@@ -140,4 +143,40 @@ struct otx2_ep_instr_64B {
uint64_t exhdr[4];
};
+union out_int_lvl_t {
+ uint64_t d64;
+ struct {
+ uint64_t cnt:32;
+ uint64_t timet:22;
+ uint64_t max_len:7;
+ uint64_t max_len_en:1;
+ uint64_t time_cnt_en:1;
+ uint64_t bmode:1;
+ } s;
+};
+
+union out_cnts_t {
+ uint64_t d64;
+ struct {
+ uint64_t cnt:32;
+ uint64_t timer:22;
+ uint64_t rsvd:5;
+ uint64_t resend:1;
+ uint64_t mbox_int:1;
+ uint64_t in_int:1;
+ uint64_t out_int:1;
+ uint64_t send_ism:1;
+ } s;
+};
+
+#define OTX2_EP_64B_INSTR_SIZE (sizeof(otx2_ep_instr_64B))
+
+#define NIX_MAX_HW_FRS 9212
+#define NIX_MAX_VTAG_INS 2
+#define NIX_MAX_VTAG_ACT_SIZE (4 * NIX_MAX_VTAG_INS)
+#define NIX_MAX_FRS \
+ (NIX_MAX_HW_FRS + RTE_ETHER_CRC_LEN - NIX_MAX_VTAG_ACT_SIZE)
+
+#define CN93XX_INTR_R_OUT_INT (1ULL << 62)
+#define CN93XX_INTR_R_IN_INT (1ULL << 61)
#endif /*_OTX2_EP_VF_H_ */
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 479bb1a1a0..a3260d5243 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -408,6 +408,9 @@ struct otx_ep_fn_list {
int (*enable_oq)(struct otx_ep_device *otx_ep, uint32_t q_no);
void (*disable_oq)(struct otx_ep_device *otx_ep, uint32_t q_no);
+
+ int (*enable_rxq_intr)(struct otx_ep_device *otx_epvf, uint16_t q_no);
+ int (*disable_rxq_intr)(struct otx_ep_device *otx_epvf, uint16_t q_no);
};
/* OTX_EP EP VF device data structure */
@@ -498,7 +501,7 @@ struct otx_ep_buf_free_info {
struct otx_ep_gather g;
};
-#define OTX_EP_MAX_PKT_SZ 64000U
+#define OTX_EP_MAX_PKT_SZ 65498U
#define OTX_EP_MAX_MAC_ADDRS 1
#define OTX_EP_SG_ALIGN 8
#define OTX_EP_CLEAR_ISIZE_BSIZE 0x7FFFFFULL
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 05/11] devtools: add acronym in dictionary for commit checks
2023-04-24 12:28 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (3 preceding siblings ...)
2023-04-24 12:28 ` [PATCH v3 04/11] net/octeon_ep: support IQ/OQ reset Sathesh Edara
@ 2023-04-24 12:28 ` Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 06/11] net/octeon_ep: support ISM Sathesh Edara
` (5 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:28 UTC (permalink / raw)
To: sburla, jerinj, sedara, Thomas Monjalon; +Cc: dev
ISM -> Interrupt Status Messages
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
devtools/words-case.txt | 1 +
1 file changed, 1 insertion(+)
diff --git a/devtools/words-case.txt b/devtools/words-case.txt
index 53e029a958..3a7af902bd 100644
--- a/devtools/words-case.txt
+++ b/devtools/words-case.txt
@@ -35,6 +35,7 @@ IP
IPsec
IPv4
IPv6
+ISM
L2
L3
L4
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 06/11] net/octeon_ep: support ISM
2023-04-24 12:28 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (4 preceding siblings ...)
2023-04-24 12:28 ` [PATCH v3 05/11] devtools: add acronym in dictionary for commit checks Sathesh Edara
@ 2023-04-24 12:28 ` Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 07/11] net/octeon_ep: flush pending DMA operations Sathesh Edara
` (4 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:28 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Adds the ISM (Interrupt Status Messages) specific functionality.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.c | 35 +++++++++++++++--
drivers/net/octeon_ep/cnxk_ep_vf.h | 12 ++++++
drivers/net/octeon_ep/otx2_ep_vf.c | 45 ++++++++++++++++++---
drivers/net/octeon_ep/otx2_ep_vf.h | 14 +++++++
drivers/net/octeon_ep/otx_ep_common.h | 16 ++++++++
drivers/net/octeon_ep/otx_ep_ethdev.c | 36 +++++++++++++++++
drivers/net/octeon_ep/otx_ep_rxtx.c | 56 +++++++++++++++++++++------
7 files changed, 194 insertions(+), 20 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 1a92887109..a437ae68cb 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -2,11 +2,12 @@
* Copyright(C) 2022 Marvell.
*/
+#include <inttypes.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_cycles.h>
-
+#include <rte_memzone.h>
#include "cnxk_ep_vf.h"
static void
@@ -85,6 +86,7 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
int loop = OTX_EP_BUSY_LOOP_COUNT;
volatile uint64_t reg_val = 0ull;
+ uint64_t ism_addr;
reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(iq_no));
@@ -132,6 +134,19 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + CNXK_EP_R_IN_INT_LEVELS(iq_no));
+ /* Set up IQ ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | CNXK_EP_ISM_EN
+ | CNXK_EP_ISM_MSIX_DIS)
+ + CNXK_EP_IQ_ISM_OFFSET(iq_no);
+ rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_IN_CNTS_ISM(iq_no));
+ iq->inst_cnt_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + CNXK_EP_IQ_ISM_OFFSET(iq_no));
+ otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
+ (void *)iq->inst_cnt_ism, ism_addr);
+ *iq->inst_cnt_ism = 0;
+ iq->inst_cnt_ism_prev = 0;
return 0;
}
@@ -142,6 +157,7 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
uint64_t oq_ctl = 0ull;
int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
+ uint64_t ism_addr;
/* Wait on IDLE to set to 1, supposed to configure BADDR
* as long as IDLE is 0
@@ -201,9 +217,22 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
- loop = OTX_EP_BUSY_LOOP_COUNT;
+ /* Set up ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | CNXK_EP_ISM_EN
+ | CNXK_EP_ISM_MSIX_DIS)
+ + CNXK_EP_OQ_ISM_OFFSET(oq_no);
+ rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_OUT_CNTS_ISM(oq_no));
+ droq->pkts_sent_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + CNXK_EP_OQ_ISM_OFFSET(oq_no));
+ otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
+ oq_no, (void *)droq->pkts_sent_ism, ism_addr);
+ *droq->pkts_sent_ism = 0;
+ droq->pkts_sent_ism_prev = 0;
- while (((rte_read32(droq->pkts_sent_reg)) != 0ull)) {
+ loop = OTX_EP_BUSY_LOOP_COUNT;
+ while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
rte_delay_ms(1);
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
index aaa5774552..072b38ea15 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.h
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
@@ -27,6 +27,7 @@
#define CNXK_EP_R_IN_INT_LEVELS_START 0x10060
#define CNXK_EP_R_IN_PKT_CNT_START 0x10080
#define CNXK_EP_R_IN_BYTE_CNT_START 0x10090
+#define CNXK_EP_R_IN_CNTS_ISM_START 0x10520
#define CNXK_EP_R_IN_CONTROL(ring) \
(CNXK_EP_R_IN_CONTROL_START + ((ring) * CNXK_EP_RING_OFFSET))
@@ -55,6 +56,8 @@
#define CNXK_EP_R_IN_BYTE_CNT(ring) \
(CNXK_EP_R_IN_BYTE_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))
+#define CNXK_EP_R_IN_CNTS_ISM(ring) \
+ (CNXK_EP_R_IN_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
/** Rings per Virtual Function **/
#define CNXK_EP_R_IN_CTL_RPVF_MASK (0xF)
@@ -87,6 +90,7 @@
#define CNXK_EP_R_OUT_ENABLE_START 0x10170
#define CNXK_EP_R_OUT_PKT_CNT_START 0x10180
#define CNXK_EP_R_OUT_BYTE_CNT_START 0x10190
+#define CNXK_EP_R_OUT_CNTS_ISM_START 0x10510
#define CNXK_EP_R_OUT_CNTS(ring) \
(CNXK_EP_R_OUT_CNTS_START + ((ring) * CNXK_EP_RING_OFFSET))
@@ -118,6 +122,9 @@
#define CNXK_EP_R_OUT_BYTE_CNT(ring) \
(CNXK_EP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))
+#define CNXK_EP_R_OUT_CNTS_ISM(ring) \
+ (CNXK_EP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
+
/*------------------ R_OUT Masks ----------------*/
#define CNXK_EP_R_OUT_INT_LEVELS_BMODE (1ULL << 63)
#define CNXK_EP_R_OUT_INT_LEVELS_TIMET (32)
@@ -161,4 +168,9 @@ struct cnxk_ep_instr_64B {
uint64_t exhdr[4];
};
+#define CNXK_EP_IQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue) + 4)
+#define CNXK_EP_OQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue))
+#define CNXK_EP_ISM_EN (0x1)
+#define CNXK_EP_ISM_MSIX_DIS (0x2)
+
#endif /*_CNXK_EP_VF_H_ */
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 3e4895862b..ced3a415a5 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -6,6 +6,7 @@
#include <rte_common.h>
#include <rte_cycles.h>
+#include <rte_memzone.h>
#include "otx_ep_common.h"
#include "otx2_ep_vf.h"
@@ -236,6 +237,7 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
volatile uint64_t reg_val = 0ull;
+ uint64_t ism_addr;
int loop = SDP_VF_BUSY_LOOP_COUNT;
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(iq_no));
@@ -282,6 +284,22 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
+
+ /* Set up IQ ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | OTX2_EP_ISM_EN
+ | OTX2_EP_ISM_MSIX_DIS)
+ + OTX2_EP_IQ_ISM_OFFSET(iq_no);
+ oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ SDP_VF_R_IN_CNTS_ISM(iq_no));
+ iq->inst_cnt_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + OTX2_EP_IQ_ISM_OFFSET(iq_no));
+ otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%x", iq_no,
+ (void *)iq->inst_cnt_ism,
+ (unsigned int)ism_addr);
+ *iq->inst_cnt_ism = 0;
+ iq->inst_cnt_ism_prev = 0;
+
return 0;
}
@@ -290,6 +308,7 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
+ uint64_t ism_addr;
int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
@@ -351,18 +370,32 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
- loop = OTX_EP_BUSY_LOOP_COUNT;
+ /* Set up ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | OTX2_EP_ISM_EN
+ | OTX2_EP_ISM_MSIX_DIS)
+ + OTX2_EP_OQ_ISM_OFFSET(oq_no);
+ oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ SDP_VF_R_OUT_CNTS_ISM(oq_no));
+ droq->pkts_sent_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + OTX2_EP_OQ_ISM_OFFSET(oq_no));
+ otx_ep_err("SDP_R[%d] OQ ISM virt: %p, dma: 0x%x", oq_no,
+ (void *)droq->pkts_sent_ism,
+ (unsigned int)ism_addr);
+ *droq->pkts_sent_ism = 0;
+ droq->pkts_sent_ism_prev = 0;
+
+ loop = SDP_VF_BUSY_LOOP_COUNT;
while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
rte_delay_ms(1);
}
-
- if (loop < 0) {
- otx_ep_err("Packets sent register value is not cleared\n");
+ if (loop < 0)
return -EIO;
- }
- otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
+ otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no,
+ rte_read32(droq->pkts_sent_reg));
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.h b/drivers/net/octeon_ep/otx2_ep_vf.h
index 36c0b25dea..7c799475ab 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.h
+++ b/drivers/net/octeon_ep/otx2_ep_vf.h
@@ -42,6 +42,7 @@
#define SDP_VF_R_IN_INT_LEVELS_START (0x10060)
#define SDP_VF_R_IN_PKT_CNT_START (0x10080)
#define SDP_VF_R_IN_BYTE_CNT_START (0x10090)
+#define SDP_VF_R_IN_CNTS_ISM_START (0x10520)
#define SDP_VF_R_IN_CONTROL(ring) \
(SDP_VF_R_IN_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
@@ -70,6 +71,9 @@
#define SDP_VF_R_IN_BYTE_CNT(ring) \
(SDP_VF_R_IN_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+#define SDP_VF_R_IN_CNTS_ISM(ring) \
+ (SDP_VF_R_IN_CNTS_ISM_START + (SDP_VF_RING_OFFSET * (ring)))
+
/* SDP VF OQ Registers */
#define SDP_VF_R_OUT_CNTS_START (0x10100)
#define SDP_VF_R_OUT_INT_LEVELS_START (0x10110)
@@ -80,6 +84,7 @@
#define SDP_VF_R_OUT_ENABLE_START (0x10160)
#define SDP_VF_R_OUT_PKT_CNT_START (0x10180)
#define SDP_VF_R_OUT_BYTE_CNT_START (0x10190)
+#define SDP_VF_R_OUT_CNTS_ISM_START (0x10510)
#define SDP_VF_R_OUT_CONTROL(ring) \
(SDP_VF_R_OUT_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
@@ -108,6 +113,9 @@
#define SDP_VF_R_OUT_BYTE_CNT(ring) \
(SDP_VF_R_OUT_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+#define SDP_VF_R_OUT_CNTS_ISM(ring) \
+ (SDP_VF_R_OUT_CNTS_ISM_START + (SDP_VF_RING_OFFSET * (ring)))
+
/* SDP VF IQ Masks */
#define SDP_VF_R_IN_CTL_RPVF_MASK (0xF)
#define SDP_VF_R_IN_CTL_RPVF_POS (48)
@@ -143,6 +151,12 @@ struct otx2_ep_instr_64B {
uint64_t exhdr[4];
};
+#define OTX2_EP_IQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue) + 4)
+#define OTX2_EP_OQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue))
+#define OTX2_EP_ISM_EN (0x1)
+#define OTX2_EP_ISM_MSIX_DIS (0x2)
+#define OTX2_EP_MAX_RX_PKT_LEN (16384)
+
union out_int_lvl_t {
uint64_t d64;
struct {
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index a3260d5243..76528ed49d 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -185,6 +185,9 @@ struct otx_ep_instr_queue {
*/
uint32_t flush_index;
+ /* Free-running/wrapping instruction counter for IQ. */
+ uint32_t inst_cnt;
+
/* This keeps track of the instructions pending in this queue. */
uint64_t instr_pending;
@@ -211,6 +214,12 @@ struct otx_ep_instr_queue {
/* Memory zone */
const struct rte_memzone *iq_mz;
+
+ /* Location in memory updated by SDP ISM */
+ uint32_t *inst_cnt_ism;
+
+ /* track inst count locally to consolidate HW counter updates */
+ uint32_t inst_cnt_ism_prev;
};
/** Descriptor format.
@@ -355,6 +364,10 @@ struct otx_ep_droq {
const struct rte_memzone *desc_ring_mz;
const struct rte_memzone *info_mz;
+
+ /* Pointer to host memory copy of output packet count, set by ISM */
+ uint32_t *pkts_sent_ism;
+ uint32_t pkts_sent_ism_prev;
};
#define OTX_EP_DROQ_SIZE (sizeof(struct otx_ep_droq))
@@ -459,6 +472,9 @@ struct otx_ep_device {
uint64_t rx_offloads;
uint64_t tx_offloads;
+
+ /* DMA buffer for SDP ISM messages */
+ const struct rte_memzone *ism_buffer_mz;
};
int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 5677a2d6a6..30a7a450fb 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -2,6 +2,7 @@
* Copyright(C) 2021 Marvell.
*/
+#include <inttypes.h>
#include <ethdev_pci.h>
#include "otx_ep_common.h"
@@ -90,6 +91,32 @@ otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
return 0;
}
+/*
+ * We only need 2 uint32_t locations per IOQ, but separate these so
+ * each IOQ has the variables on its own cache line.
+ */
+#define OTX_EP_ISM_BUFFER_SIZE (OTX_EP_MAX_IOQS_PER_VF * RTE_CACHE_LINE_SIZE)
+static int
+otx_ep_ism_setup(struct otx_ep_device *otx_epvf)
+{
+ otx_epvf->ism_buffer_mz =
+ rte_eth_dma_zone_reserve(otx_epvf->eth_dev, "ism",
+ 0, OTX_EP_ISM_BUFFER_SIZE,
+ OTX_EP_PCI_RING_ALIGN, 0);
+
+ /* Same DMA buffer is shared by OQ and IQ, clear it at start */
+ memset(otx_epvf->ism_buffer_mz->addr, 0, OTX_EP_ISM_BUFFER_SIZE);
+ if (otx_epvf->ism_buffer_mz == NULL) {
+ otx_ep_err("Failed to allocate ISM buffer\n");
+ return(-1);
+ }
+ otx_ep_dbg("ISM: virt: 0x%p, dma: 0x%" PRIX64,
+ (void *)otx_epvf->ism_buffer_mz->addr,
+ otx_epvf->ism_buffer_mz->iova);
+
+ return 0;
+}
+
static int
otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
{
@@ -110,6 +137,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
otx_epvf->chip_id = dev_id;
ret = otx2_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
+ if (otx_ep_ism_setup(otx_epvf))
+ ret = -EINVAL;
break;
case PCI_DEVID_CN10KA_EP_NET_VF:
case PCI_DEVID_CN10KB_EP_NET_VF:
@@ -118,6 +147,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
otx_epvf->chip_id = dev_id;
ret = cnxk_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
+ if (otx_ep_ism_setup(otx_epvf))
+ ret = -EINVAL;
break;
default:
otx_ep_err("Unsupported device\n");
@@ -434,6 +465,11 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
}
otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);
+ if (rte_eth_dma_zone_free(eth_dev, "ism", 0)) {
+ otx_ep_err("Failed to delete ISM buffer\n");
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 9712e6cce6..c4153bd583 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -20,6 +20,7 @@
#define OTX_EP_INFO_SIZE 8
#define OTX_EP_FSZ_FS0 0
#define DROQ_REFILL_THRESHOLD 16
+#define OTX2_SDP_REQUEST_ISM (0x1ULL << 63)
static void
otx_ep_dmazone_free(const struct rte_memzone *mz)
@@ -412,15 +413,32 @@ otx_ep_iqreq_add(struct otx_ep_instr_queue *iq, void *buf,
static uint32_t
otx_vf_update_read_index(struct otx_ep_instr_queue *iq)
{
- uint32_t new_idx = rte_read32(iq->inst_cnt_reg);
- if (unlikely(new_idx == 0xFFFFFFFFU))
- rte_write32(new_idx, iq->inst_cnt_reg);
+ uint32_t val;
+
+ /*
+ * Batch subtractions from the HW counter to reduce PCIe traffic
+ * This adds an extra local variable, but almost halves the
+ * number of PCIe writes.
+ */
+ val = *iq->inst_cnt_ism;
+ iq->inst_cnt += val - iq->inst_cnt_ism_prev;
+ iq->inst_cnt_ism_prev = val;
+
+ if (val > (uint32_t)(1 << 31)) {
+ /*
+ * Only subtract the packet count in the HW counter
+ * when count above halfway to saturation.
+ */
+ rte_write32(val, iq->inst_cnt_reg);
+ *iq->inst_cnt_ism = 0;
+ iq->inst_cnt_ism_prev = 0;
+ }
+ rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
+
/* Modulo of the new index with the IQ size will give us
* the new index.
*/
- new_idx &= (iq->nb_desc - 1);
-
- return new_idx;
+ return iq->inst_cnt & (iq->nb_desc - 1);
}
static void
@@ -962,14 +980,30 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
static inline uint32_t
otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
{
- volatile uint64_t pkt_count;
uint32_t new_pkts;
+ uint32_t val;
+
+ /*
+ * Batch subtractions from the HW counter to reduce PCIe traffic
+ * This adds an extra local variable, but almost halves the
+ * number of PCIe writes.
+ */
+ val = *droq->pkts_sent_ism;
+ new_pkts = val - droq->pkts_sent_ism_prev;
+ droq->pkts_sent_ism_prev = val;
- /* Latest available OQ packets */
- pkt_count = rte_read32(droq->pkts_sent_reg);
- rte_write32(pkt_count, droq->pkts_sent_reg);
- new_pkts = pkt_count;
+ if (val > (uint32_t)(1 << 31)) {
+ /*
+ * Only subtract the packet count in the HW counter
+ * when count above halfway to saturation.
+ */
+ rte_write32(val, droq->pkts_sent_reg);
+ *droq->pkts_sent_ism = 0;
+ droq->pkts_sent_ism_prev = 0;
+ }
+ rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
droq->pkts_pending += new_pkts;
+
return new_pkts;
}
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 07/11] net/octeon_ep: flush pending DMA operations
2023-04-24 12:28 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (5 preceding siblings ...)
2023-04-24 12:28 ` [PATCH v3 06/11] net/octeon_ep: support ISM Sathesh Edara
@ 2023-04-24 12:28 ` Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 08/11] net/octeon_ep: update queue size checks Sathesh Edara
` (3 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:28 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Flushes the pending DMA operations while reading
the packets, by reading the control and status register.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx_ep_common.h | 8 ++++++++
drivers/net/octeon_ep/otx_ep_rxtx.c | 4 ++++
2 files changed, 12 insertions(+)
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 76528ed49d..444136923f 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -345,6 +345,14 @@ struct otx_ep_droq {
*/
void *pkts_sent_reg;
+ /** Handle DMA incompletion during pkt reads.
+ * This variable is used to initiate a sent_reg_read
+ * that completes pending dma
+ * this variable is used as lvalue so compiler cannot optimize
+ * the reads.
+ */
+ uint32_t sent_reg_val;
+
/* Statistics for this DROQ. */
struct otx_ep_droq_stats stats;
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index c4153bd583..ca968f6fe7 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -917,6 +917,10 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
struct rte_mbuf *first_buf = NULL;
struct rte_mbuf *last_buf = NULL;
+ /* csr read helps to flush pending dma */
+ droq->sent_reg_val = rte_read32(droq->pkts_sent_reg);
+ rte_rmb();
+
while (pkt_len < total_pkt_len) {
int cpy_len = 0;
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 08/11] net/octeon_ep: update queue size checks
2023-04-24 12:28 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (6 preceding siblings ...)
2023-04-24 12:28 ` [PATCH v3 07/11] net/octeon_ep: flush pending DMA operations Sathesh Edara
@ 2023-04-24 12:28 ` Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 09/11] net/octeon_ep: support mailbox between VF and PF Sathesh Edara
` (2 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:28 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Updates the output queue size checks to ensure
that the queue is larger than the backpressure watermark.
Adds setting of the default queue sizes to the minimum
so that applications like testpmd can be started
without explicit queue size arguments.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx_ep_common.h | 9 +++++++--
drivers/net/octeon_ep/otx_ep_ethdev.c | 12 ++++++++++--
drivers/net/octeon_ep/otx_ep_rxtx.h | 4 ++--
3 files changed, 19 insertions(+), 6 deletions(-)
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 444136923f..3582f3087b 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -11,8 +11,13 @@
#define OTX_EP_MAX_RINGS_PER_VF (8)
#define OTX_EP_CFG_IO_QUEUES OTX_EP_MAX_RINGS_PER_VF
#define OTX_EP_64BYTE_INSTR (64)
-#define OTX_EP_MIN_IQ_DESCRIPTORS (128)
-#define OTX_EP_MIN_OQ_DESCRIPTORS (128)
+/*
+ * Backpressure for SDP is configured on Octeon, and the minimum queue sizes
+ * must be much larger than the backpressure watermark configured in the Octeon
+ * SDP driver. IQ and OQ backpressure configurations are separate.
+ */
+#define OTX_EP_MIN_IQ_DESCRIPTORS (2048)
+#define OTX_EP_MIN_OQ_DESCRIPTORS (2048)
#define OTX_EP_MAX_IQ_DESCRIPTORS (8192)
#define OTX_EP_MAX_OQ_DESCRIPTORS (8192)
#define OTX_EP_OQ_BUF_SIZE (2048)
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 30a7a450fb..0f710b1ffa 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -48,6 +48,9 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
devinfo->tx_desc_lim = otx_ep_tx_desc_lim;
+ devinfo->default_rxportconf.ring_size = OTX_EP_MIN_OQ_DESCRIPTORS;
+ devinfo->default_txportconf.ring_size = OTX_EP_MIN_IQ_DESCRIPTORS;
+
return 0;
}
@@ -274,8 +277,8 @@ otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
return -EINVAL;
}
if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
- otx_ep_err("Invalid rx desc number should at least be greater than 8xwmark %u\n",
- num_rx_descs);
+ otx_ep_err("Invalid rx desc number(%u) should at least be greater than 8xwmark %u\n",
+ num_rx_descs, (SDP_GBL_WMARK * 8));
return -EINVAL;
}
@@ -357,6 +360,11 @@ otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
num_tx_descs);
return -EINVAL;
}
+ if (num_tx_descs < (SDP_GBL_WMARK * 8)) {
+ otx_ep_err("Invalid tx desc number(%u) should at least be greater than 8*wmark(%u)\n",
+ num_tx_descs, (SDP_GBL_WMARK * 8));
+ return -EINVAL;
+ }
retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id);
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.h b/drivers/net/octeon_ep/otx_ep_rxtx.h
index 1527d350b5..7012888100 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.h
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.h
@@ -7,8 +7,8 @@
#include <rte_byteorder.h>
-#define OTX_EP_RXD_ALIGN 1
-#define OTX_EP_TXD_ALIGN 1
+#define OTX_EP_RXD_ALIGN 2
+#define OTX_EP_TXD_ALIGN 2
#define OTX_EP_IQ_SEND_FAILED (-1)
#define OTX_EP_IQ_SEND_SUCCESS (0)
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 09/11] net/octeon_ep: support mailbox between VF and PF
2023-04-24 12:28 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (7 preceding siblings ...)
2023-04-24 12:28 ` [PATCH v3 08/11] net/octeon_ep: update queue size checks Sathesh Edara
@ 2023-04-24 12:28 ` Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 10/11] net/octeon_ep: set watermark for output queues Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 11/11] net/octeon_ep: set secondary process dev ops Sathesh Edara
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:28 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Adds the mailbox communication between VF and
PF and supports the following mailbox messages.
- Get and set MAC address
- Get link information
- Get stats
- Set and get MTU
- Send notification to PF
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
doc/guides/nics/features/octeon_ep.ini | 1 +
drivers/net/octeon_ep/cnxk_ep_vf.c | 1 +
drivers/net/octeon_ep/cnxk_ep_vf.h | 12 +-
drivers/net/octeon_ep/meson.build | 1 +
drivers/net/octeon_ep/otx_ep_common.h | 26 +++
drivers/net/octeon_ep/otx_ep_ethdev.c | 143 +++++++++++-
drivers/net/octeon_ep/otx_ep_mbox.c | 309 +++++++++++++++++++++++++
drivers/net/octeon_ep/otx_ep_mbox.h | 163 +++++++++++++
8 files changed, 643 insertions(+), 13 deletions(-)
create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.c
create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.h
diff --git a/doc/guides/nics/features/octeon_ep.ini b/doc/guides/nics/features/octeon_ep.ini
index 305e219262..f3b821c89e 100644
--- a/doc/guides/nics/features/octeon_ep.ini
+++ b/doc/guides/nics/features/octeon_ep.ini
@@ -10,4 +10,5 @@ Linux = Y
x86-64 = Y
Basic stats = Y
Link status = Y
+MTU update = Y
Usage doc = Y
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index a437ae68cb..cadb4ecbf9 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -8,6 +8,7 @@
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_memzone.h>
+#include "otx_ep_common.h"
#include "cnxk_ep_vf.h"
static void
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
index 072b38ea15..86277449ea 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.h
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
@@ -5,7 +5,7 @@
#define _CNXK_EP_VF_H_
#include <rte_io.h>
-#include "otx_ep_common.h"
+
#define CNXK_CONFIG_XPANSION_BAR 0x38
#define CNXK_CONFIG_PCIE_CAP 0x70
#define CNXK_CONFIG_PCIE_DEVCAP 0x74
@@ -92,6 +92,10 @@
#define CNXK_EP_R_OUT_BYTE_CNT_START 0x10190
#define CNXK_EP_R_OUT_CNTS_ISM_START 0x10510
+#define CNXK_EP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CNXK_EP_R_MBOX_VF_PF_DATA_START 0x10230
+#define CNXK_EP_R_MBOX_PF_VF_INT_START 0x10220
+
#define CNXK_EP_R_OUT_CNTS(ring) \
(CNXK_EP_R_OUT_CNTS_START + ((ring) * CNXK_EP_RING_OFFSET))
@@ -125,6 +129,12 @@
#define CNXK_EP_R_OUT_CNTS_ISM(ring) \
(CNXK_EP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
+#define CNXK_EP_R_MBOX_VF_PF_DATA(ring) \
+ (CNXK_EP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_EP_RING_OFFSET))
+
+#define CNXK_EP_R_MBOX_PF_VF_INT(ring) \
+ (CNXK_EP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_EP_RING_OFFSET))
+
/*------------------ R_OUT Masks ----------------*/
#define CNXK_EP_R_OUT_INT_LEVELS_BMODE (1ULL << 63)
#define CNXK_EP_R_OUT_INT_LEVELS_TIMET (32)
diff --git a/drivers/net/octeon_ep/meson.build b/drivers/net/octeon_ep/meson.build
index a267b60290..e698bf9792 100644
--- a/drivers/net/octeon_ep/meson.build
+++ b/drivers/net/octeon_ep/meson.build
@@ -8,4 +8,5 @@ sources = files(
'otx_ep_vf.c',
'otx2_ep_vf.c',
'cnxk_ep_vf.c',
+ 'otx_ep_mbox.c',
)
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 3582f3087b..dadc8d1579 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -4,6 +4,7 @@
#ifndef _OTX_EP_COMMON_H_
#define _OTX_EP_COMMON_H_
+#include <rte_spinlock.h>
#define OTX_EP_NW_PKT_OP 0x1220
#define OTX_EP_NW_CMD_OP 0x1221
@@ -67,6 +68,9 @@
#define oct_ep_read64(addr) rte_read64_relaxed((void *)(addr))
#define oct_ep_write64(val, addr) rte_write64_relaxed((val), (void *)(addr))
+/* Mailbox maximum data size */
+#define MBOX_MAX_DATA_BUF_SIZE 320
+
/* Input Request Header format */
union otx_ep_instr_irh {
uint64_t u64;
@@ -488,6 +492,18 @@ struct otx_ep_device {
/* DMA buffer for SDP ISM messages */
const struct rte_memzone *ism_buffer_mz;
+
+ /* Mailbox lock */
+ rte_spinlock_t mbox_lock;
+
+ /* Mailbox data */
+ uint8_t mbox_data_buf[MBOX_MAX_DATA_BUF_SIZE];
+
+ /* Mailbox data index */
+ int32_t mbox_data_index;
+
+ /* Mailbox receive message length */
+ int32_t mbox_rcv_message_len;
};
int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
@@ -541,6 +557,16 @@ struct otx_ep_buf_free_info {
#define OTX_EP_CLEAR_SLIST_DBELL 0xFFFFFFFF
#define OTX_EP_CLEAR_SDP_OUT_PKT_CNT 0xFFFFFFFFF
+/* Max overhead includes
+ * - Ethernet hdr
+ * - CRC
+ * - nested VLANs
+ * - octeon rx info
+ */
+#define OTX_EP_ETH_OVERHEAD \
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
+ (2 * RTE_VLAN_HLEN) + OTX_EP_DROQ_INFO_SIZE)
+
/* PCI IDs */
#define PCI_VENDOR_ID_CAVIUM 0x177D
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 0f710b1ffa..885fbb475f 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -10,6 +10,7 @@
#include "otx2_ep_vf.h"
#include "cnxk_ep_vf.h"
#include "otx_ep_rxtx.h"
+#include "otx_ep_mbox.h"
#define OTX_EP_DEV(_eth_dev) \
((struct otx_ep_device *)(_eth_dev)->data->dev_private)
@@ -31,15 +32,24 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *devinfo)
{
struct otx_ep_device *otx_epvf;
+ int max_rx_pktlen;
otx_epvf = OTX_EP_DEV(eth_dev);
+ max_rx_pktlen = otx_ep_mbox_get_max_pkt_len(eth_dev);
+ if (!max_rx_pktlen) {
+ otx_ep_err("Failed to get Max Rx packet length");
+ return -EINVAL;
+ }
+
devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
devinfo->max_rx_queues = otx_epvf->max_rx_queues;
devinfo->max_tx_queues = otx_epvf->max_tx_queues;
devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
- devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
+ devinfo->max_rx_pktlen = max_rx_pktlen;
+ devinfo->max_mtu = devinfo->max_rx_pktlen - OTX_EP_ETH_OVERHEAD;
+ devinfo->min_mtu = RTE_ETHER_MIN_LEN;
devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
@@ -54,6 +64,71 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
return 0;
}
+static int
+otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
+{
+ RTE_SET_USED(wait_to_complete);
+
+ if (!eth_dev->data->dev_started)
+ return 0;
+ struct rte_eth_link link;
+ int ret = 0;
+
+ memset(&link, 0, sizeof(link));
+ ret = otx_ep_mbox_get_link_info(eth_dev, &link);
+ if (ret)
+ return -EINVAL;
+ otx_ep_dbg("link status resp link %d duplex %d autoneg %d link_speed %d\n",
+ link.link_status, link.link_duplex, link.link_autoneg, link.link_speed);
+ return rte_eth_linkstatus_set(eth_dev, &link);
+}
+
+static int
+otx_ep_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct rte_eth_dev_info devinfo;
+ int32_t ret = 0;
+
+ if (otx_ep_dev_info_get(eth_dev, &devinfo)) {
+ otx_ep_err("Cannot set MTU to %u: failed to get device info", mtu);
+ return -EPERM;
+ }
+
+ /* Check if MTU is within the allowed range */
+ if (mtu < devinfo.min_mtu) {
+ otx_ep_err("Invalid MTU %u: lower than minimum MTU %u", mtu, devinfo.min_mtu);
+ return -EINVAL;
+ }
+
+ if (mtu > devinfo.max_mtu) {
+ otx_ep_err("Invalid MTU %u; higher than maximum MTU %u", mtu, devinfo.max_mtu);
+ return -EINVAL;
+ }
+
+ ret = otx_ep_mbox_set_mtu(eth_dev, mtu);
+ if (ret)
+ return -EINVAL;
+
+ otx_ep_dbg("MTU is set to %u", mtu);
+
+ return 0;
+}
+
+static int
+otx_ep_dev_set_default_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
+{
+ int ret;
+
+ ret = otx_ep_mbox_set_mac_addr(eth_dev, mac_addr);
+ if (ret)
+ return -EINVAL;
+ otx_ep_dbg("Default MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
+ return 0;
+}
+
static int
otx_ep_dev_start(struct rte_eth_dev *eth_dev)
{
@@ -78,6 +153,7 @@ otx_ep_dev_start(struct rte_eth_dev *eth_dev)
rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
}
+ otx_ep_dev_link_update(eth_dev, 0);
otx_ep_info("dev started\n");
return 0;
@@ -454,6 +530,7 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
uint32_t num_queues, q_no;
+ otx_ep_mbox_send_dev_exit(eth_dev);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
num_queues = otx_epvf->nb_rx_queues;
for (q_no = 0; q_no < num_queues; q_no++) {
@@ -482,19 +559,17 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
}
static int
-otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
+otx_ep_dev_get_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
{
- RTE_SET_USED(wait_to_complete);
-
- if (!eth_dev->data->dev_started)
- return 0;
- struct rte_eth_link link;
+ int ret;
- memset(&link, 0, sizeof(link));
- link.link_status = RTE_ETH_LINK_UP;
- link.link_speed = RTE_ETH_SPEED_NUM_10G;
- link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
- return rte_eth_linkstatus_set(eth_dev, &link);
+ ret = otx_ep_mbox_get_mac_addr(eth_dev, mac_addr);
+ if (ret)
+ return -EINVAL;
+ otx_ep_dbg("Get MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ return 0;
}
/* Define our ethernet definitions */
@@ -511,6 +586,8 @@ static const struct eth_dev_ops otx_ep_eth_dev_ops = {
.stats_reset = otx_ep_dev_stats_reset,
.link_update = otx_ep_dev_link_update,
.dev_close = otx_ep_dev_close,
+ .mtu_set = otx_ep_dev_mtu_set,
+ .mac_addr_set = otx_ep_dev_set_default_mac_addr,
};
static int
@@ -526,6 +603,37 @@ otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+static int otx_ep_eth_dev_query_set_vf_mac(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
+{
+ int ret_val;
+
+ memset(mac_addr, 0, sizeof(struct rte_ether_addr));
+ ret_val = otx_ep_dev_get_mac_addr(eth_dev, mac_addr);
+ if (!ret_val) {
+ if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
+ otx_ep_dbg("PF doesn't have valid VF MAC addr" RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ rte_eth_random_addr(mac_addr->addr_bytes);
+ otx_ep_dbg("Setting Random MAC address" RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ ret_val = otx_ep_dev_set_default_mac_addr(eth_dev, mac_addr);
+ if (ret_val) {
+ otx_ep_err("Setting MAC address " RTE_ETHER_ADDR_PRT_FMT "fails\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ return ret_val;
+ }
+ }
+ otx_ep_dbg("Received valid MAC addr from PF" RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ } else {
+ otx_ep_err("Getting MAC address from PF via Mbox fails with ret_val: %d\n",
+ ret_val);
+ return ret_val;
+ }
+ return 0;
+}
+
static int
otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
{
@@ -541,6 +649,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
otx_epvf->eth_dev = eth_dev;
otx_epvf->port_id = eth_dev->data->port_id;
eth_dev->dev_ops = &otx_ep_eth_dev_ops;
+ rte_spinlock_init(&otx_epvf->mbox_lock);
eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
otx_ep_err("MAC addresses memory allocation failed\n");
@@ -572,6 +681,16 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
return -EINVAL;
}
+ if (otx_ep_mbox_version_check(eth_dev))
+ return -EINVAL;
+
+ if (otx_ep_eth_dev_query_set_vf_mac(eth_dev,
+ (struct rte_ether_addr *)&vf_mac_addr)) {
+ otx_ep_err("set mac addr failed\n");
+ return -ENODEV;
+ }
+ rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx_ep_mbox.c b/drivers/net/octeon_ep/otx_ep_mbox.c
new file mode 100644
index 0000000000..1ad36e14c8
--- /dev/null
+++ b/drivers/net/octeon_ep/otx_ep_mbox.c
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <ethdev_pci.h>
+#include <rte_ether.h>
+#include <rte_kvargs.h>
+
+#include "otx_ep_common.h"
+#include "otx_ep_vf.h"
+#include "otx2_ep_vf.h"
+#include "cnxk_ep_vf.h"
+#include "otx_ep_mbox.h"
+
+static int
+__otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep,
+ union otx_ep_mbox_word cmd,
+ union otx_ep_mbox_word *rsp)
+{
+ volatile uint64_t reg_val = 0ull;
+ int count = 0;
+
+ cmd.s.type = OTX_EP_MBOX_TYPE_CMD;
+ otx2_write64(cmd.u64, otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));
+
+ /* No response for notification messages */
+ if (!rsp)
+ return 0;
+
+ for (count = 0; count < OTX_EP_MBOX_TIMEOUT_MS; count++) {
+ rte_delay_ms(1);
+ reg_val = otx2_read64(otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));
+ if (reg_val != cmd.u64) {
+ rsp->u64 = reg_val;
+ break;
+ }
+ }
+ if (count == OTX_EP_MBOX_TIMEOUT_MS) {
+ otx_ep_err("mbox send Timeout count:%d\n", count);
+ return OTX_EP_MBOX_TIMEOUT_MS;
+ }
+ if (rsp->s.type != OTX_EP_MBOX_TYPE_RSP_ACK) {
+ otx_ep_err("mbox received NACK from PF\n");
+ return OTX_EP_MBOX_CMD_STATUS_NACK;
+ }
+
+ rsp->u64 = reg_val;
+ return 0;
+}
+
+static int
+otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep,
+ union otx_ep_mbox_word cmd,
+ union otx_ep_mbox_word *rsp)
+{
+ int ret;
+
+ rte_spinlock_lock(&otx_ep->mbox_lock);
+ ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, rsp);
+ rte_spinlock_unlock(&otx_ep->mbox_lock);
+ return ret;
+}
+
+static int
+otx_ep_mbox_bulk_read(struct otx_ep_device *otx_ep,
+ enum otx_ep_mbox_opcode opcode,
+ uint8_t *data, int32_t *size)
+{
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int read_cnt, i = 0, ret;
+ int data_len = 0, tmp_len = 0;
+
+ rte_spinlock_lock(&otx_ep->mbox_lock);
+ cmd.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 0;
+ /* Send cmd to read data from PF */
+ ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("mbox bulk read data request failed\n");
+ rte_spinlock_unlock(&otx_ep->mbox_lock);
+ return ret;
+ }
+ /* PF sends the data length of requested CMD
+ * in ACK
+ */
+ memcpy(&data_len, rsp.s_data.data, sizeof(data_len));
+ tmp_len = data_len;
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
+ while (data_len) {
+ ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("mbox bulk read data request failed\n");
+ otx_ep->mbox_data_index = 0;
+ memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE);
+ rte_spinlock_unlock(&otx_ep->mbox_lock);
+ return ret;
+ }
+ if (data_len > OTX_EP_MBOX_MAX_DATA_SIZE) {
+ data_len -= OTX_EP_MBOX_MAX_DATA_SIZE;
+ read_cnt = OTX_EP_MBOX_MAX_DATA_SIZE;
+ } else {
+ read_cnt = data_len;
+ data_len = 0;
+ }
+ for (i = 0; i < read_cnt; i++) {
+ otx_ep->mbox_data_buf[otx_ep->mbox_data_index] =
+ rsp.s_data.data[i];
+ otx_ep->mbox_data_index++;
+ }
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
+ }
+ memcpy(data, otx_ep->mbox_data_buf, tmp_len);
+ *size = tmp_len;
+ otx_ep->mbox_data_index = 0;
+ memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE);
+ rte_spinlock_unlock(&otx_ep->mbox_lock);
+ return 0;
+}
+
+int
+otx_ep_mbox_set_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int ret = 0;
+
+ cmd.u64 = 0;
+ cmd.s_set_mtu.opcode = OTX_EP_MBOX_CMD_SET_MTU;
+ cmd.s_set_mtu.mtu = mtu;
+
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("set MTU failed\n");
+ return -EINVAL;
+ }
+ otx_ep_dbg("mtu set success mtu %u\n", mtu);
+
+ return 0;
+}
+
+int
+otx_ep_mbox_set_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_SET_MAC_ADDR;
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
+ cmd.s_set_mac.mac_addr[i] = mac_addr->addr_bytes[i];
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("set MAC address failed\n");
+ return -EINVAL;
+ }
+ otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT "\n",
+ __func__, RTE_ETHER_ADDR_BYTES(mac_addr));
+ rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
+ return 0;
+}
+
+int
+otx_ep_mbox_get_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_GET_MAC_ADDR;
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("get MAC address failed\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
+ mac_addr->addr_bytes[i] = rsp.s_set_mac.mac_addr[i];
+ otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT "\n",
+ __func__, RTE_ETHER_ADDR_BYTES(mac_addr));
+ return 0;
+}
+
+int otx_ep_mbox_get_link_status(struct rte_eth_dev *eth_dev,
+ uint8_t *oper_up)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OTX_EP_MBOX_CMD_GET_LINK_STATUS;
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("Get link status failed\n");
+ return -EINVAL;
+ }
+ *oper_up = rsp.s_link_status.status;
+ return 0;
+}
+
+int otx_ep_mbox_get_link_info(struct rte_eth_dev *eth_dev,
+ struct rte_eth_link *link)
+{
+ int32_t ret, size;
+ struct otx_ep_iface_link_info link_info;
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ memset(&link_info, 0, sizeof(struct otx_ep_iface_link_info));
+ ret = otx_ep_mbox_bulk_read(otx_ep, OTX_EP_MBOX_CMD_GET_LINK_INFO,
+ (uint8_t *)&link_info, (int32_t *)&size);
+ if (ret) {
+ otx_ep_err("Get link info failed\n");
+ return ret;
+ }
+ link->link_status = RTE_ETH_LINK_UP;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link->link_autoneg = (link_info.autoneg ==
+ OTX_EP_LINK_AUTONEG) ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
+
+ link->link_autoneg = link_info.autoneg;
+ link->link_speed = link_info.speed;
+ return 0;
+}
+
+void
+otx_ep_mbox_enable_interrupt(struct otx_ep_device *otx_ep)
+{
+ rte_write64(0x2, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_MBOX_PF_VF_INT(0));
+}
+
+void
+otx_ep_mbox_disable_interrupt(struct otx_ep_device *otx_ep)
+{
+ rte_write64(0x00, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_MBOX_PF_VF_INT(0));
+}
+
+int
+otx_ep_mbox_get_max_pkt_len(struct rte_eth_dev *eth_dev)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int ret;
+
+ rsp.u64 = 0;
+ cmd.u64 = 0;
+ cmd.s_get_mtu.opcode = OTX_EP_MBOX_CMD_GET_MTU;
+
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret)
+ return ret;
+ return rsp.s_get_mtu.mtu;
+}
+
+int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_version.opcode = OTX_EP_MBOX_CMD_VERSION;
+ cmd.s_version.version = OTX_EP_MBOX_VERSION;
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (!ret)
+ return 0;
+ if (ret == OTX_EP_MBOX_CMD_STATUS_NACK) {
+ otx_ep_err("VF Mbox version:%u is not compatible with PF\n",
+ (uint32_t)cmd.s_version.version);
+ }
+ return ret;
+}
+
+int otx_ep_mbox_send_dev_exit(struct rte_eth_dev *eth_dev)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_version.opcode = OTX_EP_MBOX_CMD_DEV_REMOVE;
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, NULL);
+ return ret;
+}
diff --git a/drivers/net/octeon_ep/otx_ep_mbox.h b/drivers/net/octeon_ep/otx_ep_mbox.h
new file mode 100644
index 0000000000..9df3c53edd
--- /dev/null
+++ b/drivers/net/octeon_ep/otx_ep_mbox.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _OTX_EP_MBOX_H_
+#define _OTX_EP_MBOX_H_
+
+
+#define OTX_EP_MBOX_VERSION 1
+
+enum otx_ep_mbox_opcode {
+ OTX_EP_MBOX_CMD_VERSION,
+ OTX_EP_MBOX_CMD_SET_MTU,
+ OTX_EP_MBOX_CMD_SET_MAC_ADDR,
+ OTX_EP_MBOX_CMD_GET_MAC_ADDR,
+ OTX_EP_MBOX_CMD_GET_LINK_INFO,
+ OTX_EP_MBOX_CMD_GET_STATS,
+ OTX_EP_MBOX_CMD_SET_RX_STATE,
+ OTX_EP_MBOX_CMD_SET_LINK_STATUS,
+ OTX_EP_MBOX_CMD_GET_LINK_STATUS,
+ OTX_EP_MBOX_CMD_GET_MTU,
+ OTX_EP_MBOX_CMD_DEV_REMOVE,
+ OTX_EP_MBOX_CMD_LAST,
+};
+
+enum otx_ep_mbox_word_type {
+ OTX_EP_MBOX_TYPE_CMD,
+ OTX_EP_MBOX_TYPE_RSP_ACK,
+ OTX_EP_MBOX_TYPE_RSP_NACK,
+};
+
+enum otx_ep_mbox_cmd_status {
+ OTX_EP_MBOX_CMD_STATUS_NOT_SETUP = 1,
+ OTX_EP_MBOX_CMD_STATUS_TIMEDOUT = 2,
+ OTX_EP_MBOX_CMD_STATUS_NACK = 3,
+ OTX_EP_MBOX_CMD_STATUS_BUSY = 4
+};
+
+enum otx_ep_mbox_state {
+ OTX_EP_MBOX_STATE_IDLE = 0,
+ OTX_EP_MBOX_STATE_BUSY = 1,
+};
+
+enum otx_ep_link_status {
+ OTX_EP_LINK_STATUS_DOWN,
+ OTX_EP_LINK_STATUS_UP,
+};
+
+enum otx_ep_link_duplex {
+ OTX_EP_LINK_HALF_DUPLEX,
+ OTX_EP_LINK_FULL_DUPLEX,
+};
+
+enum otx_ep_link_autoneg {
+ OTX_EP_LINK_FIXED,
+ OTX_EP_LINK_AUTONEG,
+};
+
+#define OTX_EP_MBOX_TIMEOUT_MS 1200
+#define OTX_EP_MBOX_MAX_RETRIES 2
+#define OTX_EP_MBOX_MAX_DATA_SIZE 6
+#define OTX_EP_MBOX_MAX_DATA_BUF_SIZE 256
+#define OTX_EP_MBOX_MORE_FRAG_FLAG 1
+#define OTX_EP_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
+
+union otx_ep_mbox_word {
+ uint64_t u64;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint64_t data:48;
+ } s;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t frag:1;
+ uint64_t rsvd:5;
+ uint8_t data[6];
+ } s_data;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint64_t version:48;
+ } s_version;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint8_t mac_addr[6];
+ } s_set_mac;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint64_t mtu:48;
+ } s_set_mtu;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint64_t mtu:48;
+ } s_get_mtu;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t state:1;
+ uint64_t rsvd:53;
+ } s_link_state;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t status:1;
+ uint64_t rsvd:53;
+ } s_link_status;
+} __rte_packed;
+
+/* Hardware interface link state information. */
+struct otx_ep_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ uint64_t supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ uint64_t advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ uint32_t speed;
+
+ /* MTU */
+ uint16_t mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ uint8_t autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ uint8_t pause;
+
+ /* Admin state of the link (ifconfig <iface> up/down */
+ uint8_t admin_up;
+
+ /* Operational state of the link: physical link is up down */
+ uint8_t oper_up;
+};
+
+int otx_ep_mbox_set_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
+int otx_ep_mbox_set_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr);
+int otx_ep_mbox_get_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr);
+int otx_ep_mbox_get_link_status(struct rte_eth_dev *eth_dev,
+ uint8_t *oper_up);
+int otx_ep_mbox_get_link_info(struct rte_eth_dev *eth_dev, struct rte_eth_link *link);
+void otx_ep_mbox_enable_interrupt(struct otx_ep_device *otx_ep);
+void otx_ep_mbox_disable_interrupt(struct otx_ep_device *otx_ep);
+int otx_ep_mbox_get_max_pkt_len(struct rte_eth_dev *eth_dev);
+int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev);
+int otx_ep_mbox_send_dev_exit(struct rte_eth_dev *eth_dev);
+#endif
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 10/11] net/octeon_ep: set watermark for output queues
2023-04-24 12:28 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (8 preceding siblings ...)
2023-04-24 12:28 ` [PATCH v3 09/11] net/octeon_ep: support mailbox between VF and PF Sathesh Edara
@ 2023-04-24 12:28 ` Sathesh Edara
2023-04-24 12:28 ` [PATCH v3 11/11] net/octeon_ep: set secondary process dev ops Sathesh Edara
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:28 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Sets the watermark level for SDP output queues
to send backpressure to NIX when the available Rx
buffers fall below the watermark.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.c | 7 ++++++-
drivers/net/octeon_ep/otx_ep_common.h | 1 +
2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index cadb4ecbf9..92c2d2ca5c 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -245,7 +245,12 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
}
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
- return 0;
+
+ /* Set Watermark for backpressure */
+ oct_ep_write64(OTX_EP_OQ_WMARK_MIN,
+ otx_ep->hw_addr + CNXK_EP_R_OUT_WMARK(oq_no));
+
+ return 0;
}
static int
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index dadc8d1579..0305079af9 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -23,6 +23,7 @@
#define OTX_EP_MAX_OQ_DESCRIPTORS (8192)
#define OTX_EP_OQ_BUF_SIZE (2048)
#define OTX_EP_MIN_RX_BUF_SIZE (64)
+#define OTX_EP_OQ_WMARK_MIN (256)
#define OTX_EP_OQ_INFOPTR_MODE (0)
#define OTX_EP_OQ_REFIL_THRESHOLD (16)
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 11/11] net/octeon_ep: set secondary process dev ops
2023-04-24 12:28 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (9 preceding siblings ...)
2023-04-24 12:28 ` [PATCH v3 10/11] net/octeon_ep: set watermark for output queues Sathesh Edara
@ 2023-04-24 12:28 ` Sathesh Edara
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:28 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla,
Veerasenareddy Burru, Anatoly Burakov
Cc: dev
Sets the dev ops and transmit/receive callbacks
for secondary process.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
doc/guides/nics/features/octeon_ep.ini | 1 +
drivers/net/octeon_ep/otx_ep_ethdev.c | 22 +++++++++++++++++++---
2 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/doc/guides/nics/features/octeon_ep.ini b/doc/guides/nics/features/octeon_ep.ini
index f3b821c89e..d52491afa3 100644
--- a/doc/guides/nics/features/octeon_ep.ini
+++ b/doc/guides/nics/features/octeon_ep.ini
@@ -11,4 +11,5 @@ x86-64 = Y
Basic stats = Y
Link status = Y
MTU update = Y
+Multiprocess aware = Y
Usage doc = Y
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 885fbb475f..a9868909f8 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -527,9 +527,17 @@ otx_ep_dev_stats_get(struct rte_eth_dev *eth_dev,
static int
otx_ep_dev_close(struct rte_eth_dev *eth_dev)
{
- struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
+ struct otx_ep_device *otx_epvf;
uint32_t num_queues, q_no;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ return 0;
+ }
+
+ otx_epvf = OTX_EP_DEV(eth_dev);
otx_ep_mbox_send_dev_exit(eth_dev);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
num_queues = otx_epvf->nb_rx_queues;
@@ -593,8 +601,12 @@ static const struct eth_dev_ops otx_ep_eth_dev_ops = {
static int
otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
return 0;
+ }
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
@@ -642,8 +654,12 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
struct rte_ether_addr vf_mac_addr;
/* Single process support */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev->dev_ops = &otx_ep_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
+ eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
return 0;
+ }
rte_eth_copy_pci_info(eth_dev, pdev);
otx_epvf->eth_dev = eth_dev;
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v2 02/10] net/octeon_ep: support CNX10K series SoC
2023-04-05 14:25 ` [PATCH v2 00/10] extend octeon ep driver functionality Sathesh Edara
2023-04-05 14:25 ` [PATCH v2 01/10] net/octeon_ep: support cnf95n and cnf95o SoC Sathesh Edara
@ 2023-04-05 14:25 ` Sathesh Edara
2023-04-05 14:25 ` [PATCH v2 03/10] net/octeon_ep: support error propagation Sathesh Edara
` (8 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-05 14:25 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
This patch adds the required functionality in the Octeon endpoint
driver to support the following CNX10K series endpoint devices.
- CN10KA
- CN10KB
- CNF10KA
- CNF10KB
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.h | 5 ++++-
drivers/net/octeon_ep/otx_ep_ethdev.c | 21 +++++++++++++++++----
2 files changed, 21 insertions(+), 5 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
index 7162461dd9..aaa5774552 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.h
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
@@ -134,7 +134,10 @@
#define CNXK_EP_R_OUT_CTL_ROR_P (1ULL << 24)
#define CNXK_EP_R_OUT_CTL_IMODE (1ULL << 23)
-#define PCI_DEVID_CNXK_EP_NET_VF 0xB903
+#define PCI_DEVID_CN10KA_EP_NET_VF 0xB903
+#define PCI_DEVID_CNF10KA_EP_NET_VF 0xBA03
+#define PCI_DEVID_CNF10KB_EP_NET_VF 0xBC03
+#define PCI_DEVID_CN10KB_EP_NET_VF 0xBD03
int
cnxk_ep_vf_setup_device(struct otx_ep_device *sdpvf);
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 24f62c3e49..b23d52ff84 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -111,7 +111,10 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
ret = otx2_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
break;
- case PCI_DEVID_CNXK_EP_NET_VF:
+ case PCI_DEVID_CN10KA_EP_NET_VF:
+ case PCI_DEVID_CN10KB_EP_NET_VF:
+ case PCI_DEVID_CNF10KA_EP_NET_VF:
+ case PCI_DEVID_CNF10KB_EP_NET_VF:
otx_epvf->chip_id = dev_id;
ret = cnxk_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
@@ -150,7 +153,10 @@ otx_epdev_init(struct otx_ep_device *otx_epvf)
otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
- else if (otx_epvf->chip_id == PCI_DEVID_CNXK_EP_NET_VF)
+ else if (otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
otx_epvf->max_rx_queues = ethdev_queues;
@@ -501,7 +507,11 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
+ otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF)
otx_epvf->pkind = SDP_OTX2_PKIND_FS0;
else
otx_epvf->pkind = SDP_PKIND;
@@ -533,7 +543,10 @@ static const struct rte_pci_id pci_id_otx_ep_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95N_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95O_EP_NET_VF) },
- { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KA_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF10KA_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF10KB_EP_NET_VF) },
{ .vendor_id = 0, /* sentinel */ }
};
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v2 03/10] net/octeon_ep: support error propagation
2023-04-05 14:25 ` [PATCH v2 00/10] extend octeon ep driver functionality Sathesh Edara
2023-04-05 14:25 ` [PATCH v2 01/10] net/octeon_ep: support cnf95n and cnf95o SoC Sathesh Edara
2023-04-05 14:25 ` [PATCH v2 02/10] net/octeon_ep: support CNX10K series SoC Sathesh Edara
@ 2023-04-05 14:25 ` Sathesh Edara
2023-04-05 14:25 ` [PATCH v2 04/10] net/octeon_ep: support IQ/OQ reset Sathesh Edara
` (7 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-05 14:25 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
This patch adds detection of loop limits being
hit, and propagates errors up the call chain when
this happens.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.c | 51 +++++++++++--------
drivers/net/octeon_ep/otx2_ep_vf.c | 49 ++++++++++--------
drivers/net/octeon_ep/otx_ep_common.h | 6 +--
drivers/net/octeon_ep/otx_ep_ethdev.c | 27 +++++++---
drivers/net/octeon_ep/otx_ep_rxtx.c | 51 +++++++++----------
drivers/net/octeon_ep/otx_ep_vf.c | 71 +++++++++++++++++++--------
6 files changed, 155 insertions(+), 100 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 3427fb213b..1a92887109 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -47,36 +47,43 @@ cnxk_ep_vf_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
oct_ep_write64(reg_val, otx_ep->hw_addr + CNXK_EP_R_OUT_CONTROL(q_no));
}
-static void
+static int
cnxk_ep_vf_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
cnxk_ep_vf_setup_global_iq_reg(otx_ep, q_no);
+ return 0;
}
-static void
+static int
cnxk_ep_vf_setup_global_output_regs(struct otx_ep_device *otx_ep)
{
uint32_t q_no;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
cnxk_ep_vf_setup_global_oq_reg(otx_ep, q_no);
+ return 0;
}
-static void
+static int
cnxk_ep_vf_setup_device_regs(struct otx_ep_device *otx_ep)
{
- cnxk_ep_vf_setup_global_input_regs(otx_ep);
- cnxk_ep_vf_setup_global_output_regs(otx_ep);
+ int ret;
+
+ ret = cnxk_ep_vf_setup_global_input_regs(otx_ep);
+ if (ret)
+ return ret;
+ ret = cnxk_ep_vf_setup_global_output_regs(otx_ep);
+ return ret;
}
-static void
+static int
cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
volatile uint64_t reg_val = 0ull;
reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(iq_no));
@@ -91,9 +98,9 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
} while ((!(reg_val & CNXK_EP_R_IN_CTL_IDLE)) && loop--);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("IDLE bit is not set\n");
- return;
+ return -EIO;
}
/* Write the start of the input queue's ring and its size */
@@ -115,9 +122,9 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
rte_delay_ms(1);
} while (reg_val != 0 && loop--);
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("INST CNT REGISTER is not zero\n");
- return;
+ return -EIO;
}
/* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR
@@ -125,14 +132,15 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + CNXK_EP_R_IN_INT_LEVELS(iq_no));
+ return 0;
}
-static void
+static int
cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
/* Wait on IDLE to set to 1, supposed to configure BADDR
@@ -145,9 +153,9 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("OUT CNT REGISTER value is zero\n");
- return;
+ return -EIO;
}
oct_ep_write64(droq->desc_ring_dma, otx_ep->hw_addr + CNXK_EP_R_OUT_SLIST_BADDR(oq_no));
@@ -181,9 +189,9 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("Packets credit register value is not cleared\n");
- return;
+ return -EIO;
}
otx_ep_dbg("SDP_R[%d]_credit:%x", oq_no, rte_read32(droq->pkts_credit_reg));
@@ -201,18 +209,19 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("Packets sent register value is not cleared\n");
- return;
+ return -EIO;
}
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
+ return 0;
}
static int
cnxk_ep_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
uint64_t reg_val = 0ull;
/* Resetting doorbells during IQ enabling also to handle abrupt
@@ -225,7 +234,7 @@ cnxk_ep_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("INSTR DBELL not coming back to 0\n");
return -EIO;
}
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 3c9a70157e..3ffc7275c7 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -49,32 +49,39 @@ otx2_vf_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
}
-static void
+static int
otx2_vf_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
otx2_vf_setup_global_iq_reg(otx_ep, q_no);
+ return 0;
}
-static void
+static int
otx2_vf_setup_global_output_regs(struct otx_ep_device *otx_ep)
{
uint32_t q_no;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
otx2_vf_setup_global_oq_reg(otx_ep, q_no);
+ return 0;
}
-static void
+static int
otx2_vf_setup_device_regs(struct otx_ep_device *otx_ep)
{
- otx2_vf_setup_global_input_regs(otx_ep);
- otx2_vf_setup_global_output_regs(otx_ep);
+ int ret;
+
+ ret = otx2_vf_setup_global_input_regs(otx_ep);
+ if (ret)
+ return ret;
+ ret = otx2_vf_setup_global_output_regs(otx_ep);
+ return ret;
}
-static void
+static int
otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
@@ -92,9 +99,9 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
} while ((!(reg_val & SDP_VF_R_IN_CTL_IDLE)) && loop--);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("IDLE bit is not set\n");
- return;
+ return -EIO;
}
/* Write the start of the input queue's ring and its size */
@@ -115,9 +122,9 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
rte_write32(reg_val, iq->inst_cnt_reg);
} while (reg_val != 0 && loop--);
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("INST CNT REGISTER is not zero\n");
- return;
+ return -EIO;
}
/* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR
@@ -125,14 +132,15 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
+ return 0;
}
-static void
+static int
otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
/* Wait on IDLE to set to 1, supposed to configure BADDR
@@ -145,9 +153,9 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("OUT CNT REGISTER value is zero\n");
- return;
+ return -EIO;
}
oct_ep_write64(droq->desc_ring_dma, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_BADDR(oq_no));
@@ -181,9 +189,9 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("Packets credit register value is not cleared\n");
- return;
+ return -EIO;
}
otx_ep_dbg("SDP_R[%d]_credit:%x", oq_no, rte_read32(droq->pkts_credit_reg));
@@ -200,17 +208,18 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("Packets sent register value is not cleared\n");
- return;
+ return -EIO;
}
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
+ return 0;
}
static int
otx2_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
- uint64_t loop = SDP_VF_BUSY_LOOP_COUNT;
+ int loop = SDP_VF_BUSY_LOOP_COUNT;
uint64_t reg_val = 0ull;
/* Resetting doorbells during IQ enabling also to handle abrupt
@@ -223,7 +232,7 @@ otx2_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("INSTR DBELL not coming back to 0\n");
return -EIO;
}
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index e4c92270d4..479bb1a1a0 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -394,11 +394,11 @@ struct otx_ep_sriov_info {
/* Required functions for each VF device */
struct otx_ep_fn_list {
- void (*setup_iq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
+ int (*setup_iq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
- void (*setup_oq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
+ int (*setup_oq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
- void (*setup_device_regs)(struct otx_ep_device *otx_ep);
+ int (*setup_device_regs)(struct otx_ep_device *otx_ep);
int (*enable_io_queues)(struct otx_ep_device *otx_ep);
void (*disable_io_queues)(struct otx_ep_device *otx_ep);
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index b23d52ff84..5677a2d6a6 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -151,13 +151,17 @@ otx_epdev_init(struct otx_ep_device *otx_epvf)
else if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
- otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
- else if (otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF)
+ otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF) {
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
+ } else {
+ otx_ep_err("Invalid chip_id\n");
+ ret = -EINVAL;
+ goto setup_fail;
+ }
ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
otx_epvf->max_rx_queues = ethdev_queues;
otx_epvf->max_tx_queues = ethdev_queues;
@@ -489,6 +493,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ rte_eth_copy_pci_info(eth_dev, pdev);
otx_epvf->eth_dev = eth_dev;
otx_epvf->port_id = eth_dev->data->port_id;
eth_dev->dev_ops = &otx_ep_eth_dev_ops;
@@ -503,7 +508,8 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
otx_epvf->hw_addr = pdev->mem_resource[0].addr;
otx_epvf->pdev = pdev;
- otx_epdev_init(otx_epvf);
+ if (otx_epdev_init(otx_epvf))
+ return -ENOMEM;
if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
@@ -511,11 +517,16 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF)
+ otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF) {
otx_epvf->pkind = SDP_OTX2_PKIND_FS0;
- else
+ otx_ep_info("using pkind %d\n", otx_epvf->pkind);
+ } else if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF) {
otx_epvf->pkind = SDP_PKIND;
- otx_ep_info("using pkind %d\n", otx_epvf->pkind);
+ otx_ep_info("Using pkind %d.\n", otx_epvf->pkind);
+ } else {
+ otx_ep_err("Invalid chip id\n");
+ return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 6912ca2401..9712e6cce6 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -3,7 +3,7 @@
*/
#include <unistd.h>
-
+#include <assert.h>
#include <rte_eal.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
@@ -81,6 +81,7 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
const struct otx_ep_config *conf;
struct otx_ep_instr_queue *iq;
uint32_t q_size;
+ int ret;
conf = otx_ep->conf;
iq = otx_ep->instr_queue[iq_no];
@@ -140,7 +141,9 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
iq->iqcmd_64B = (conf->iq.instr_type == 64);
/* Set up IQ registers */
- otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);
+ ret = otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);
+ if (ret)
+ return ret;
return 0;
@@ -271,6 +274,7 @@ otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
uint32_t c_refill_threshold;
struct otx_ep_droq *droq;
uint32_t desc_ring_size;
+ int ret;
otx_ep_info("OQ[%d] Init start\n", q_no);
@@ -318,7 +322,9 @@ otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
droq->refill_threshold = c_refill_threshold;
/* Set up OQ registers */
- otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
+ ret = otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
+ if (ret)
+ return ret;
otx_ep->io_qmask.oq |= (1ull << q_no);
@@ -852,19 +858,15 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
* droq->pkts_pending);
*/
droq->stats.pkts_delayed_data++;
- while (retry && !info->length)
+ while (retry && !info->length) {
retry--;
+ rte_delay_us_block(50);
+ }
if (!retry && !info->length) {
otx_ep_err("OCTEON DROQ[%d]: read_idx: %d; Retry failed !!\n",
droq->q_no, droq->read_idx);
/* May be zero length packet; drop it */
- rte_pktmbuf_free(droq_pkt);
- droq->recv_buf_list[droq->read_idx] = NULL;
- droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
- droq->nb_desc);
- droq->stats.dropped_zlp++;
- droq->refill_count++;
- goto oq_read_fail;
+ assert(0);
}
}
if (next_fetch) {
@@ -938,6 +940,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
last_buf = droq_pkt;
} else {
otx_ep_err("no buf\n");
+ assert(0);
}
pkt_len += cpy_len;
@@ -953,16 +956,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
droq_pkt->l3_len = hdr_lens.l3_len;
droq_pkt->l4_len = hdr_lens.l4_len;
- if (droq_pkt->nb_segs > 1 &&
- !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
- rte_pktmbuf_free(droq_pkt);
- goto oq_read_fail;
- }
-
return droq_pkt;
-
-oq_read_fail:
- return NULL;
}
static inline uint32_t
@@ -992,6 +986,7 @@ otx_ep_recv_pkts(void *rx_queue,
struct rte_mbuf *oq_pkt;
uint32_t pkts = 0;
+ uint32_t valid_pkts = 0;
uint32_t new_pkts = 0;
int next_fetch;
@@ -1019,14 +1014,15 @@ otx_ep_recv_pkts(void *rx_queue,
"last_pkt_count %" PRIu64 "new_pkts %d.\n",
droq->pkts_pending, droq->last_pkt_count,
new_pkts);
- droq->pkts_pending -= pkts;
droq->stats.rx_err++;
- goto finish;
+ continue;
+ } else {
+ rx_pkts[valid_pkts] = oq_pkt;
+ valid_pkts++;
+ /* Stats */
+ droq->stats.pkts_received++;
+ droq->stats.bytes_received += oq_pkt->pkt_len;
}
- rx_pkts[pkts] = oq_pkt;
- /* Stats */
- droq->stats.pkts_received++;
- droq->stats.bytes_received += oq_pkt->pkt_len;
}
droq->pkts_pending -= pkts;
@@ -1053,6 +1049,5 @@ otx_ep_recv_pkts(void *rx_queue,
rte_write32(0, droq->pkts_credit_reg);
}
-finish:
- return pkts;
+ return valid_pkts;
}
diff --git a/drivers/net/octeon_ep/otx_ep_vf.c b/drivers/net/octeon_ep/otx_ep_vf.c
index 96366b2a7f..4f3538146b 100644
--- a/drivers/net/octeon_ep/otx_ep_vf.c
+++ b/drivers/net/octeon_ep/otx_ep_vf.c
@@ -12,10 +12,11 @@
#include "otx_ep_vf.h"
-static void
+static int
otx_ep_setup_global_iq_reg(struct otx_ep_device *otx_ep, int q_no)
{
volatile uint64_t reg_val = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
/* Select ES, RO, NS, RDSIZE,DPTR Format#0 for IQs
* IS_64B is by default enabled.
@@ -33,8 +34,11 @@ otx_ep_setup_global_iq_reg(struct otx_ep_device *otx_ep, int q_no)
do {
reg_val = rte_read64(otx_ep->hw_addr +
OTX_EP_R_IN_CONTROL(q_no));
- } while (!(reg_val & OTX_EP_R_IN_CTL_IDLE));
+ } while (!(reg_val & OTX_EP_R_IN_CTL_IDLE) && loop--);
+ if (loop < 0)
+ return -EIO;
}
+ return 0;
}
static void
@@ -60,13 +64,18 @@ otx_ep_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
otx_ep_write64(reg_val, otx_ep->hw_addr, OTX_EP_R_OUT_CONTROL(q_no));
}
-static void
+static int
otx_ep_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
+ int ret = 0;
- for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
- otx_ep_setup_global_iq_reg(otx_ep, q_no);
+ for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++) {
+ ret = otx_ep_setup_global_iq_reg(otx_ep, q_no);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static void
@@ -78,18 +87,24 @@ otx_ep_setup_global_output_regs(struct otx_ep_device *otx_ep)
otx_ep_setup_global_oq_reg(otx_ep, q_no);
}
-static void
+static int
otx_ep_setup_device_regs(struct otx_ep_device *otx_ep)
{
- otx_ep_setup_global_input_regs(otx_ep);
+ int ret;
+
+ ret = otx_ep_setup_global_input_regs(otx_ep);
+ if (ret)
+ return ret;
otx_ep_setup_global_output_regs(otx_ep);
+ return 0;
}
-static void
+static int
otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
volatile uint64_t reg_val = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_IN_CONTROL(iq_no));
@@ -100,7 +115,9 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
do {
reg_val = rte_read64(otx_ep->hw_addr +
OTX_EP_R_IN_CONTROL(iq_no));
- } while (!(reg_val & OTX_EP_R_IN_CTL_IDLE));
+ } while (!(reg_val & OTX_EP_R_IN_CTL_IDLE) && loop--);
+ if (loop < 0)
+ return -EIO;
}
/* Write the start of the input queue's ring and its size */
@@ -120,10 +137,13 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p\n",
iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+ loop = OTX_EP_BUSY_LOOP_COUNT;
do {
reg_val = rte_read32(iq->inst_cnt_reg);
rte_write32(reg_val, iq->inst_cnt_reg);
- } while (reg_val != 0);
+ } while ((reg_val != 0) && loop--);
+ if (loop < 0)
+ return -EIO;
/* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR
* to raise
@@ -133,13 +153,15 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
otx_ep_write64(OTX_EP_CLEAR_IN_INT_LVLS, otx_ep->hw_addr,
OTX_EP_R_IN_INT_LEVELS(iq_no));
+ return 0;
}
-static void
+static int
otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
@@ -150,10 +172,12 @@ otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_OUT_CONTROL(oq_no));
- while (!(reg_val & OTX_EP_R_OUT_CTL_IDLE)) {
+ while (!(reg_val & OTX_EP_R_OUT_CTL_IDLE) && loop--) {
reg_val = rte_read64(otx_ep->hw_addr +
OTX_EP_R_OUT_CONTROL(oq_no));
}
+ if (loop < 0)
+ return -EIO;
otx_ep_write64(droq->desc_ring_dma, otx_ep->hw_addr,
OTX_EP_R_OUT_SLIST_BADDR(oq_no));
@@ -180,11 +204,14 @@ otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
OTX_EP_R_OUT_INT_LEVELS(oq_no));
/* Clear the OQ doorbell */
+ loop = OTX_EP_BUSY_LOOP_COUNT;
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
- while ((rte_read32(droq->pkts_credit_reg) != 0ull)) {
+ while ((rte_read32(droq->pkts_credit_reg) != 0ull) && loop--) {
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
rte_delay_ms(1);
}
+ if (loop < 0)
+ return -EIO;
otx_ep_dbg("OTX_EP_R[%d]_credit:%x\n", oq_no,
rte_read32(droq->pkts_credit_reg));
@@ -195,18 +222,22 @@ otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
otx_ep_dbg("OTX_EP_R[%d]_sent: %x\n", oq_no,
rte_read32(droq->pkts_sent_reg));
- while (((rte_read32(droq->pkts_sent_reg)) != 0ull)) {
+ loop = OTX_EP_BUSY_LOOP_COUNT;
+ while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
rte_delay_ms(1);
}
+ if (loop < 0)
+ return -EIO;
+ return 0;
}
static int
otx_ep_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
- uint64_t reg_val = 0ull;
+ volatile uint64_t reg_val = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
/* Resetting doorbells during IQ enabling also to handle abrupt
* guest reboot. IQ reset does not clear the doorbells.
@@ -219,7 +250,7 @@ otx_ep_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
rte_delay_ms(1);
}
- if (loop == 0) {
+ if (loop < 0) {
otx_ep_err("dbell reset failed\n");
return -EIO;
}
@@ -238,8 +269,8 @@ otx_ep_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
static int
otx_ep_enable_oq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
- uint64_t reg_val = 0ull;
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ volatile uint64_t reg_val = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
/* Resetting doorbells during IQ enabling also to handle abrupt
* guest reboot. IQ reset does not clear the doorbells.
@@ -250,7 +281,7 @@ otx_ep_enable_oq(struct otx_ep_device *otx_ep, uint32_t q_no)
OTX_EP_R_OUT_SLIST_DBELL(q_no))) != 0ull) && loop--) {
rte_delay_ms(1);
}
- if (loop == 0) {
+ if (loop < 0) {
otx_ep_err("dbell reset failed\n");
return -EIO;
}
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v2 04/10] net/octeon_ep: support IQ/OQ reset
2023-04-05 14:25 ` [PATCH v2 00/10] extend octeon ep driver functionality Sathesh Edara
` (2 preceding siblings ...)
2023-04-05 14:25 ` [PATCH v2 03/10] net/octeon_ep: support error propagation Sathesh Edara
@ 2023-04-05 14:25 ` Sathesh Edara
2023-04-05 14:25 ` [PATCH v2 05/10] net/octeon_ep: support ISM Sathesh Edara
` (6 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-05 14:25 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
This patch adds input and output queue reset
functionality, as well as receive queue interrupt
enable and disable functionality.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx2_ep_vf.c | 193 +++++++++++++++++++++++++-
drivers/net/octeon_ep/otx2_ep_vf.h | 61 ++++++--
drivers/net/octeon_ep/otx_ep_common.h | 5 +-
3 files changed, 244 insertions(+), 15 deletions(-)
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 3ffc7275c7..3e4895862b 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -9,6 +9,117 @@
#include "otx_ep_common.h"
#include "otx2_ep_vf.h"
+static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no);
+
+static int
+otx2_vf_reset_iq(struct otx_ep_device *otx_ep, int q_no)
+{
+ int loop = SDP_VF_BUSY_LOOP_COUNT;
+ volatile uint64_t d64 = 0ull;
+
+ /* There is no RST for a ring.
+ * Clear all registers one by one after disabling the ring
+ */
+
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_BADDR(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_RSIZE(q_no));
+
+ d64 = 0xFFFFFFFF; /* ~0ull */
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no));
+ d64 = otx2_read64(otx_ep->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no));
+
+ while ((d64 != 0) && loop--) {
+ rte_delay_ms(1);
+ d64 = otx2_read64(otx_ep->hw_addr +
+ SDP_VF_R_IN_INSTR_DBELL(q_no));
+ }
+ if (loop < 0) {
+ otx_ep_err("%s: doorbell init retry limit exceeded.\n", __func__);
+ return -EIO;
+ }
+
+ loop = SDP_VF_BUSY_LOOP_COUNT;
+ do {
+ d64 = otx2_read64(otx_ep->hw_addr + SDP_VF_R_IN_CNTS(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_CNTS(q_no));
+ rte_delay_ms(1);
+ } while ((d64 & ~SDP_VF_R_IN_CNTS_OUT_INT) != 0 && loop--);
+ if (loop < 0) {
+ otx_ep_err("%s: in_cnts init retry limit exceeded.\n", __func__);
+ return -EIO;
+ }
+
+ d64 = 0ull;
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_PKT_CNT(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_BYTE_CNT(q_no));
+
+ return 0;
+}
+
+static int
+otx2_vf_reset_oq(struct otx_ep_device *otx_ep, int q_no)
+{
+ int loop = SDP_VF_BUSY_LOOP_COUNT;
+ volatile uint64_t d64 = 0ull;
+
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
+
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_BADDR(q_no));
+
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_RSIZE(q_no));
+
+ d64 = 0xFFFFFFFF;
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(q_no));
+ d64 = otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(q_no));
+ while ((d64 != 0) && loop--) {
+ rte_delay_ms(1);
+ d64 = otx2_read64(otx_ep->hw_addr +
+ SDP_VF_R_OUT_SLIST_DBELL(q_no));
+ }
+ if (loop < 0) {
+ otx_ep_err("%s: doorbell init retry limit exceeded.\n", __func__);
+ return -EIO;
+ }
+
+ if (otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CNTS(q_no))
+ & SDP_VF_R_OUT_CNTS_OUT_INT) {
+ /*
+ * The OUT_INT bit is set. This interrupt must be enabled in
+ * order to clear the interrupt. Interrupts are disabled
+ * at the end of this function.
+ */
+ union out_int_lvl_t out_int_lvl;
+
+ out_int_lvl.d64 = otx2_read64(otx_ep->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.time_cnt_en = 1;
+ out_int_lvl.s.cnt = 0;
+ otx2_write64(out_int_lvl.d64, otx_ep->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ }
+
+ loop = SDP_VF_BUSY_LOOP_COUNT;
+ do {
+ d64 = otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+ rte_delay_ms(1);
+ } while ((d64 & ~SDP_VF_R_OUT_CNTS_IN_INT) != 0 && loop--);
+ if (loop < 0) {
+ otx_ep_err("%s: out_cnts init retry limit exceeded.\n", __func__);
+ return -EIO;
+ }
+
+ d64 = 0ull;
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_INT_LEVELS(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_PKT_CNT(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_BYTE_CNT(q_no));
+
+ return 0;
+}
+
static void
otx2_vf_setup_global_iq_reg(struct otx_ep_device *otx_ep, int q_no)
{
@@ -49,24 +160,63 @@ otx2_vf_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
}
+static int
+otx2_vf_reset_input_queues(struct otx_ep_device *otx_ep)
+{
+ uint32_t q_no = 0;
+ int ret = 0;
+
+ for (q_no = 0; q_no < otx_ep->sriov_info.rings_per_vf; q_no++) {
+ ret = otx2_vf_reset_iq(otx_ep, q_no);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+otx2_vf_reset_output_queues(struct otx_ep_device *otx_ep)
+{
+ uint64_t q_no = 0ull;
+ int ret = 0;
+
+ for (q_no = 0; q_no < otx_ep->sriov_info.rings_per_vf; q_no++) {
+ ret = otx2_vf_reset_oq(otx_ep, q_no);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static int
otx2_vf_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
+ int ret = 0;
+
+ ret = otx2_vf_reset_input_queues(otx_ep);
+ if (ret)
+ return ret;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
otx2_vf_setup_global_iq_reg(otx_ep, q_no);
- return 0;
+ return ret;
}
static int
otx2_vf_setup_global_output_regs(struct otx_ep_device *otx_ep)
{
uint32_t q_no;
+ int ret = 0;
+ ret = otx2_vf_reset_output_queues(otx_ep);
+ if (ret)
+ return ret;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
otx2_vf_setup_global_oq_reg(otx_ep, q_no);
- return 0;
+ return ret;
}
static int
@@ -181,8 +331,8 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_write64(OTX_EP_CLEAR_SDP_OUT_PKT_CNT, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_OUT_PKT_CNT(oq_no));
- loop = OTX_EP_BUSY_LOOP_COUNT;
/* Clear the OQ doorbell */
+ loop = OTX_EP_BUSY_LOOP_COUNT;
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
while ((rte_read32(droq->pkts_credit_reg) != 0ull) && loop--) {
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
@@ -344,6 +494,40 @@ otx2_ep_get_defconf(struct otx_ep_device *otx_ep_dev __rte_unused)
return default_conf;
}
+static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no)
+{
+ union out_int_lvl_t out_int_lvl;
+ union out_cnts_t out_cnts;
+
+ out_int_lvl.d64 = otx2_read64(otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.time_cnt_en = 1;
+ out_int_lvl.s.cnt = 0;
+ otx2_write64(out_int_lvl.d64, otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_cnts.d64 = 0;
+ out_cnts.s.resend = 1;
+ otx2_write64(out_cnts.d64, otx_epvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+ return 0;
+}
+
+static int otx2_vf_disable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no)
+{
+ union out_int_lvl_t out_int_lvl;
+
+ /* Disable the interrupt for this queue */
+ out_int_lvl.d64 = otx2_read64(otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.time_cnt_en = 0;
+ out_int_lvl.s.cnt = 0;
+ otx2_write64(out_int_lvl.d64, otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+
+ return 0;
+}
+
int
otx2_ep_vf_setup_device(struct otx_ep_device *otx_ep)
{
@@ -381,5 +565,8 @@ otx2_ep_vf_setup_device(struct otx_ep_device *otx_ep)
otx_ep->fn_list.enable_oq = otx2_vf_enable_oq;
otx_ep->fn_list.disable_oq = otx2_vf_disable_oq;
+ otx_ep->fn_list.enable_rxq_intr = otx2_vf_enable_rxq_intr;
+ otx_ep->fn_list.disable_rxq_intr = otx2_vf_disable_rxq_intr;
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.h b/drivers/net/octeon_ep/otx2_ep_vf.h
index 8f00acd737..36c0b25dea 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.h
+++ b/drivers/net/octeon_ep/otx2_ep_vf.h
@@ -14,17 +14,20 @@
#define SDP_VF_BUSY_LOOP_COUNT (10000)
/* SDP VF OQ Masks */
-#define SDP_VF_R_OUT_CTL_IDLE (1ull << 40)
-#define SDP_VF_R_OUT_CTL_ES_I (1ull << 34)
-#define SDP_VF_R_OUT_CTL_NSR_I (1ull << 33)
-#define SDP_VF_R_OUT_CTL_ROR_I (1ull << 32)
-#define SDP_VF_R_OUT_CTL_ES_D (1ull << 30)
-#define SDP_VF_R_OUT_CTL_NSR_D (1ull << 29)
-#define SDP_VF_R_OUT_CTL_ROR_D (1ull << 28)
-#define SDP_VF_R_OUT_CTL_ES_P (1ull << 26)
-#define SDP_VF_R_OUT_CTL_NSR_P (1ull << 25)
-#define SDP_VF_R_OUT_CTL_ROR_P (1ull << 24)
-#define SDP_VF_R_OUT_CTL_IMODE (1ull << 23)
+#define SDP_VF_R_OUT_CTL_IDLE (0x1ull << 40)
+#define SDP_VF_R_OUT_CTL_ES_I (0x1ull << 34)
+#define SDP_VF_R_OUT_CTL_NSR_I (0x1ull << 33)
+#define SDP_VF_R_OUT_CTL_ROR_I (0x1ull << 32)
+#define SDP_VF_R_OUT_CTL_ES_D (0x1ull << 30)
+#define SDP_VF_R_OUT_CTL_NSR_D (0x1ull << 29)
+#define SDP_VF_R_OUT_CTL_ROR_D (0x1ull << 28)
+#define SDP_VF_R_OUT_CTL_ES_P (0x1ull << 26)
+#define SDP_VF_R_OUT_CTL_NSR_P (0x1ull << 25)
+#define SDP_VF_R_OUT_CTL_ROR_P (0x1ull << 24)
+#define SDP_VF_R_OUT_CTL_IMODE (0x1ull << 23)
+#define SDP_VF_R_OUT_CNTS_OUT_INT (0x1ull << 62)
+#define SDP_VF_R_OUT_CNTS_IN_INT (0x1ull << 61)
+#define SDP_VF_R_IN_CNTS_OUT_INT (0x1ull << 62)
/* SDP VF Register definitions */
#define SDP_VF_RING_OFFSET (0x1ull << 17)
@@ -140,4 +143,40 @@ struct otx2_ep_instr_64B {
uint64_t exhdr[4];
};
+union out_int_lvl_t {
+ uint64_t d64;
+ struct {
+ uint64_t cnt:32;
+ uint64_t timet:22;
+ uint64_t max_len:7;
+ uint64_t max_len_en:1;
+ uint64_t time_cnt_en:1;
+ uint64_t bmode:1;
+ } s;
+};
+
+union out_cnts_t {
+ uint64_t d64;
+ struct {
+ uint64_t cnt:32;
+ uint64_t timer:22;
+ uint64_t rsvd:5;
+ uint64_t resend:1;
+ uint64_t mbox_int:1;
+ uint64_t in_int:1;
+ uint64_t out_int:1;
+ uint64_t send_ism:1;
+ } s;
+};
+
+#define OTX2_EP_64B_INSTR_SIZE (sizeof(otx2_ep_instr_64B))
+
+#define NIX_MAX_HW_FRS 9212
+#define NIX_MAX_VTAG_INS 2
+#define NIX_MAX_VTAG_ACT_SIZE (4 * NIX_MAX_VTAG_INS)
+#define NIX_MAX_FRS \
+ (NIX_MAX_HW_FRS + RTE_ETHER_CRC_LEN - NIX_MAX_VTAG_ACT_SIZE)
+
+#define CN93XX_INTR_R_OUT_INT (1ULL << 62)
+#define CN93XX_INTR_R_IN_INT (1ULL << 61)
#endif /*_OTX2_EP_VF_H_ */
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 479bb1a1a0..a3260d5243 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -408,6 +408,9 @@ struct otx_ep_fn_list {
int (*enable_oq)(struct otx_ep_device *otx_ep, uint32_t q_no);
void (*disable_oq)(struct otx_ep_device *otx_ep, uint32_t q_no);
+
+ int (*enable_rxq_intr)(struct otx_ep_device *otx_epvf, uint16_t q_no);
+ int (*disable_rxq_intr)(struct otx_ep_device *otx_epvf, uint16_t q_no);
};
/* OTX_EP EP VF device data structure */
@@ -498,7 +501,7 @@ struct otx_ep_buf_free_info {
struct otx_ep_gather g;
};
-#define OTX_EP_MAX_PKT_SZ 64000U
+#define OTX_EP_MAX_PKT_SZ 65498U
#define OTX_EP_MAX_MAC_ADDRS 1
#define OTX_EP_SG_ALIGN 8
#define OTX_EP_CLEAR_ISIZE_BSIZE 0x7FFFFFULL
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v2 05/10] net/octeon_ep: support ISM
2023-04-05 14:25 ` [PATCH v2 00/10] extend octeon ep driver functionality Sathesh Edara
` (3 preceding siblings ...)
2023-04-05 14:25 ` [PATCH v2 04/10] net/octeon_ep: support IQ/OQ reset Sathesh Edara
@ 2023-04-05 14:25 ` Sathesh Edara
2023-04-21 4:56 ` Jerin Jacob
2023-04-05 14:25 ` [PATCH v2 06/10] net/octeon_ep: fix DMA incompletion Sathesh Edara
` (5 subsequent siblings)
10 siblings, 1 reply; 50+ messages in thread
From: Sathesh Edara @ 2023-04-05 14:25 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
This patch adds ISM-specific functionality.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.c | 35 +++++++++++++++--
drivers/net/octeon_ep/cnxk_ep_vf.h | 12 ++++++
drivers/net/octeon_ep/otx2_ep_vf.c | 45 ++++++++++++++++++---
drivers/net/octeon_ep/otx2_ep_vf.h | 14 +++++++
drivers/net/octeon_ep/otx_ep_common.h | 16 ++++++++
drivers/net/octeon_ep/otx_ep_ethdev.c | 36 +++++++++++++++++
drivers/net/octeon_ep/otx_ep_rxtx.c | 56 +++++++++++++++++++++------
7 files changed, 194 insertions(+), 20 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 1a92887109..a437ae68cb 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -2,11 +2,12 @@
* Copyright(C) 2022 Marvell.
*/
+#include <inttypes.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_cycles.h>
-
+#include <rte_memzone.h>
#include "cnxk_ep_vf.h"
static void
@@ -85,6 +86,7 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
int loop = OTX_EP_BUSY_LOOP_COUNT;
volatile uint64_t reg_val = 0ull;
+ uint64_t ism_addr;
reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(iq_no));
@@ -132,6 +134,19 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + CNXK_EP_R_IN_INT_LEVELS(iq_no));
+ /* Set up IQ ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | CNXK_EP_ISM_EN
+ | CNXK_EP_ISM_MSIX_DIS)
+ + CNXK_EP_IQ_ISM_OFFSET(iq_no);
+ rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_IN_CNTS_ISM(iq_no));
+ iq->inst_cnt_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + CNXK_EP_IQ_ISM_OFFSET(iq_no));
+ otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
+ (void *)iq->inst_cnt_ism, ism_addr);
+ *iq->inst_cnt_ism = 0;
+ iq->inst_cnt_ism_prev = 0;
return 0;
}
@@ -142,6 +157,7 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
uint64_t oq_ctl = 0ull;
int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
+ uint64_t ism_addr;
/* Wait on IDLE to set to 1, supposed to configure BADDR
* as long as IDLE is 0
@@ -201,9 +217,22 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
- loop = OTX_EP_BUSY_LOOP_COUNT;
+ /* Set up ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | CNXK_EP_ISM_EN
+ | CNXK_EP_ISM_MSIX_DIS)
+ + CNXK_EP_OQ_ISM_OFFSET(oq_no);
+ rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_OUT_CNTS_ISM(oq_no));
+ droq->pkts_sent_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + CNXK_EP_OQ_ISM_OFFSET(oq_no));
+ otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
+ oq_no, (void *)droq->pkts_sent_ism, ism_addr);
+ *droq->pkts_sent_ism = 0;
+ droq->pkts_sent_ism_prev = 0;
- while (((rte_read32(droq->pkts_sent_reg)) != 0ull)) {
+ loop = OTX_EP_BUSY_LOOP_COUNT;
+ while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
rte_delay_ms(1);
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
index aaa5774552..072b38ea15 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.h
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
@@ -27,6 +27,7 @@
#define CNXK_EP_R_IN_INT_LEVELS_START 0x10060
#define CNXK_EP_R_IN_PKT_CNT_START 0x10080
#define CNXK_EP_R_IN_BYTE_CNT_START 0x10090
+#define CNXK_EP_R_IN_CNTS_ISM_START 0x10520
#define CNXK_EP_R_IN_CONTROL(ring) \
(CNXK_EP_R_IN_CONTROL_START + ((ring) * CNXK_EP_RING_OFFSET))
@@ -55,6 +56,8 @@
#define CNXK_EP_R_IN_BYTE_CNT(ring) \
(CNXK_EP_R_IN_BYTE_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))
+#define CNXK_EP_R_IN_CNTS_ISM(ring) \
+ (CNXK_EP_R_IN_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
/** Rings per Virtual Function **/
#define CNXK_EP_R_IN_CTL_RPVF_MASK (0xF)
@@ -87,6 +90,7 @@
#define CNXK_EP_R_OUT_ENABLE_START 0x10170
#define CNXK_EP_R_OUT_PKT_CNT_START 0x10180
#define CNXK_EP_R_OUT_BYTE_CNT_START 0x10190
+#define CNXK_EP_R_OUT_CNTS_ISM_START 0x10510
#define CNXK_EP_R_OUT_CNTS(ring) \
(CNXK_EP_R_OUT_CNTS_START + ((ring) * CNXK_EP_RING_OFFSET))
@@ -118,6 +122,9 @@
#define CNXK_EP_R_OUT_BYTE_CNT(ring) \
(CNXK_EP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))
+#define CNXK_EP_R_OUT_CNTS_ISM(ring) \
+ (CNXK_EP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
+
/*------------------ R_OUT Masks ----------------*/
#define CNXK_EP_R_OUT_INT_LEVELS_BMODE (1ULL << 63)
#define CNXK_EP_R_OUT_INT_LEVELS_TIMET (32)
@@ -161,4 +168,9 @@ struct cnxk_ep_instr_64B {
uint64_t exhdr[4];
};
+#define CNXK_EP_IQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue) + 4)
+#define CNXK_EP_OQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue))
+#define CNXK_EP_ISM_EN (0x1)
+#define CNXK_EP_ISM_MSIX_DIS (0x2)
+
#endif /*_CNXK_EP_VF_H_ */
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 3e4895862b..ced3a415a5 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -6,6 +6,7 @@
#include <rte_common.h>
#include <rte_cycles.h>
+#include <rte_memzone.h>
#include "otx_ep_common.h"
#include "otx2_ep_vf.h"
@@ -236,6 +237,7 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
volatile uint64_t reg_val = 0ull;
+ uint64_t ism_addr;
int loop = SDP_VF_BUSY_LOOP_COUNT;
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(iq_no));
@@ -282,6 +284,22 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
+
+ /* Set up IQ ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | OTX2_EP_ISM_EN
+ | OTX2_EP_ISM_MSIX_DIS)
+ + OTX2_EP_IQ_ISM_OFFSET(iq_no);
+ oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ SDP_VF_R_IN_CNTS_ISM(iq_no));
+ iq->inst_cnt_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + OTX2_EP_IQ_ISM_OFFSET(iq_no));
+ otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%x", iq_no,
+ (void *)iq->inst_cnt_ism,
+ (unsigned int)ism_addr);
+ *iq->inst_cnt_ism = 0;
+ iq->inst_cnt_ism_prev = 0;
+
return 0;
}
@@ -290,6 +308,7 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
+ uint64_t ism_addr;
int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
@@ -351,18 +370,32 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
- loop = OTX_EP_BUSY_LOOP_COUNT;
+ /* Set up ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | OTX2_EP_ISM_EN
+ | OTX2_EP_ISM_MSIX_DIS)
+ + OTX2_EP_OQ_ISM_OFFSET(oq_no);
+ oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ SDP_VF_R_OUT_CNTS_ISM(oq_no));
+ droq->pkts_sent_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + OTX2_EP_OQ_ISM_OFFSET(oq_no));
+ otx_ep_err("SDP_R[%d] OQ ISM virt: %p, dma: 0x%x", oq_no,
+ (void *)droq->pkts_sent_ism,
+ (unsigned int)ism_addr);
+ *droq->pkts_sent_ism = 0;
+ droq->pkts_sent_ism_prev = 0;
+
+ loop = SDP_VF_BUSY_LOOP_COUNT;
while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
rte_delay_ms(1);
}
-
- if (loop < 0) {
- otx_ep_err("Packets sent register value is not cleared\n");
+ if (loop < 0)
return -EIO;
- }
- otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
+ otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no,
+ rte_read32(droq->pkts_sent_reg));
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.h b/drivers/net/octeon_ep/otx2_ep_vf.h
index 36c0b25dea..7c799475ab 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.h
+++ b/drivers/net/octeon_ep/otx2_ep_vf.h
@@ -42,6 +42,7 @@
#define SDP_VF_R_IN_INT_LEVELS_START (0x10060)
#define SDP_VF_R_IN_PKT_CNT_START (0x10080)
#define SDP_VF_R_IN_BYTE_CNT_START (0x10090)
+#define SDP_VF_R_IN_CNTS_ISM_START (0x10520)
#define SDP_VF_R_IN_CONTROL(ring) \
(SDP_VF_R_IN_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
@@ -70,6 +71,9 @@
#define SDP_VF_R_IN_BYTE_CNT(ring) \
(SDP_VF_R_IN_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+#define SDP_VF_R_IN_CNTS_ISM(ring) \
+ (SDP_VF_R_IN_CNTS_ISM_START + (SDP_VF_RING_OFFSET * (ring)))
+
/* SDP VF OQ Registers */
#define SDP_VF_R_OUT_CNTS_START (0x10100)
#define SDP_VF_R_OUT_INT_LEVELS_START (0x10110)
@@ -80,6 +84,7 @@
#define SDP_VF_R_OUT_ENABLE_START (0x10160)
#define SDP_VF_R_OUT_PKT_CNT_START (0x10180)
#define SDP_VF_R_OUT_BYTE_CNT_START (0x10190)
+#define SDP_VF_R_OUT_CNTS_ISM_START (0x10510)
#define SDP_VF_R_OUT_CONTROL(ring) \
(SDP_VF_R_OUT_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
@@ -108,6 +113,9 @@
#define SDP_VF_R_OUT_BYTE_CNT(ring) \
(SDP_VF_R_OUT_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+#define SDP_VF_R_OUT_CNTS_ISM(ring) \
+ (SDP_VF_R_OUT_CNTS_ISM_START + (SDP_VF_RING_OFFSET * (ring)))
+
/* SDP VF IQ Masks */
#define SDP_VF_R_IN_CTL_RPVF_MASK (0xF)
#define SDP_VF_R_IN_CTL_RPVF_POS (48)
@@ -143,6 +151,12 @@ struct otx2_ep_instr_64B {
uint64_t exhdr[4];
};
+#define OTX2_EP_IQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue) + 4)
+#define OTX2_EP_OQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue))
+#define OTX2_EP_ISM_EN (0x1)
+#define OTX2_EP_ISM_MSIX_DIS (0x2)
+#define OTX2_EP_MAX_RX_PKT_LEN (16384)
+
union out_int_lvl_t {
uint64_t d64;
struct {
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index a3260d5243..76528ed49d 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -185,6 +185,9 @@ struct otx_ep_instr_queue {
*/
uint32_t flush_index;
+ /* Free-running/wrapping instruction counter for IQ. */
+ uint32_t inst_cnt;
+
/* This keeps track of the instructions pending in this queue. */
uint64_t instr_pending;
@@ -211,6 +214,12 @@ struct otx_ep_instr_queue {
/* Memory zone */
const struct rte_memzone *iq_mz;
+
+ /* Location in memory updated by SDP ISM */
+ uint32_t *inst_cnt_ism;
+
+ /* track inst count locally to consolidate HW counter updates */
+ uint32_t inst_cnt_ism_prev;
};
/** Descriptor format.
@@ -355,6 +364,10 @@ struct otx_ep_droq {
const struct rte_memzone *desc_ring_mz;
const struct rte_memzone *info_mz;
+
+ /* Pointer to host memory copy of output packet count, set by ISM */
+ uint32_t *pkts_sent_ism;
+ uint32_t pkts_sent_ism_prev;
};
#define OTX_EP_DROQ_SIZE (sizeof(struct otx_ep_droq))
@@ -459,6 +472,9 @@ struct otx_ep_device {
uint64_t rx_offloads;
uint64_t tx_offloads;
+
+ /* DMA buffer for SDP ISM messages */
+ const struct rte_memzone *ism_buffer_mz;
};
int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 5677a2d6a6..30a7a450fb 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -2,6 +2,7 @@
* Copyright(C) 2021 Marvell.
*/
+#include <inttypes.h>
#include <ethdev_pci.h>
#include "otx_ep_common.h"
@@ -90,6 +91,32 @@ otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
return 0;
}
+/*
+ * We only need 2 uint32_t locations per IOQ, but separate these so
+ * each IOQ has the variables on its own cache line.
+ */
+#define OTX_EP_ISM_BUFFER_SIZE (OTX_EP_MAX_IOQS_PER_VF * RTE_CACHE_LINE_SIZE)
+static int
+otx_ep_ism_setup(struct otx_ep_device *otx_epvf)
+{
+ otx_epvf->ism_buffer_mz =
+ rte_eth_dma_zone_reserve(otx_epvf->eth_dev, "ism",
+ 0, OTX_EP_ISM_BUFFER_SIZE,
+ OTX_EP_PCI_RING_ALIGN, 0);
+
+ /* Same DMA buffer is shared by OQ and IQ, clear it at start */
+ memset(otx_epvf->ism_buffer_mz->addr, 0, OTX_EP_ISM_BUFFER_SIZE);
+ if (otx_epvf->ism_buffer_mz == NULL) {
+ otx_ep_err("Failed to allocate ISM buffer\n");
+ return(-1);
+ }
+ otx_ep_dbg("ISM: virt: 0x%p, dma: 0x%" PRIX64,
+ (void *)otx_epvf->ism_buffer_mz->addr,
+ otx_epvf->ism_buffer_mz->iova);
+
+ return 0;
+}
+
static int
otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
{
@@ -110,6 +137,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
otx_epvf->chip_id = dev_id;
ret = otx2_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
+ if (otx_ep_ism_setup(otx_epvf))
+ ret = -EINVAL;
break;
case PCI_DEVID_CN10KA_EP_NET_VF:
case PCI_DEVID_CN10KB_EP_NET_VF:
@@ -118,6 +147,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
otx_epvf->chip_id = dev_id;
ret = cnxk_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
+ if (otx_ep_ism_setup(otx_epvf))
+ ret = -EINVAL;
break;
default:
otx_ep_err("Unsupported device\n");
@@ -434,6 +465,11 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
}
otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);
+ if (rte_eth_dma_zone_free(eth_dev, "ism", 0)) {
+ otx_ep_err("Failed to delete ISM buffer\n");
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 9712e6cce6..c4153bd583 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -20,6 +20,7 @@
#define OTX_EP_INFO_SIZE 8
#define OTX_EP_FSZ_FS0 0
#define DROQ_REFILL_THRESHOLD 16
+#define OTX2_SDP_REQUEST_ISM (0x1ULL << 63)
static void
otx_ep_dmazone_free(const struct rte_memzone *mz)
@@ -412,15 +413,32 @@ otx_ep_iqreq_add(struct otx_ep_instr_queue *iq, void *buf,
static uint32_t
otx_vf_update_read_index(struct otx_ep_instr_queue *iq)
{
- uint32_t new_idx = rte_read32(iq->inst_cnt_reg);
- if (unlikely(new_idx == 0xFFFFFFFFU))
- rte_write32(new_idx, iq->inst_cnt_reg);
+ uint32_t val;
+
+ /*
+ * Batch subtractions from the HW counter to reduce PCIe traffic
+ * This adds an extra local variable, but almost halves the
+ * number of PCIe writes.
+ */
+ val = *iq->inst_cnt_ism;
+ iq->inst_cnt += val - iq->inst_cnt_ism_prev;
+ iq->inst_cnt_ism_prev = val;
+
+ if (val > (uint32_t)(1 << 31)) {
+ /*
+ * Only subtract the packet count in the HW counter
+ * when count above halfway to saturation.
+ */
+ rte_write32(val, iq->inst_cnt_reg);
+ *iq->inst_cnt_ism = 0;
+ iq->inst_cnt_ism_prev = 0;
+ }
+ rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
+
/* Modulo of the new index with the IQ size will give us
* the new index.
*/
- new_idx &= (iq->nb_desc - 1);
-
- return new_idx;
+ return iq->inst_cnt & (iq->nb_desc - 1);
}
static void
@@ -962,14 +980,30 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
static inline uint32_t
otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
{
- volatile uint64_t pkt_count;
uint32_t new_pkts;
+ uint32_t val;
+
+ /*
+ * Batch subtractions from the HW counter to reduce PCIe traffic
+ * This adds an extra local variable, but almost halves the
+ * number of PCIe writes.
+ */
+ val = *droq->pkts_sent_ism;
+ new_pkts = val - droq->pkts_sent_ism_prev;
+ droq->pkts_sent_ism_prev = val;
- /* Latest available OQ packets */
- pkt_count = rte_read32(droq->pkts_sent_reg);
- rte_write32(pkt_count, droq->pkts_sent_reg);
- new_pkts = pkt_count;
+ if (val > (uint32_t)(1 << 31)) {
+ /*
+ * Only subtract the packet count in the HW counter
+ * when count above halfway to saturation.
+ */
+ rte_write32(val, droq->pkts_sent_reg);
+ *droq->pkts_sent_ism = 0;
+ droq->pkts_sent_ism_prev = 0;
+ }
+ rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
droq->pkts_pending += new_pkts;
+
return new_pkts;
}
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* Re: [PATCH v2 05/10] net/octeon_ep: support ISM
2023-04-05 14:25 ` [PATCH v2 05/10] net/octeon_ep: support ISM Sathesh Edara
@ 2023-04-21 4:56 ` Jerin Jacob
0 siblings, 0 replies; 50+ messages in thread
From: Jerin Jacob @ 2023-04-21 4:56 UTC (permalink / raw)
To: Sathesh Edara
Cc: sburla, jerinj, Radha Mohan Chintakuntla, Veerasenareddy Burru, dev
On Wed, Apr 5, 2023 at 7:56 PM Sathesh Edara <sedara@marvell.com> wrote:
>
> This patch adds ISM specific functionality.
See following commit as reference, and update new acronyms like ISM
and others at devtools/words-case.txt
commit 33c942d19260817502b49403f0baaab6113774b2
Author: Ashwin Sekhar T K <asekhar@marvell.com>
Date: Fri Sep 17 16:28:39 2021 +0530
devtools: add Marvell acronyms for commit checks
Update word list with Marvell specific acronyms.
CPT -> Cryptographic Accelerator Unit
CQ -> Completion Queue
LBK -> Loopback Interface Unit
LMT -> Large Atomic Store Unit
MCAM -> Match Content Addressable Memory
NIX -> Network Interface Controller Unit
NPA -> Network Pool Allocator
NPC -> Network Parser and CAM Unit
ROC -> Rest Of Chip
RQ -> Receive Queue
RVU -> Resource Virtualization Unit
SQ -> Send Queue
SSO -> Schedule Synchronize Order Unit
TIM -> Timer Unit
Suggested-by: Ferruh Yigit <ferruh.yigit@intel.com>
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
Reviewed-by: Jerin Jacob <jerinj@marvell.com>
>
> Signed-off-by: Sathesh Edara <sedara@marvell.com>
> ---
> drivers/net/octeon_ep/cnxk_ep_vf.c | 35 +++++++++++++++--
> drivers/net/octeon_ep/cnxk_ep_vf.h | 12 ++++++
> drivers/net/octeon_ep/otx2_ep_vf.c | 45 ++++++++++++++++++---
> drivers/net/octeon_ep/otx2_ep_vf.h | 14 +++++++
> drivers/net/octeon_ep/otx_ep_common.h | 16 ++++++++
> drivers/net/octeon_ep/otx_ep_ethdev.c | 36 +++++++++++++++++
> drivers/net/octeon_ep/otx_ep_rxtx.c | 56 +++++++++++++++++++++------
> 7 files changed, 194 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
> index 1a92887109..a437ae68cb 100644
> --- a/drivers/net/octeon_ep/cnxk_ep_vf.c
> +++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
> @@ -2,11 +2,12 @@
> * Copyright(C) 2022 Marvell.
> */
>
> +#include <inttypes.h>
> #include <errno.h>
>
> #include <rte_common.h>
> #include <rte_cycles.h>
> -
> +#include <rte_memzone.h>
> #include "cnxk_ep_vf.h"
>
> static void
> @@ -85,6 +86,7 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
> struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
> int loop = OTX_EP_BUSY_LOOP_COUNT;
> volatile uint64_t reg_val = 0ull;
> + uint64_t ism_addr;
>
> reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(iq_no));
>
> @@ -132,6 +134,19 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
> */
> oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
> otx_ep->hw_addr + CNXK_EP_R_IN_INT_LEVELS(iq_no));
> + /* Set up IQ ISM registers and structures */
> + ism_addr = (otx_ep->ism_buffer_mz->iova | CNXK_EP_ISM_EN
> + | CNXK_EP_ISM_MSIX_DIS)
> + + CNXK_EP_IQ_ISM_OFFSET(iq_no);
> + rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
> + CNXK_EP_R_IN_CNTS_ISM(iq_no));
> + iq->inst_cnt_ism =
> + (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
> + + CNXK_EP_IQ_ISM_OFFSET(iq_no));
> + otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
> + (void *)iq->inst_cnt_ism, ism_addr);
> + *iq->inst_cnt_ism = 0;
> + iq->inst_cnt_ism_prev = 0;
> return 0;
> }
>
> @@ -142,6 +157,7 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
> uint64_t oq_ctl = 0ull;
> int loop = OTX_EP_BUSY_LOOP_COUNT;
> struct otx_ep_droq *droq = otx_ep->droq[oq_no];
> + uint64_t ism_addr;
>
> /* Wait on IDLE to set to 1, supposed to configure BADDR
> * as long as IDLE is 0
> @@ -201,9 +217,22 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
> rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
>
> otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
> - loop = OTX_EP_BUSY_LOOP_COUNT;
> + /* Set up ISM registers and structures */
> + ism_addr = (otx_ep->ism_buffer_mz->iova | CNXK_EP_ISM_EN
> + | CNXK_EP_ISM_MSIX_DIS)
> + + CNXK_EP_OQ_ISM_OFFSET(oq_no);
> + rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
> + CNXK_EP_R_OUT_CNTS_ISM(oq_no));
> + droq->pkts_sent_ism =
> + (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
> + + CNXK_EP_OQ_ISM_OFFSET(oq_no));
> + otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
> + oq_no, (void *)droq->pkts_sent_ism, ism_addr);
> + *droq->pkts_sent_ism = 0;
> + droq->pkts_sent_ism_prev = 0;
>
> - while (((rte_read32(droq->pkts_sent_reg)) != 0ull)) {
> + loop = OTX_EP_BUSY_LOOP_COUNT;
> + while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
> reg_val = rte_read32(droq->pkts_sent_reg);
> rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
> rte_delay_ms(1);
> diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
> index aaa5774552..072b38ea15 100644
> --- a/drivers/net/octeon_ep/cnxk_ep_vf.h
> +++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
> @@ -27,6 +27,7 @@
> #define CNXK_EP_R_IN_INT_LEVELS_START 0x10060
> #define CNXK_EP_R_IN_PKT_CNT_START 0x10080
> #define CNXK_EP_R_IN_BYTE_CNT_START 0x10090
> +#define CNXK_EP_R_IN_CNTS_ISM_START 0x10520
>
> #define CNXK_EP_R_IN_CONTROL(ring) \
> (CNXK_EP_R_IN_CONTROL_START + ((ring) * CNXK_EP_RING_OFFSET))
> @@ -55,6 +56,8 @@
> #define CNXK_EP_R_IN_BYTE_CNT(ring) \
> (CNXK_EP_R_IN_BYTE_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))
>
> +#define CNXK_EP_R_IN_CNTS_ISM(ring) \
> + (CNXK_EP_R_IN_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
>
> /** Rings per Virtual Function **/
> #define CNXK_EP_R_IN_CTL_RPVF_MASK (0xF)
> @@ -87,6 +90,7 @@
> #define CNXK_EP_R_OUT_ENABLE_START 0x10170
> #define CNXK_EP_R_OUT_PKT_CNT_START 0x10180
> #define CNXK_EP_R_OUT_BYTE_CNT_START 0x10190
> +#define CNXK_EP_R_OUT_CNTS_ISM_START 0x10510
>
> #define CNXK_EP_R_OUT_CNTS(ring) \
> (CNXK_EP_R_OUT_CNTS_START + ((ring) * CNXK_EP_RING_OFFSET))
> @@ -118,6 +122,9 @@
> #define CNXK_EP_R_OUT_BYTE_CNT(ring) \
> (CNXK_EP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))
>
> +#define CNXK_EP_R_OUT_CNTS_ISM(ring) \
> + (CNXK_EP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
> +
> /*------------------ R_OUT Masks ----------------*/
> #define CNXK_EP_R_OUT_INT_LEVELS_BMODE (1ULL << 63)
> #define CNXK_EP_R_OUT_INT_LEVELS_TIMET (32)
> @@ -161,4 +168,9 @@ struct cnxk_ep_instr_64B {
> uint64_t exhdr[4];
> };
>
> +#define CNXK_EP_IQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue) + 4)
> +#define CNXK_EP_OQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue))
> +#define CNXK_EP_ISM_EN (0x1)
> +#define CNXK_EP_ISM_MSIX_DIS (0x2)
> +
> #endif /*_CNXK_EP_VF_H_ */
> diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
> index 3e4895862b..ced3a415a5 100644
> --- a/drivers/net/octeon_ep/otx2_ep_vf.c
> +++ b/drivers/net/octeon_ep/otx2_ep_vf.c
> @@ -6,6 +6,7 @@
>
> #include <rte_common.h>
> #include <rte_cycles.h>
> +#include <rte_memzone.h>
> #include "otx_ep_common.h"
> #include "otx2_ep_vf.h"
>
> @@ -236,6 +237,7 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
> {
> struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
> volatile uint64_t reg_val = 0ull;
> + uint64_t ism_addr;
> int loop = SDP_VF_BUSY_LOOP_COUNT;
>
> reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(iq_no));
> @@ -282,6 +284,22 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
> */
> oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
> otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
> +
> + /* Set up IQ ISM registers and structures */
> + ism_addr = (otx_ep->ism_buffer_mz->iova | OTX2_EP_ISM_EN
> + | OTX2_EP_ISM_MSIX_DIS)
> + + OTX2_EP_IQ_ISM_OFFSET(iq_no);
> + oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
> + SDP_VF_R_IN_CNTS_ISM(iq_no));
> + iq->inst_cnt_ism =
> + (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
> + + OTX2_EP_IQ_ISM_OFFSET(iq_no));
> + otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%x", iq_no,
> + (void *)iq->inst_cnt_ism,
> + (unsigned int)ism_addr);
> + *iq->inst_cnt_ism = 0;
> + iq->inst_cnt_ism_prev = 0;
> +
> return 0;
> }
>
> @@ -290,6 +308,7 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
> {
> volatile uint64_t reg_val = 0ull;
> uint64_t oq_ctl = 0ull;
> + uint64_t ism_addr;
> int loop = OTX_EP_BUSY_LOOP_COUNT;
> struct otx_ep_droq *droq = otx_ep->droq[oq_no];
>
> @@ -351,18 +370,32 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
>
> otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
>
> - loop = OTX_EP_BUSY_LOOP_COUNT;
> + /* Set up ISM registers and structures */
> + ism_addr = (otx_ep->ism_buffer_mz->iova | OTX2_EP_ISM_EN
> + | OTX2_EP_ISM_MSIX_DIS)
> + + OTX2_EP_OQ_ISM_OFFSET(oq_no);
> + oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
> + SDP_VF_R_OUT_CNTS_ISM(oq_no));
> + droq->pkts_sent_ism =
> + (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
> + + OTX2_EP_OQ_ISM_OFFSET(oq_no));
> + otx_ep_err("SDP_R[%d] OQ ISM virt: %p, dma: 0x%x", oq_no,
> + (void *)droq->pkts_sent_ism,
> + (unsigned int)ism_addr);
> + *droq->pkts_sent_ism = 0;
> + droq->pkts_sent_ism_prev = 0;
> +
> + loop = SDP_VF_BUSY_LOOP_COUNT;
> while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
> reg_val = rte_read32(droq->pkts_sent_reg);
> rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
> rte_delay_ms(1);
> }
> -
> - if (loop < 0) {
> - otx_ep_err("Packets sent register value is not cleared\n");
> + if (loop < 0)
> return -EIO;
> - }
> - otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
> + otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no,
> + rte_read32(droq->pkts_sent_reg));
> +
> return 0;
> }
>
> diff --git a/drivers/net/octeon_ep/otx2_ep_vf.h b/drivers/net/octeon_ep/otx2_ep_vf.h
> index 36c0b25dea..7c799475ab 100644
> --- a/drivers/net/octeon_ep/otx2_ep_vf.h
> +++ b/drivers/net/octeon_ep/otx2_ep_vf.h
> @@ -42,6 +42,7 @@
> #define SDP_VF_R_IN_INT_LEVELS_START (0x10060)
> #define SDP_VF_R_IN_PKT_CNT_START (0x10080)
> #define SDP_VF_R_IN_BYTE_CNT_START (0x10090)
> +#define SDP_VF_R_IN_CNTS_ISM_START (0x10520)
>
> #define SDP_VF_R_IN_CONTROL(ring) \
> (SDP_VF_R_IN_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
> @@ -70,6 +71,9 @@
> #define SDP_VF_R_IN_BYTE_CNT(ring) \
> (SDP_VF_R_IN_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
>
> +#define SDP_VF_R_IN_CNTS_ISM(ring) \
> + (SDP_VF_R_IN_CNTS_ISM_START + (SDP_VF_RING_OFFSET * (ring)))
> +
> /* SDP VF OQ Registers */
> #define SDP_VF_R_OUT_CNTS_START (0x10100)
> #define SDP_VF_R_OUT_INT_LEVELS_START (0x10110)
> @@ -80,6 +84,7 @@
> #define SDP_VF_R_OUT_ENABLE_START (0x10160)
> #define SDP_VF_R_OUT_PKT_CNT_START (0x10180)
> #define SDP_VF_R_OUT_BYTE_CNT_START (0x10190)
> +#define SDP_VF_R_OUT_CNTS_ISM_START (0x10510)
>
> #define SDP_VF_R_OUT_CONTROL(ring) \
> (SDP_VF_R_OUT_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
> @@ -108,6 +113,9 @@
> #define SDP_VF_R_OUT_BYTE_CNT(ring) \
> (SDP_VF_R_OUT_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
>
> +#define SDP_VF_R_OUT_CNTS_ISM(ring) \
> + (SDP_VF_R_OUT_CNTS_ISM_START + (SDP_VF_RING_OFFSET * (ring)))
> +
> /* SDP VF IQ Masks */
> #define SDP_VF_R_IN_CTL_RPVF_MASK (0xF)
> #define SDP_VF_R_IN_CTL_RPVF_POS (48)
> @@ -143,6 +151,12 @@ struct otx2_ep_instr_64B {
> uint64_t exhdr[4];
> };
>
> +#define OTX2_EP_IQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue) + 4)
> +#define OTX2_EP_OQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue))
> +#define OTX2_EP_ISM_EN (0x1)
> +#define OTX2_EP_ISM_MSIX_DIS (0x2)
> +#define OTX2_EP_MAX_RX_PKT_LEN (16384)
> +
> union out_int_lvl_t {
> uint64_t d64;
> struct {
> diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
> index a3260d5243..76528ed49d 100644
> --- a/drivers/net/octeon_ep/otx_ep_common.h
> +++ b/drivers/net/octeon_ep/otx_ep_common.h
> @@ -185,6 +185,9 @@ struct otx_ep_instr_queue {
> */
> uint32_t flush_index;
>
> + /* Free-running/wrapping instruction counter for IQ. */
> + uint32_t inst_cnt;
> +
> /* This keeps track of the instructions pending in this queue. */
> uint64_t instr_pending;
>
> @@ -211,6 +214,12 @@ struct otx_ep_instr_queue {
>
> /* Memory zone */
> const struct rte_memzone *iq_mz;
> +
> + /* Location in memory updated by SDP ISM */
> + uint32_t *inst_cnt_ism;
> +
> + /* track inst count locally to consolidate HW counter updates */
> + uint32_t inst_cnt_ism_prev;
> };
>
> /** Descriptor format.
> @@ -355,6 +364,10 @@ struct otx_ep_droq {
> const struct rte_memzone *desc_ring_mz;
>
> const struct rte_memzone *info_mz;
> +
> + /* Pointer to host memory copy of output packet count, set by ISM */
> + uint32_t *pkts_sent_ism;
> + uint32_t pkts_sent_ism_prev;
> };
> #define OTX_EP_DROQ_SIZE (sizeof(struct otx_ep_droq))
>
> @@ -459,6 +472,9 @@ struct otx_ep_device {
> uint64_t rx_offloads;
>
> uint64_t tx_offloads;
> +
> + /* DMA buffer for SDP ISM messages */
> + const struct rte_memzone *ism_buffer_mz;
> };
>
> int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
> diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
> index 5677a2d6a6..30a7a450fb 100644
> --- a/drivers/net/octeon_ep/otx_ep_ethdev.c
> +++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
> @@ -2,6 +2,7 @@
> * Copyright(C) 2021 Marvell.
> */
>
> +#include <inttypes.h>
> #include <ethdev_pci.h>
>
> #include "otx_ep_common.h"
> @@ -90,6 +91,32 @@ otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
> return 0;
> }
>
> +/*
> + * We only need 2 uint32_t locations per IOQ, but separate these so
> + * each IOQ has the variables on its own cache line.
> + */
> +#define OTX_EP_ISM_BUFFER_SIZE (OTX_EP_MAX_IOQS_PER_VF * RTE_CACHE_LINE_SIZE)
> +static int
> +otx_ep_ism_setup(struct otx_ep_device *otx_epvf)
> +{
> + otx_epvf->ism_buffer_mz =
> + rte_eth_dma_zone_reserve(otx_epvf->eth_dev, "ism",
> + 0, OTX_EP_ISM_BUFFER_SIZE,
> + OTX_EP_PCI_RING_ALIGN, 0);
> +
> + /* Same DMA buffer is shared by OQ and IQ, clear it at start */
> + memset(otx_epvf->ism_buffer_mz->addr, 0, OTX_EP_ISM_BUFFER_SIZE);
> + if (otx_epvf->ism_buffer_mz == NULL) {
> + otx_ep_err("Failed to allocate ISM buffer\n");
> + return(-1);
> + }
> + otx_ep_dbg("ISM: virt: 0x%p, dma: 0x%" PRIX64,
> + (void *)otx_epvf->ism_buffer_mz->addr,
> + otx_epvf->ism_buffer_mz->iova);
> +
> + return 0;
> +}
> +
> static int
> otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
> {
> @@ -110,6 +137,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
> otx_epvf->chip_id = dev_id;
> ret = otx2_ep_vf_setup_device(otx_epvf);
> otx_epvf->fn_list.disable_io_queues(otx_epvf);
> + if (otx_ep_ism_setup(otx_epvf))
> + ret = -EINVAL;
> break;
> case PCI_DEVID_CN10KA_EP_NET_VF:
> case PCI_DEVID_CN10KB_EP_NET_VF:
> @@ -118,6 +147,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
> otx_epvf->chip_id = dev_id;
> ret = cnxk_ep_vf_setup_device(otx_epvf);
> otx_epvf->fn_list.disable_io_queues(otx_epvf);
> + if (otx_ep_ism_setup(otx_epvf))
> + ret = -EINVAL;
> break;
> default:
> otx_ep_err("Unsupported device\n");
> @@ -434,6 +465,11 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
> }
> otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);
>
> + if (rte_eth_dma_zone_free(eth_dev, "ism", 0)) {
> + otx_ep_err("Failed to delete ISM buffer\n");
> + return -EINVAL;
> + }
> +
> return 0;
> }
>
> diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
> index 9712e6cce6..c4153bd583 100644
> --- a/drivers/net/octeon_ep/otx_ep_rxtx.c
> +++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
> @@ -20,6 +20,7 @@
> #define OTX_EP_INFO_SIZE 8
> #define OTX_EP_FSZ_FS0 0
> #define DROQ_REFILL_THRESHOLD 16
> +#define OTX2_SDP_REQUEST_ISM (0x1ULL << 63)
>
> static void
> otx_ep_dmazone_free(const struct rte_memzone *mz)
> @@ -412,15 +413,32 @@ otx_ep_iqreq_add(struct otx_ep_instr_queue *iq, void *buf,
> static uint32_t
> otx_vf_update_read_index(struct otx_ep_instr_queue *iq)
> {
> - uint32_t new_idx = rte_read32(iq->inst_cnt_reg);
> - if (unlikely(new_idx == 0xFFFFFFFFU))
> - rte_write32(new_idx, iq->inst_cnt_reg);
> + uint32_t val;
> +
> + /*
> + * Batch subtractions from the HW counter to reduce PCIe traffic
> + * This adds an extra local variable, but almost halves the
> + * number of PCIe writes.
> + */
> + val = *iq->inst_cnt_ism;
> + iq->inst_cnt += val - iq->inst_cnt_ism_prev;
> + iq->inst_cnt_ism_prev = val;
> +
> + if (val > (uint32_t)(1 << 31)) {
> + /*
> + * Only subtract the packet count in the HW counter
> + * when count above halfway to saturation.
> + */
> + rte_write32(val, iq->inst_cnt_reg);
> + *iq->inst_cnt_ism = 0;
> + iq->inst_cnt_ism_prev = 0;
> + }
> + rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
> +
> /* Modulo of the new index with the IQ size will give us
> * the new index.
> */
> - new_idx &= (iq->nb_desc - 1);
> -
> - return new_idx;
> + return iq->inst_cnt & (iq->nb_desc - 1);
> }
>
> static void
> @@ -962,14 +980,30 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
> static inline uint32_t
> otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
> {
> - volatile uint64_t pkt_count;
> uint32_t new_pkts;
> + uint32_t val;
> +
> + /*
> + * Batch subtractions from the HW counter to reduce PCIe traffic
> + * This adds an extra local variable, but almost halves the
> + * number of PCIe writes.
> + */
> + val = *droq->pkts_sent_ism;
> + new_pkts = val - droq->pkts_sent_ism_prev;
> + droq->pkts_sent_ism_prev = val;
>
> - /* Latest available OQ packets */
> - pkt_count = rte_read32(droq->pkts_sent_reg);
> - rte_write32(pkt_count, droq->pkts_sent_reg);
> - new_pkts = pkt_count;
> + if (val > (uint32_t)(1 << 31)) {
> + /*
> + * Only subtract the packet count in the HW counter
> + * when count above halfway to saturation.
> + */
> + rte_write32(val, droq->pkts_sent_reg);
> + *droq->pkts_sent_ism = 0;
> + droq->pkts_sent_ism_prev = 0;
> + }
> + rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
> droq->pkts_pending += new_pkts;
> +
> return new_pkts;
> }
>
> --
> 2.31.1
>
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v2 06/10] net/octeon_ep: fix DMA incompletion
2023-04-05 14:25 ` [PATCH v2 00/10] extend octeon ep driver functionality Sathesh Edara
` (4 preceding siblings ...)
2023-04-05 14:25 ` [PATCH v2 05/10] net/octeon_ep: support ISM Sathesh Edara
@ 2023-04-05 14:25 ` Sathesh Edara
2023-04-21 4:51 ` Jerin Jacob
2023-04-05 14:25 ` [PATCH v2 07/10] net/octeon_ep: update queue size checks Sathesh Edara
` (4 subsequent siblings)
10 siblings, 1 reply; 50+ messages in thread
From: Sathesh Edara @ 2023-04-05 14:25 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
This patch fixes the DMA incompletion
during packet reads.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx_ep_common.h | 8 ++++++++
drivers/net/octeon_ep/otx_ep_rxtx.c | 4 ++++
2 files changed, 12 insertions(+)
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 76528ed49d..1d9da5954e 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -345,6 +345,14 @@ struct otx_ep_droq {
*/
void *pkts_sent_reg;
+ /** Fix for DMA incompletion during pkt reads.
+ * This variable is used to initiate a sent_reg_read
+ * that completes pending dma
+ * this variable is used as lvalue so compiler cannot optimize
+ * the reads.
+ */
+ uint32_t sent_reg_val;
+
/* Statistics for this DROQ. */
struct otx_ep_droq_stats stats;
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index c4153bd583..ca968f6fe7 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -917,6 +917,10 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
struct rte_mbuf *first_buf = NULL;
struct rte_mbuf *last_buf = NULL;
+ /* csr read helps to flush pending dma */
+ droq->sent_reg_val = rte_read32(droq->pkts_sent_reg);
+ rte_rmb();
+
while (pkt_len < total_pkt_len) {
int cpy_len = 0;
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v2 07/10] net/octeon_ep: update queue size checks
2023-04-05 14:25 ` [PATCH v2 00/10] extend octeon ep driver functionality Sathesh Edara
` (5 preceding siblings ...)
2023-04-05 14:25 ` [PATCH v2 06/10] net/octeon_ep: fix DMA incompletion Sathesh Edara
@ 2023-04-05 14:25 ` Sathesh Edara
2023-04-05 14:25 ` [PATCH v2 08/10] net/octeon_ep: support Mailbox between VF and PF Sathesh Edara
` (3 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-05 14:25 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
This patch updates the output queue size checks
to ensure that the queue is larger than the
backpressure watermark. Add setting of default
queue sizes to the minimum so that applications
like testpmd can be started without explicit
queue size arguments.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx_ep_common.h | 9 +++++++--
drivers/net/octeon_ep/otx_ep_ethdev.c | 12 ++++++++++--
drivers/net/octeon_ep/otx_ep_rxtx.h | 4 ++--
3 files changed, 19 insertions(+), 6 deletions(-)
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 1d9da5954e..3beec71968 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -11,8 +11,13 @@
#define OTX_EP_MAX_RINGS_PER_VF (8)
#define OTX_EP_CFG_IO_QUEUES OTX_EP_MAX_RINGS_PER_VF
#define OTX_EP_64BYTE_INSTR (64)
-#define OTX_EP_MIN_IQ_DESCRIPTORS (128)
-#define OTX_EP_MIN_OQ_DESCRIPTORS (128)
+/*
+ * Backpressure for SDP is configured on Octeon, and the minimum queue sizes
+ * must be much larger than the backpressure watermark configured in the Octeon
+ * SDP driver. IQ and OQ backpressure configurations are separate.
+ */
+#define OTX_EP_MIN_IQ_DESCRIPTORS (2048)
+#define OTX_EP_MIN_OQ_DESCRIPTORS (2048)
#define OTX_EP_MAX_IQ_DESCRIPTORS (8192)
#define OTX_EP_MAX_OQ_DESCRIPTORS (8192)
#define OTX_EP_OQ_BUF_SIZE (2048)
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 30a7a450fb..0f710b1ffa 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -48,6 +48,9 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
devinfo->tx_desc_lim = otx_ep_tx_desc_lim;
+ devinfo->default_rxportconf.ring_size = OTX_EP_MIN_OQ_DESCRIPTORS;
+ devinfo->default_txportconf.ring_size = OTX_EP_MIN_IQ_DESCRIPTORS;
+
return 0;
}
@@ -274,8 +277,8 @@ otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
return -EINVAL;
}
if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
- otx_ep_err("Invalid rx desc number should at least be greater than 8xwmark %u\n",
- num_rx_descs);
+ otx_ep_err("Invalid rx desc number(%u) should at least be greater than 8xwmark %u\n",
+ num_rx_descs, (SDP_GBL_WMARK * 8));
return -EINVAL;
}
@@ -357,6 +360,11 @@ otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
num_tx_descs);
return -EINVAL;
}
+ if (num_tx_descs < (SDP_GBL_WMARK * 8)) {
+ otx_ep_err("Invalid tx desc number(%u) should at least be greater than 8*wmark(%u)\n",
+ num_tx_descs, (SDP_GBL_WMARK * 8));
+ return -EINVAL;
+ }
retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id);
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.h b/drivers/net/octeon_ep/otx_ep_rxtx.h
index 1527d350b5..7012888100 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.h
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.h
@@ -7,8 +7,8 @@
#include <rte_byteorder.h>
-#define OTX_EP_RXD_ALIGN 1
-#define OTX_EP_TXD_ALIGN 1
+#define OTX_EP_RXD_ALIGN 2
+#define OTX_EP_TXD_ALIGN 2
#define OTX_EP_IQ_SEND_FAILED (-1)
#define OTX_EP_IQ_SEND_SUCCESS (0)
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v2 08/10] net/octeon_ep: support Mailbox between VF and PF
2023-04-05 14:25 ` [PATCH v2 00/10] extend octeon ep driver functionality Sathesh Edara
` (6 preceding siblings ...)
2023-04-05 14:25 ` [PATCH v2 07/10] net/octeon_ep: update queue size checks Sathesh Edara
@ 2023-04-05 14:25 ` Sathesh Edara
2023-04-21 5:00 ` Jerin Jacob
2023-04-05 14:25 ` [PATCH v2 09/10] net/octeon_ep: set watermark for output queues Sathesh Edara
` (2 subsequent siblings)
10 siblings, 1 reply; 50+ messages in thread
From: Sathesh Edara @ 2023-04-05 14:25 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
This patch adds the mailbox communication between
VF and PF and supports the following mailbox
messages.
- Get and set MAC address
- Get link information
- Get stats
- Get and set link status
- Set and get MTU
- Send notification to PF
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.c | 1 +
drivers/net/octeon_ep/cnxk_ep_vf.h | 12 +-
drivers/net/octeon_ep/meson.build | 1 +
drivers/net/octeon_ep/otx_ep_common.h | 26 +++
drivers/net/octeon_ep/otx_ep_ethdev.c | 143 +++++++++++-
drivers/net/octeon_ep/otx_ep_mbox.c | 309 ++++++++++++++++++++++++++
drivers/net/octeon_ep/otx_ep_mbox.h | 163 ++++++++++++++
7 files changed, 642 insertions(+), 13 deletions(-)
create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.c
create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.h
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index a437ae68cb..cadb4ecbf9 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -8,6 +8,7 @@
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_memzone.h>
+#include "otx_ep_common.h"
#include "cnxk_ep_vf.h"
static void
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
index 072b38ea15..86277449ea 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.h
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
@@ -5,7 +5,7 @@
#define _CNXK_EP_VF_H_
#include <rte_io.h>
-#include "otx_ep_common.h"
+
#define CNXK_CONFIG_XPANSION_BAR 0x38
#define CNXK_CONFIG_PCIE_CAP 0x70
#define CNXK_CONFIG_PCIE_DEVCAP 0x74
@@ -92,6 +92,10 @@
#define CNXK_EP_R_OUT_BYTE_CNT_START 0x10190
#define CNXK_EP_R_OUT_CNTS_ISM_START 0x10510
+#define CNXK_EP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CNXK_EP_R_MBOX_VF_PF_DATA_START 0x10230
+#define CNXK_EP_R_MBOX_PF_VF_INT_START 0x10220
+
#define CNXK_EP_R_OUT_CNTS(ring) \
(CNXK_EP_R_OUT_CNTS_START + ((ring) * CNXK_EP_RING_OFFSET))
@@ -125,6 +129,12 @@
#define CNXK_EP_R_OUT_CNTS_ISM(ring) \
(CNXK_EP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
+#define CNXK_EP_R_MBOX_VF_PF_DATA(ring) \
+ (CNXK_EP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_EP_RING_OFFSET))
+
+#define CNXK_EP_R_MBOX_PF_VF_INT(ring) \
+ (CNXK_EP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_EP_RING_OFFSET))
+
/*------------------ R_OUT Masks ----------------*/
#define CNXK_EP_R_OUT_INT_LEVELS_BMODE (1ULL << 63)
#define CNXK_EP_R_OUT_INT_LEVELS_TIMET (32)
diff --git a/drivers/net/octeon_ep/meson.build b/drivers/net/octeon_ep/meson.build
index a267b60290..e698bf9792 100644
--- a/drivers/net/octeon_ep/meson.build
+++ b/drivers/net/octeon_ep/meson.build
@@ -8,4 +8,5 @@ sources = files(
'otx_ep_vf.c',
'otx2_ep_vf.c',
'cnxk_ep_vf.c',
+ 'otx_ep_mbox.c',
)
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 3beec71968..0bf5454a39 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -4,6 +4,7 @@
#ifndef _OTX_EP_COMMON_H_
#define _OTX_EP_COMMON_H_
+#include <rte_spinlock.h>
#define OTX_EP_NW_PKT_OP 0x1220
#define OTX_EP_NW_CMD_OP 0x1221
@@ -67,6 +68,9 @@
#define oct_ep_read64(addr) rte_read64_relaxed((void *)(addr))
#define oct_ep_write64(val, addr) rte_write64_relaxed((val), (void *)(addr))
+/* Mailbox maximum data size */
+#define MBOX_MAX_DATA_BUF_SIZE 320
+
/* Input Request Header format */
union otx_ep_instr_irh {
uint64_t u64;
@@ -488,6 +492,18 @@ struct otx_ep_device {
/* DMA buffer for SDP ISM messages */
const struct rte_memzone *ism_buffer_mz;
+
+ /* Mailbox lock */
+ rte_spinlock_t mbox_lock;
+
+ /* Mailbox data */
+ uint8_t mbox_data_buf[MBOX_MAX_DATA_BUF_SIZE];
+
+ /* Mailbox data index */
+ int32_t mbox_data_index;
+
+ /* Mailbox receive message length */
+ int32_t mbox_rcv_message_len;
};
int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
@@ -541,6 +557,16 @@ struct otx_ep_buf_free_info {
#define OTX_EP_CLEAR_SLIST_DBELL 0xFFFFFFFF
#define OTX_EP_CLEAR_SDP_OUT_PKT_CNT 0xFFFFFFFFF
+/* Max overhead includes
+ * - Ethernet hdr
+ * - CRC
+ * - nested VLANs
+ * - octeon rx info
+ */
+#define OTX_EP_ETH_OVERHEAD \
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
+ (2 * RTE_VLAN_HLEN) + OTX_EP_DROQ_INFO_SIZE)
+
/* PCI IDs */
#define PCI_VENDOR_ID_CAVIUM 0x177D
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 0f710b1ffa..885fbb475f 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -10,6 +10,7 @@
#include "otx2_ep_vf.h"
#include "cnxk_ep_vf.h"
#include "otx_ep_rxtx.h"
+#include "otx_ep_mbox.h"
#define OTX_EP_DEV(_eth_dev) \
((struct otx_ep_device *)(_eth_dev)->data->dev_private)
@@ -31,15 +32,24 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *devinfo)
{
struct otx_ep_device *otx_epvf;
+ int max_rx_pktlen;
otx_epvf = OTX_EP_DEV(eth_dev);
+ max_rx_pktlen = otx_ep_mbox_get_max_pkt_len(eth_dev);
+ if (!max_rx_pktlen) {
+ otx_ep_err("Failed to get Max Rx packet length");
+ return -EINVAL;
+ }
+
devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
devinfo->max_rx_queues = otx_epvf->max_rx_queues;
devinfo->max_tx_queues = otx_epvf->max_tx_queues;
devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
- devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
+ devinfo->max_rx_pktlen = max_rx_pktlen;
+ devinfo->max_mtu = devinfo->max_rx_pktlen - OTX_EP_ETH_OVERHEAD;
+ devinfo->min_mtu = RTE_ETHER_MIN_LEN;
devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
@@ -54,6 +64,71 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
return 0;
}
+static int
+otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
+{
+ RTE_SET_USED(wait_to_complete);
+
+ if (!eth_dev->data->dev_started)
+ return 0;
+ struct rte_eth_link link;
+ int ret = 0;
+
+ memset(&link, 0, sizeof(link));
+ ret = otx_ep_mbox_get_link_info(eth_dev, &link);
+ if (ret)
+ return -EINVAL;
+ otx_ep_dbg("link status resp link %d duplex %d autoneg %d link_speed %d\n",
+ link.link_status, link.link_duplex, link.link_autoneg, link.link_speed);
+ return rte_eth_linkstatus_set(eth_dev, &link);
+}
+
+static int
+otx_ep_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct rte_eth_dev_info devinfo;
+ int32_t ret = 0;
+
+ if (otx_ep_dev_info_get(eth_dev, &devinfo)) {
+ otx_ep_err("Cannot set MTU to %u: failed to get device info", mtu);
+ return -EPERM;
+ }
+
+ /* Check if MTU is within the allowed range */
+ if (mtu < devinfo.min_mtu) {
+ otx_ep_err("Invalid MTU %u: lower than minimum MTU %u", mtu, devinfo.min_mtu);
+ return -EINVAL;
+ }
+
+ if (mtu > devinfo.max_mtu) {
+ otx_ep_err("Invalid MTU %u; higher than maximum MTU %u", mtu, devinfo.max_mtu);
+ return -EINVAL;
+ }
+
+ ret = otx_ep_mbox_set_mtu(eth_dev, mtu);
+ if (ret)
+ return -EINVAL;
+
+ otx_ep_dbg("MTU is set to %u", mtu);
+
+ return 0;
+}
+
+static int
+otx_ep_dev_set_default_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
+{
+ int ret;
+
+ ret = otx_ep_mbox_set_mac_addr(eth_dev, mac_addr);
+ if (ret)
+ return -EINVAL;
+ otx_ep_dbg("Default MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
+ return 0;
+}
+
static int
otx_ep_dev_start(struct rte_eth_dev *eth_dev)
{
@@ -78,6 +153,7 @@ otx_ep_dev_start(struct rte_eth_dev *eth_dev)
rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
}
+ otx_ep_dev_link_update(eth_dev, 0);
otx_ep_info("dev started\n");
return 0;
@@ -454,6 +530,7 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
uint32_t num_queues, q_no;
+ otx_ep_mbox_send_dev_exit(eth_dev);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
num_queues = otx_epvf->nb_rx_queues;
for (q_no = 0; q_no < num_queues; q_no++) {
@@ -482,19 +559,17 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
}
static int
-otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
+otx_ep_dev_get_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
{
- RTE_SET_USED(wait_to_complete);
-
- if (!eth_dev->data->dev_started)
- return 0;
- struct rte_eth_link link;
+ int ret;
- memset(&link, 0, sizeof(link));
- link.link_status = RTE_ETH_LINK_UP;
- link.link_speed = RTE_ETH_SPEED_NUM_10G;
- link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
- return rte_eth_linkstatus_set(eth_dev, &link);
+ ret = otx_ep_mbox_get_mac_addr(eth_dev, mac_addr);
+ if (ret)
+ return -EINVAL;
+ otx_ep_dbg("Get MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ return 0;
}
/* Define our ethernet definitions */
@@ -511,6 +586,8 @@ static const struct eth_dev_ops otx_ep_eth_dev_ops = {
.stats_reset = otx_ep_dev_stats_reset,
.link_update = otx_ep_dev_link_update,
.dev_close = otx_ep_dev_close,
+ .mtu_set = otx_ep_dev_mtu_set,
+ .mac_addr_set = otx_ep_dev_set_default_mac_addr,
};
static int
@@ -526,6 +603,37 @@ otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+static int otx_ep_eth_dev_query_set_vf_mac(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
+{
+ int ret_val;
+
+ memset(mac_addr, 0, sizeof(struct rte_ether_addr));
+ ret_val = otx_ep_dev_get_mac_addr(eth_dev, mac_addr);
+ if (!ret_val) {
+ if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
+ otx_ep_dbg("PF doesn't have valid VF MAC addr" RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ rte_eth_random_addr(mac_addr->addr_bytes);
+ otx_ep_dbg("Setting Random MAC address" RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ ret_val = otx_ep_dev_set_default_mac_addr(eth_dev, mac_addr);
+ if (ret_val) {
+ otx_ep_err("Setting MAC address " RTE_ETHER_ADDR_PRT_FMT "fails\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ return ret_val;
+ }
+ }
+ otx_ep_dbg("Received valid MAC addr from PF" RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ } else {
+ otx_ep_err("Getting MAC address from PF via Mbox fails with ret_val: %d\n",
+ ret_val);
+ return ret_val;
+ }
+ return 0;
+}
+
static int
otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
{
@@ -541,6 +649,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
otx_epvf->eth_dev = eth_dev;
otx_epvf->port_id = eth_dev->data->port_id;
eth_dev->dev_ops = &otx_ep_eth_dev_ops;
+ rte_spinlock_init(&otx_epvf->mbox_lock);
eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
otx_ep_err("MAC addresses memory allocation failed\n");
@@ -572,6 +681,16 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
return -EINVAL;
}
+ if (otx_ep_mbox_version_check(eth_dev))
+ return -EINVAL;
+
+ if (otx_ep_eth_dev_query_set_vf_mac(eth_dev,
+ (struct rte_ether_addr *)&vf_mac_addr)) {
+ otx_ep_err("set mac addr failed\n");
+ return -ENODEV;
+ }
+ rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx_ep_mbox.c b/drivers/net/octeon_ep/otx_ep_mbox.c
new file mode 100644
index 0000000000..1ad36e14c8
--- /dev/null
+++ b/drivers/net/octeon_ep/otx_ep_mbox.c
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <ethdev_pci.h>
+#include <rte_ether.h>
+#include <rte_kvargs.h>
+
+#include "otx_ep_common.h"
+#include "otx_ep_vf.h"
+#include "otx2_ep_vf.h"
+#include "cnxk_ep_vf.h"
+#include "otx_ep_mbox.h"
+
+static int
+__otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep,
+ union otx_ep_mbox_word cmd,
+ union otx_ep_mbox_word *rsp)
+{
+ volatile uint64_t reg_val = 0ull;
+ int count = 0;
+
+ cmd.s.type = OTX_EP_MBOX_TYPE_CMD;
+ otx2_write64(cmd.u64, otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));
+
+ /* No response for notification messages */
+ if (!rsp)
+ return 0;
+
+ for (count = 0; count < OTX_EP_MBOX_TIMEOUT_MS; count++) {
+ rte_delay_ms(1);
+ reg_val = otx2_read64(otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));
+ if (reg_val != cmd.u64) {
+ rsp->u64 = reg_val;
+ break;
+ }
+ }
+ if (count == OTX_EP_MBOX_TIMEOUT_MS) {
+ otx_ep_err("mbox send Timeout count:%d\n", count);
+ return OTX_EP_MBOX_TIMEOUT_MS;
+ }
+ if (rsp->s.type != OTX_EP_MBOX_TYPE_RSP_ACK) {
+ otx_ep_err("mbox received NACK from PF\n");
+ return OTX_EP_MBOX_CMD_STATUS_NACK;
+ }
+
+ rsp->u64 = reg_val;
+ return 0;
+}
+
+static int
+otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep,
+ union otx_ep_mbox_word cmd,
+ union otx_ep_mbox_word *rsp)
+{
+ int ret;
+
+ rte_spinlock_lock(&otx_ep->mbox_lock);
+ ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, rsp);
+ rte_spinlock_unlock(&otx_ep->mbox_lock);
+ return ret;
+}
+
+static int
+otx_ep_mbox_bulk_read(struct otx_ep_device *otx_ep,
+ enum otx_ep_mbox_opcode opcode,
+ uint8_t *data, int32_t *size)
+{
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int read_cnt, i = 0, ret;
+ int data_len = 0, tmp_len = 0;
+
+ rte_spinlock_lock(&otx_ep->mbox_lock);
+ cmd.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 0;
+ /* Send cmd to read data from PF */
+ ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("mbox bulk read data request failed\n");
+ rte_spinlock_unlock(&otx_ep->mbox_lock);
+ return ret;
+ }
+ /* PF sends the data length of requested CMD
+ * in ACK
+ */
+ memcpy(&data_len, rsp.s_data.data, sizeof(data_len));
+ tmp_len = data_len;
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
+ while (data_len) {
+ ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("mbox bulk read data request failed\n");
+ otx_ep->mbox_data_index = 0;
+ memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE);
+ rte_spinlock_unlock(&otx_ep->mbox_lock);
+ return ret;
+ }
+ if (data_len > OTX_EP_MBOX_MAX_DATA_SIZE) {
+ data_len -= OTX_EP_MBOX_MAX_DATA_SIZE;
+ read_cnt = OTX_EP_MBOX_MAX_DATA_SIZE;
+ } else {
+ read_cnt = data_len;
+ data_len = 0;
+ }
+ for (i = 0; i < read_cnt; i++) {
+ otx_ep->mbox_data_buf[otx_ep->mbox_data_index] =
+ rsp.s_data.data[i];
+ otx_ep->mbox_data_index++;
+ }
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
+ }
+ memcpy(data, otx_ep->mbox_data_buf, tmp_len);
+ *size = tmp_len;
+ otx_ep->mbox_data_index = 0;
+ memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE);
+ rte_spinlock_unlock(&otx_ep->mbox_lock);
+ return 0;
+}
+
+int
+otx_ep_mbox_set_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int ret = 0;
+
+ cmd.u64 = 0;
+ cmd.s_set_mtu.opcode = OTX_EP_MBOX_CMD_SET_MTU;
+ cmd.s_set_mtu.mtu = mtu;
+
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("set MTU failed\n");
+ return -EINVAL;
+ }
+ otx_ep_dbg("mtu set success mtu %u\n", mtu);
+
+ return 0;
+}
+
+int
+otx_ep_mbox_set_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_SET_MAC_ADDR;
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
+ cmd.s_set_mac.mac_addr[i] = mac_addr->addr_bytes[i];
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("set MAC address failed\n");
+ return -EINVAL;
+ }
+ otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT "\n",
+ __func__, RTE_ETHER_ADDR_BYTES(mac_addr));
+ rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
+ return 0;
+}
+
+int
+otx_ep_mbox_get_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_GET_MAC_ADDR;
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("get MAC address failed\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
+ mac_addr->addr_bytes[i] = rsp.s_set_mac.mac_addr[i];
+ otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT "\n",
+ __func__, RTE_ETHER_ADDR_BYTES(mac_addr));
+ return 0;
+}
+
+int otx_ep_mbox_get_link_status(struct rte_eth_dev *eth_dev,
+ uint8_t *oper_up)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OTX_EP_MBOX_CMD_GET_LINK_STATUS;
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("Get link status failed\n");
+ return -EINVAL;
+ }
+ *oper_up = rsp.s_link_status.status;
+ return 0;
+}
+
+int otx_ep_mbox_get_link_info(struct rte_eth_dev *eth_dev,
+ struct rte_eth_link *link)
+{
+ int32_t ret, size;
+ struct otx_ep_iface_link_info link_info;
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ memset(&link_info, 0, sizeof(struct otx_ep_iface_link_info));
+ ret = otx_ep_mbox_bulk_read(otx_ep, OTX_EP_MBOX_CMD_GET_LINK_INFO,
+ (uint8_t *)&link_info, (int32_t *)&size);
+ if (ret) {
+ otx_ep_err("Get link info failed\n");
+ return ret;
+ }
+ link->link_status = RTE_ETH_LINK_UP;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link->link_autoneg = (link_info.autoneg ==
+ OTX_EP_LINK_AUTONEG) ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
+
+ link->link_autoneg = link_info.autoneg;
+ link->link_speed = link_info.speed;
+ return 0;
+}
+
+void
+otx_ep_mbox_enable_interrupt(struct otx_ep_device *otx_ep)
+{
+ rte_write64(0x2, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_MBOX_PF_VF_INT(0));
+}
+
+void
+otx_ep_mbox_disable_interrupt(struct otx_ep_device *otx_ep)
+{
+ rte_write64(0x00, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_MBOX_PF_VF_INT(0));
+}
+
+int
+otx_ep_mbox_get_max_pkt_len(struct rte_eth_dev *eth_dev)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int ret;
+
+ rsp.u64 = 0;
+ cmd.u64 = 0;
+ cmd.s_get_mtu.opcode = OTX_EP_MBOX_CMD_GET_MTU;
+
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret)
+ return ret;
+ return rsp.s_get_mtu.mtu;
+}
+
+int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_version.opcode = OTX_EP_MBOX_CMD_VERSION;
+ cmd.s_version.version = OTX_EP_MBOX_VERSION;
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (!ret)
+ return 0;
+ if (ret == OTX_EP_MBOX_CMD_STATUS_NACK) {
+ otx_ep_err("VF Mbox version:%u is not compatible with PF\n",
+ (uint32_t)cmd.s_version.version);
+ }
+ return ret;
+}
+
+int otx_ep_mbox_send_dev_exit(struct rte_eth_dev *eth_dev)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_version.opcode = OTX_EP_MBOX_CMD_DEV_REMOVE;
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, NULL);
+ return ret;
+}
diff --git a/drivers/net/octeon_ep/otx_ep_mbox.h b/drivers/net/octeon_ep/otx_ep_mbox.h
new file mode 100644
index 0000000000..9df3c53edd
--- /dev/null
+++ b/drivers/net/octeon_ep/otx_ep_mbox.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _OTX_EP_MBOX_H_
+#define _OTX_EP_MBOX_H_
+
+
+#define OTX_EP_MBOX_VERSION 1
+
+enum otx_ep_mbox_opcode {
+ OTX_EP_MBOX_CMD_VERSION,
+ OTX_EP_MBOX_CMD_SET_MTU,
+ OTX_EP_MBOX_CMD_SET_MAC_ADDR,
+ OTX_EP_MBOX_CMD_GET_MAC_ADDR,
+ OTX_EP_MBOX_CMD_GET_LINK_INFO,
+ OTX_EP_MBOX_CMD_GET_STATS,
+ OTX_EP_MBOX_CMD_SET_RX_STATE,
+ OTX_EP_MBOX_CMD_SET_LINK_STATUS,
+ OTX_EP_MBOX_CMD_GET_LINK_STATUS,
+ OTX_EP_MBOX_CMD_GET_MTU,
+ OTX_EP_MBOX_CMD_DEV_REMOVE,
+ OTX_EP_MBOX_CMD_LAST,
+};
+
+enum otx_ep_mbox_word_type {
+ OTX_EP_MBOX_TYPE_CMD,
+ OTX_EP_MBOX_TYPE_RSP_ACK,
+ OTX_EP_MBOX_TYPE_RSP_NACK,
+};
+
+enum otx_ep_mbox_cmd_status {
+ OTX_EP_MBOX_CMD_STATUS_NOT_SETUP = 1,
+ OTX_EP_MBOX_CMD_STATUS_TIMEDOUT = 2,
+ OTX_EP_MBOX_CMD_STATUS_NACK = 3,
+ OTX_EP_MBOX_CMD_STATUS_BUSY = 4
+};
+
+enum otx_ep_mbox_state {
+ OTX_EP_MBOX_STATE_IDLE = 0,
+ OTX_EP_MBOX_STATE_BUSY = 1,
+};
+
+enum otx_ep_link_status {
+ OTX_EP_LINK_STATUS_DOWN,
+ OTX_EP_LINK_STATUS_UP,
+};
+
+enum otx_ep_link_duplex {
+ OTX_EP_LINK_HALF_DUPLEX,
+ OTX_EP_LINK_FULL_DUPLEX,
+};
+
+enum otx_ep_link_autoneg {
+ OTX_EP_LINK_FIXED,
+ OTX_EP_LINK_AUTONEG,
+};
+
+#define OTX_EP_MBOX_TIMEOUT_MS 1200
+#define OTX_EP_MBOX_MAX_RETRIES 2
+#define OTX_EP_MBOX_MAX_DATA_SIZE 6
+#define OTX_EP_MBOX_MAX_DATA_BUF_SIZE 256
+#define OTX_EP_MBOX_MORE_FRAG_FLAG 1
+#define OTX_EP_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
+
+union otx_ep_mbox_word {
+ uint64_t u64;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint64_t data:48;
+ } s;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t frag:1;
+ uint64_t rsvd:5;
+ uint8_t data[6];
+ } s_data;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint64_t version:48;
+ } s_version;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint8_t mac_addr[6];
+ } s_set_mac;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint64_t mtu:48;
+ } s_set_mtu;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint64_t mtu:48;
+ } s_get_mtu;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t state:1;
+ uint64_t rsvd:53;
+ } s_link_state;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t status:1;
+ uint64_t rsvd:53;
+ } s_link_status;
+} __rte_packed;
+
+/* Hardware interface link state information. */
+struct otx_ep_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ uint64_t supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ uint64_t advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ uint32_t speed;
+
+ /* MTU */
+ uint16_t mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ uint8_t autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ uint8_t pause;
+
+ /* Admin state of the link (ifconfig <iface> up/down */
+ uint8_t admin_up;
+
+ /* Operational state of the link: physical link is up down */
+ uint8_t oper_up;
+};
+
+int otx_ep_mbox_set_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
+int otx_ep_mbox_set_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr);
+int otx_ep_mbox_get_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr);
+int otx_ep_mbox_get_link_status(struct rte_eth_dev *eth_dev,
+ uint8_t *oper_up);
+int otx_ep_mbox_get_link_info(struct rte_eth_dev *eth_dev, struct rte_eth_link *link);
+void otx_ep_mbox_enable_interrupt(struct otx_ep_device *otx_ep);
+void otx_ep_mbox_disable_interrupt(struct otx_ep_device *otx_ep);
+int otx_ep_mbox_get_max_pkt_len(struct rte_eth_dev *eth_dev);
+int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev);
+int otx_ep_mbox_send_dev_exit(struct rte_eth_dev *eth_dev);
+#endif
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* Re: [PATCH v2 08/10] net/octeon_ep: support Mailbox between VF and PF
2023-04-05 14:25 ` [PATCH v2 08/10] net/octeon_ep: support Mailbox between VF and PF Sathesh Edara
@ 2023-04-21 5:00 ` Jerin Jacob
0 siblings, 0 replies; 50+ messages in thread
From: Jerin Jacob @ 2023-04-21 5:00 UTC (permalink / raw)
To: Sathesh Edara
Cc: sburla, jerinj, Radha Mohan Chintakuntla, Veerasenareddy Burru, dev
On Wed, Apr 5, 2023 at 7:56 PM Sathesh Edara <sedara@marvell.com> wrote:
>
> This patch adds the mailbox communication between
> VF and PF and supports the following mailbox
> messages.
> - Get and set MAC address
> - Get link information
> - Get stats
> - Get and set link status
> - Set and get MTU
> - Send notification to PF
>
> Signed-off-by: Sathesh Edara <sedara@marvell.com>
1) Change "Mailbox" to "mailbox" in subject line
2) Please cross check whether you need to add new items in
doc/guides/nics/features/octeon_ep.ini for these new features.
See doc/guides/nics/features.rst for list of features.
> ---
> drivers/net/octeon_ep/cnxk_ep_vf.c | 1 +
> drivers/net/octeon_ep/cnxk_ep_vf.h | 12 +-
> drivers/net/octeon_ep/meson.build | 1 +
> drivers/net/octeon_ep/otx_ep_common.h | 26 +++
> drivers/net/octeon_ep/otx_ep_ethdev.c | 143 +++++++++++-
> drivers/net/octeon_ep/otx_ep_mbox.c | 309 ++++++++++++++++++++++++++
> drivers/net/octeon_ep/otx_ep_mbox.h | 163 ++++++++++++++
> 7 files changed, 642 insertions(+), 13 deletions(-)
> create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.c
> create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.h
>
> diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
> index a437ae68cb..cadb4ecbf9 100644
> --- a/drivers/net/octeon_ep/cnxk_ep_vf.c
> +++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
> @@ -8,6 +8,7 @@
> #include <rte_common.h>
> #include <rte_cycles.h>
> #include <rte_memzone.h>
> +#include "otx_ep_common.h"
> #include "cnxk_ep_vf.h"
>
> static void
> diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
> index 072b38ea15..86277449ea 100644
> --- a/drivers/net/octeon_ep/cnxk_ep_vf.h
> +++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
> @@ -5,7 +5,7 @@
> #define _CNXK_EP_VF_H_
>
> #include <rte_io.h>
> -#include "otx_ep_common.h"
> +
> #define CNXK_CONFIG_XPANSION_BAR 0x38
> #define CNXK_CONFIG_PCIE_CAP 0x70
> #define CNXK_CONFIG_PCIE_DEVCAP 0x74
> @@ -92,6 +92,10 @@
> #define CNXK_EP_R_OUT_BYTE_CNT_START 0x10190
> #define CNXK_EP_R_OUT_CNTS_ISM_START 0x10510
>
> +#define CNXK_EP_R_MBOX_PF_VF_DATA_START 0x10210
> +#define CNXK_EP_R_MBOX_VF_PF_DATA_START 0x10230
> +#define CNXK_EP_R_MBOX_PF_VF_INT_START 0x10220
> +
> #define CNXK_EP_R_OUT_CNTS(ring) \
> (CNXK_EP_R_OUT_CNTS_START + ((ring) * CNXK_EP_RING_OFFSET))
>
> @@ -125,6 +129,12 @@
> #define CNXK_EP_R_OUT_CNTS_ISM(ring) \
> (CNXK_EP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
>
> +#define CNXK_EP_R_MBOX_VF_PF_DATA(ring) \
> + (CNXK_EP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_EP_RING_OFFSET))
> +
> +#define CNXK_EP_R_MBOX_PF_VF_INT(ring) \
> + (CNXK_EP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_EP_RING_OFFSET))
> +
> /*------------------ R_OUT Masks ----------------*/
> #define CNXK_EP_R_OUT_INT_LEVELS_BMODE (1ULL << 63)
> #define CNXK_EP_R_OUT_INT_LEVELS_TIMET (32)
> diff --git a/drivers/net/octeon_ep/meson.build b/drivers/net/octeon_ep/meson.build
> index a267b60290..e698bf9792 100644
> --- a/drivers/net/octeon_ep/meson.build
> +++ b/drivers/net/octeon_ep/meson.build
> @@ -8,4 +8,5 @@ sources = files(
> 'otx_ep_vf.c',
> 'otx2_ep_vf.c',
> 'cnxk_ep_vf.c',
> + 'otx_ep_mbox.c',
> )
> diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
> index 3beec71968..0bf5454a39 100644
> --- a/drivers/net/octeon_ep/otx_ep_common.h
> +++ b/drivers/net/octeon_ep/otx_ep_common.h
> @@ -4,6 +4,7 @@
> #ifndef _OTX_EP_COMMON_H_
> #define _OTX_EP_COMMON_H_
>
> +#include <rte_spinlock.h>
>
> #define OTX_EP_NW_PKT_OP 0x1220
> #define OTX_EP_NW_CMD_OP 0x1221
> @@ -67,6 +68,9 @@
> #define oct_ep_read64(addr) rte_read64_relaxed((void *)(addr))
> #define oct_ep_write64(val, addr) rte_write64_relaxed((val), (void *)(addr))
>
> +/* Mailbox maximum data size */
> +#define MBOX_MAX_DATA_BUF_SIZE 320
> +
> /* Input Request Header format */
> union otx_ep_instr_irh {
> uint64_t u64;
> @@ -488,6 +492,18 @@ struct otx_ep_device {
>
> /* DMA buffer for SDP ISM messages */
> const struct rte_memzone *ism_buffer_mz;
> +
> + /* Mailbox lock */
> + rte_spinlock_t mbox_lock;
> +
> + /* Mailbox data */
> + uint8_t mbox_data_buf[MBOX_MAX_DATA_BUF_SIZE];
> +
> + /* Mailbox data index */
> + int32_t mbox_data_index;
> +
> + /* Mailbox receive message length */
> + int32_t mbox_rcv_message_len;
> };
>
> int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
> @@ -541,6 +557,16 @@ struct otx_ep_buf_free_info {
> #define OTX_EP_CLEAR_SLIST_DBELL 0xFFFFFFFF
> #define OTX_EP_CLEAR_SDP_OUT_PKT_CNT 0xFFFFFFFFF
>
> +/* Max overhead includes
> + * - Ethernet hdr
> + * - CRC
> + * - nested VLANs
> + * - octeon rx info
> + */
> +#define OTX_EP_ETH_OVERHEAD \
> + (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
> + (2 * RTE_VLAN_HLEN) + OTX_EP_DROQ_INFO_SIZE)
> +
> /* PCI IDs */
> #define PCI_VENDOR_ID_CAVIUM 0x177D
>
> diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
> index 0f710b1ffa..885fbb475f 100644
> --- a/drivers/net/octeon_ep/otx_ep_ethdev.c
> +++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
> @@ -10,6 +10,7 @@
> #include "otx2_ep_vf.h"
> #include "cnxk_ep_vf.h"
> #include "otx_ep_rxtx.h"
> +#include "otx_ep_mbox.h"
>
> #define OTX_EP_DEV(_eth_dev) \
> ((struct otx_ep_device *)(_eth_dev)->data->dev_private)
> @@ -31,15 +32,24 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
> struct rte_eth_dev_info *devinfo)
> {
> struct otx_ep_device *otx_epvf;
> + int max_rx_pktlen;
>
> otx_epvf = OTX_EP_DEV(eth_dev);
>
> + max_rx_pktlen = otx_ep_mbox_get_max_pkt_len(eth_dev);
> + if (!max_rx_pktlen) {
> + otx_ep_err("Failed to get Max Rx packet length");
> + return -EINVAL;
> + }
> +
> devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
> devinfo->max_rx_queues = otx_epvf->max_rx_queues;
> devinfo->max_tx_queues = otx_epvf->max_tx_queues;
>
> devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
> - devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
> + devinfo->max_rx_pktlen = max_rx_pktlen;
> + devinfo->max_mtu = devinfo->max_rx_pktlen - OTX_EP_ETH_OVERHEAD;
> + devinfo->min_mtu = RTE_ETHER_MIN_LEN;
> devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
> devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
>
> @@ -54,6 +64,71 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
> return 0;
> }
>
> +static int
> +otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
> +{
> + RTE_SET_USED(wait_to_complete);
> +
> + if (!eth_dev->data->dev_started)
> + return 0;
> + struct rte_eth_link link;
> + int ret = 0;
> +
> + memset(&link, 0, sizeof(link));
> + ret = otx_ep_mbox_get_link_info(eth_dev, &link);
> + if (ret)
> + return -EINVAL;
> + otx_ep_dbg("link status resp link %d duplex %d autoneg %d link_speed %d\n",
> + link.link_status, link.link_duplex, link.link_autoneg, link.link_speed);
> + return rte_eth_linkstatus_set(eth_dev, &link);
> +}
> +
> +static int
> +otx_ep_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
> +{
> + struct rte_eth_dev_info devinfo;
> + int32_t ret = 0;
> +
> + if (otx_ep_dev_info_get(eth_dev, &devinfo)) {
> + otx_ep_err("Cannot set MTU to %u: failed to get device info", mtu);
> + return -EPERM;
> + }
> +
> + /* Check if MTU is within the allowed range */
> + if (mtu < devinfo.min_mtu) {
> + otx_ep_err("Invalid MTU %u: lower than minimum MTU %u", mtu, devinfo.min_mtu);
> + return -EINVAL;
> + }
> +
> + if (mtu > devinfo.max_mtu) {
> + otx_ep_err("Invalid MTU %u; higher than maximum MTU %u", mtu, devinfo.max_mtu);
> + return -EINVAL;
> + }
> +
> + ret = otx_ep_mbox_set_mtu(eth_dev, mtu);
> + if (ret)
> + return -EINVAL;
> +
> + otx_ep_dbg("MTU is set to %u", mtu);
> +
> + return 0;
> +}
> +
> +static int
> +otx_ep_dev_set_default_mac_addr(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr)
> +{
> + int ret;
> +
> + ret = otx_ep_mbox_set_mac_addr(eth_dev, mac_addr);
> + if (ret)
> + return -EINVAL;
> + otx_ep_dbg("Default MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
> + RTE_ETHER_ADDR_BYTES(mac_addr));
> + rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
> + return 0;
> +}
> +
> static int
> otx_ep_dev_start(struct rte_eth_dev *eth_dev)
> {
> @@ -78,6 +153,7 @@ otx_ep_dev_start(struct rte_eth_dev *eth_dev)
> rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
> }
>
> + otx_ep_dev_link_update(eth_dev, 0);
> otx_ep_info("dev started\n");
>
> return 0;
> @@ -454,6 +530,7 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
> struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
> uint32_t num_queues, q_no;
>
> + otx_ep_mbox_send_dev_exit(eth_dev);
> otx_epvf->fn_list.disable_io_queues(otx_epvf);
> num_queues = otx_epvf->nb_rx_queues;
> for (q_no = 0; q_no < num_queues; q_no++) {
> @@ -482,19 +559,17 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
> }
>
> static int
> -otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
> +otx_ep_dev_get_mac_addr(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr)
> {
> - RTE_SET_USED(wait_to_complete);
> -
> - if (!eth_dev->data->dev_started)
> - return 0;
> - struct rte_eth_link link;
> + int ret;
>
> - memset(&link, 0, sizeof(link));
> - link.link_status = RTE_ETH_LINK_UP;
> - link.link_speed = RTE_ETH_SPEED_NUM_10G;
> - link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
> - return rte_eth_linkstatus_set(eth_dev, &link);
> + ret = otx_ep_mbox_get_mac_addr(eth_dev, mac_addr);
> + if (ret)
> + return -EINVAL;
> + otx_ep_dbg("Get MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
> + RTE_ETHER_ADDR_BYTES(mac_addr));
> + return 0;
> }
>
> /* Define our ethernet definitions */
> @@ -511,6 +586,8 @@ static const struct eth_dev_ops otx_ep_eth_dev_ops = {
> .stats_reset = otx_ep_dev_stats_reset,
> .link_update = otx_ep_dev_link_update,
> .dev_close = otx_ep_dev_close,
> + .mtu_set = otx_ep_dev_mtu_set,
> + .mac_addr_set = otx_ep_dev_set_default_mac_addr,
> };
>
> static int
> @@ -526,6 +603,37 @@ otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
> return 0;
> }
>
> +static int otx_ep_eth_dev_query_set_vf_mac(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr)
> +{
> + int ret_val;
> +
> + memset(mac_addr, 0, sizeof(struct rte_ether_addr));
> + ret_val = otx_ep_dev_get_mac_addr(eth_dev, mac_addr);
> + if (!ret_val) {
> + if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
> + otx_ep_dbg("PF doesn't have valid VF MAC addr" RTE_ETHER_ADDR_PRT_FMT "\n",
> + RTE_ETHER_ADDR_BYTES(mac_addr));
> + rte_eth_random_addr(mac_addr->addr_bytes);
> + otx_ep_dbg("Setting Random MAC address" RTE_ETHER_ADDR_PRT_FMT "\n",
> + RTE_ETHER_ADDR_BYTES(mac_addr));
> + ret_val = otx_ep_dev_set_default_mac_addr(eth_dev, mac_addr);
> + if (ret_val) {
> + otx_ep_err("Setting MAC address " RTE_ETHER_ADDR_PRT_FMT "fails\n",
> + RTE_ETHER_ADDR_BYTES(mac_addr));
> + return ret_val;
> + }
> + }
> + otx_ep_dbg("Received valid MAC addr from PF" RTE_ETHER_ADDR_PRT_FMT "\n",
> + RTE_ETHER_ADDR_BYTES(mac_addr));
> + } else {
> + otx_ep_err("Getting MAC address from PF via Mbox fails with ret_val: %d\n",
> + ret_val);
> + return ret_val;
> + }
> + return 0;
> +}
> +
> static int
> otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
> {
> @@ -541,6 +649,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
> otx_epvf->eth_dev = eth_dev;
> otx_epvf->port_id = eth_dev->data->port_id;
> eth_dev->dev_ops = &otx_ep_eth_dev_ops;
> + rte_spinlock_init(&otx_epvf->mbox_lock);
> eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
> if (eth_dev->data->mac_addrs == NULL) {
> otx_ep_err("MAC addresses memory allocation failed\n");
> @@ -572,6 +681,16 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
> return -EINVAL;
> }
>
> + if (otx_ep_mbox_version_check(eth_dev))
> + return -EINVAL;
> +
> + if (otx_ep_eth_dev_query_set_vf_mac(eth_dev,
> + (struct rte_ether_addr *)&vf_mac_addr)) {
> + otx_ep_err("set mac addr failed\n");
> + return -ENODEV;
> + }
> + rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
> +
> return 0;
> }
>
> diff --git a/drivers/net/octeon_ep/otx_ep_mbox.c b/drivers/net/octeon_ep/otx_ep_mbox.c
> new file mode 100644
> index 0000000000..1ad36e14c8
> --- /dev/null
> +++ b/drivers/net/octeon_ep/otx_ep_mbox.c
> @@ -0,0 +1,309 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +
> +#include <ethdev_pci.h>
> +#include <rte_ether.h>
> +#include <rte_kvargs.h>
> +
> +#include "otx_ep_common.h"
> +#include "otx_ep_vf.h"
> +#include "otx2_ep_vf.h"
> +#include "cnxk_ep_vf.h"
> +#include "otx_ep_mbox.h"
> +
> +static int
> +__otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep,
> + union otx_ep_mbox_word cmd,
> + union otx_ep_mbox_word *rsp)
> +{
> + volatile uint64_t reg_val = 0ull;
> + int count = 0;
> +
> + cmd.s.type = OTX_EP_MBOX_TYPE_CMD;
> + otx2_write64(cmd.u64, otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));
> +
> + /* No response for notification messages */
> + if (!rsp)
> + return 0;
> +
> + for (count = 0; count < OTX_EP_MBOX_TIMEOUT_MS; count++) {
> + rte_delay_ms(1);
> + reg_val = otx2_read64(otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));
> + if (reg_val != cmd.u64) {
> + rsp->u64 = reg_val;
> + break;
> + }
> + }
> + if (count == OTX_EP_MBOX_TIMEOUT_MS) {
> + otx_ep_err("mbox send Timeout count:%d\n", count);
> + return OTX_EP_MBOX_TIMEOUT_MS;
> + }
> + if (rsp->s.type != OTX_EP_MBOX_TYPE_RSP_ACK) {
> + otx_ep_err("mbox received NACK from PF\n");
> + return OTX_EP_MBOX_CMD_STATUS_NACK;
> + }
> +
> + rsp->u64 = reg_val;
> + return 0;
> +}
> +
> +static int
> +otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep,
> + union otx_ep_mbox_word cmd,
> + union otx_ep_mbox_word *rsp)
> +{
> + int ret;
> +
> + rte_spinlock_lock(&otx_ep->mbox_lock);
> + ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, rsp);
> + rte_spinlock_unlock(&otx_ep->mbox_lock);
> + return ret;
> +}
> +
> +static int
> +otx_ep_mbox_bulk_read(struct otx_ep_device *otx_ep,
> + enum otx_ep_mbox_opcode opcode,
> + uint8_t *data, int32_t *size)
> +{
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int read_cnt, i = 0, ret;
> + int data_len = 0, tmp_len = 0;
> +
> + rte_spinlock_lock(&otx_ep->mbox_lock);
> + cmd.u64 = 0;
> + cmd.s_data.opcode = opcode;
> + cmd.s_data.frag = 0;
> + /* Send cmd to read data from PF */
> + ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret) {
> + otx_ep_err("mbox bulk read data request failed\n");
> + rte_spinlock_unlock(&otx_ep->mbox_lock);
> + return ret;
> + }
> + /* PF sends the data length of requested CMD
> + * in ACK
> + */
> + memcpy(&data_len, rsp.s_data.data, sizeof(data_len));
> + tmp_len = data_len;
> + cmd.u64 = 0;
> + rsp.u64 = 0;
> + cmd.s_data.opcode = opcode;
> + cmd.s_data.frag = 1;
> + while (data_len) {
> + ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret) {
> + otx_ep_err("mbox bulk read data request failed\n");
> + otx_ep->mbox_data_index = 0;
> + memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE);
> + rte_spinlock_unlock(&otx_ep->mbox_lock);
> + return ret;
> + }
> + if (data_len > OTX_EP_MBOX_MAX_DATA_SIZE) {
> + data_len -= OTX_EP_MBOX_MAX_DATA_SIZE;
> + read_cnt = OTX_EP_MBOX_MAX_DATA_SIZE;
> + } else {
> + read_cnt = data_len;
> + data_len = 0;
> + }
> + for (i = 0; i < read_cnt; i++) {
> + otx_ep->mbox_data_buf[otx_ep->mbox_data_index] =
> + rsp.s_data.data[i];
> + otx_ep->mbox_data_index++;
> + }
> + cmd.u64 = 0;
> + rsp.u64 = 0;
> + cmd.s_data.opcode = opcode;
> + cmd.s_data.frag = 1;
> + }
> + memcpy(data, otx_ep->mbox_data_buf, tmp_len);
> + *size = tmp_len;
> + otx_ep->mbox_data_index = 0;
> + memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE);
> + rte_spinlock_unlock(&otx_ep->mbox_lock);
> + return 0;
> +}
> +
> +int
> +otx_ep_mbox_set_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int ret = 0;
> +
> + cmd.u64 = 0;
> + cmd.s_set_mtu.opcode = OTX_EP_MBOX_CMD_SET_MTU;
> + cmd.s_set_mtu.mtu = mtu;
> +
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret) {
> + otx_ep_err("set MTU failed\n");
> + return -EINVAL;
> + }
> + otx_ep_dbg("mtu set success mtu %u\n", mtu);
> +
> + return 0;
> +}
> +
> +int
> +otx_ep_mbox_set_mac_addr(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int i, ret;
> +
> + cmd.u64 = 0;
> + cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_SET_MAC_ADDR;
> + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
> + cmd.s_set_mac.mac_addr[i] = mac_addr->addr_bytes[i];
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret) {
> + otx_ep_err("set MAC address failed\n");
> + return -EINVAL;
> + }
> + otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT "\n",
> + __func__, RTE_ETHER_ADDR_BYTES(mac_addr));
> + rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
> + return 0;
> +}
> +
> +int
> +otx_ep_mbox_get_mac_addr(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int i, ret;
> +
> + cmd.u64 = 0;
> + cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_GET_MAC_ADDR;
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret) {
> + otx_ep_err("get MAC address failed\n");
> + return -EINVAL;
> + }
> + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
> + mac_addr->addr_bytes[i] = rsp.s_set_mac.mac_addr[i];
> + otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT "\n",
> + __func__, RTE_ETHER_ADDR_BYTES(mac_addr));
> + return 0;
> +}
> +
> +int otx_ep_mbox_get_link_status(struct rte_eth_dev *eth_dev,
> + uint8_t *oper_up)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int ret;
> +
> + cmd.u64 = 0;
> + cmd.s_link_status.opcode = OTX_EP_MBOX_CMD_GET_LINK_STATUS;
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret) {
> + otx_ep_err("Get link status failed\n");
> + return -EINVAL;
> + }
> + *oper_up = rsp.s_link_status.status;
> + return 0;
> +}
> +
> +int otx_ep_mbox_get_link_info(struct rte_eth_dev *eth_dev,
> + struct rte_eth_link *link)
> +{
> + int32_t ret, size;
> + struct otx_ep_iface_link_info link_info;
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + memset(&link_info, 0, sizeof(struct otx_ep_iface_link_info));
> + ret = otx_ep_mbox_bulk_read(otx_ep, OTX_EP_MBOX_CMD_GET_LINK_INFO,
> + (uint8_t *)&link_info, (int32_t *)&size);
> + if (ret) {
> + otx_ep_err("Get link info failed\n");
> + return ret;
> + }
> + link->link_status = RTE_ETH_LINK_UP;
> + link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
> + link->link_autoneg = (link_info.autoneg ==
> + OTX_EP_LINK_AUTONEG) ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
> +
> + link->link_autoneg = link_info.autoneg;
> + link->link_speed = link_info.speed;
> + return 0;
> +}
> +
> +void
> +otx_ep_mbox_enable_interrupt(struct otx_ep_device *otx_ep)
> +{
> + rte_write64(0x2, (uint8_t *)otx_ep->hw_addr +
> + CNXK_EP_R_MBOX_PF_VF_INT(0));
> +}
> +
> +void
> +otx_ep_mbox_disable_interrupt(struct otx_ep_device *otx_ep)
> +{
> + rte_write64(0x00, (uint8_t *)otx_ep->hw_addr +
> + CNXK_EP_R_MBOX_PF_VF_INT(0));
> +}
> +
> +int
> +otx_ep_mbox_get_max_pkt_len(struct rte_eth_dev *eth_dev)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int ret;
> +
> + rsp.u64 = 0;
> + cmd.u64 = 0;
> + cmd.s_get_mtu.opcode = OTX_EP_MBOX_CMD_GET_MTU;
> +
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (ret)
> + return ret;
> + return rsp.s_get_mtu.mtu;
> +}
> +
> +int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + union otx_ep_mbox_word rsp;
> + int ret;
> +
> + cmd.u64 = 0;
> + cmd.s_version.opcode = OTX_EP_MBOX_CMD_VERSION;
> + cmd.s_version.version = OTX_EP_MBOX_VERSION;
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
> + if (!ret)
> + return 0;
> + if (ret == OTX_EP_MBOX_CMD_STATUS_NACK) {
> + otx_ep_err("VF Mbox version:%u is not compatible with PF\n",
> + (uint32_t)cmd.s_version.version);
> + }
> + return ret;
> +}
> +
> +int otx_ep_mbox_send_dev_exit(struct rte_eth_dev *eth_dev)
> +{
> + struct otx_ep_device *otx_ep =
> + (struct otx_ep_device *)(eth_dev)->data->dev_private;
> + union otx_ep_mbox_word cmd;
> + int ret;
> +
> + cmd.u64 = 0;
> + cmd.s_version.opcode = OTX_EP_MBOX_CMD_DEV_REMOVE;
> + ret = otx_ep_send_mbox_cmd(otx_ep, cmd, NULL);
> + return ret;
> +}
> diff --git a/drivers/net/octeon_ep/otx_ep_mbox.h b/drivers/net/octeon_ep/otx_ep_mbox.h
> new file mode 100644
> index 0000000000..9df3c53edd
> --- /dev/null
> +++ b/drivers/net/octeon_ep/otx_ep_mbox.h
> @@ -0,0 +1,163 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +
> +#ifndef _OTX_EP_MBOX_H_
> +#define _OTX_EP_MBOX_H_
> +
> +
> +#define OTX_EP_MBOX_VERSION 1
> +
> +enum otx_ep_mbox_opcode {
> + OTX_EP_MBOX_CMD_VERSION,
> + OTX_EP_MBOX_CMD_SET_MTU,
> + OTX_EP_MBOX_CMD_SET_MAC_ADDR,
> + OTX_EP_MBOX_CMD_GET_MAC_ADDR,
> + OTX_EP_MBOX_CMD_GET_LINK_INFO,
> + OTX_EP_MBOX_CMD_GET_STATS,
> + OTX_EP_MBOX_CMD_SET_RX_STATE,
> + OTX_EP_MBOX_CMD_SET_LINK_STATUS,
> + OTX_EP_MBOX_CMD_GET_LINK_STATUS,
> + OTX_EP_MBOX_CMD_GET_MTU,
> + OTX_EP_MBOX_CMD_DEV_REMOVE,
> + OTX_EP_MBOX_CMD_LAST,
> +};
> +
> +enum otx_ep_mbox_word_type {
> + OTX_EP_MBOX_TYPE_CMD,
> + OTX_EP_MBOX_TYPE_RSP_ACK,
> + OTX_EP_MBOX_TYPE_RSP_NACK,
> +};
> +
> +enum otx_ep_mbox_cmd_status {
> + OTX_EP_MBOX_CMD_STATUS_NOT_SETUP = 1,
> + OTX_EP_MBOX_CMD_STATUS_TIMEDOUT = 2,
> + OTX_EP_MBOX_CMD_STATUS_NACK = 3,
> + OTX_EP_MBOX_CMD_STATUS_BUSY = 4
> +};
> +
> +enum otx_ep_mbox_state {
> + OTX_EP_MBOX_STATE_IDLE = 0,
> + OTX_EP_MBOX_STATE_BUSY = 1,
> +};
> +
> +enum otx_ep_link_status {
> + OTX_EP_LINK_STATUS_DOWN,
> + OTX_EP_LINK_STATUS_UP,
> +};
> +
> +enum otx_ep_link_duplex {
> + OTX_EP_LINK_HALF_DUPLEX,
> + OTX_EP_LINK_FULL_DUPLEX,
> +};
> +
> +enum otx_ep_link_autoneg {
> + OTX_EP_LINK_FIXED,
> + OTX_EP_LINK_AUTONEG,
> +};
> +
> +#define OTX_EP_MBOX_TIMEOUT_MS 1200
> +#define OTX_EP_MBOX_MAX_RETRIES 2
> +#define OTX_EP_MBOX_MAX_DATA_SIZE 6
> +#define OTX_EP_MBOX_MAX_DATA_BUF_SIZE 256
> +#define OTX_EP_MBOX_MORE_FRAG_FLAG 1
> +#define OTX_EP_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
> +
> +union otx_ep_mbox_word {
> + uint64_t u64;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t rsvd:6;
> + uint64_t data:48;
> + } s;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t frag:1;
> + uint64_t rsvd:5;
> + uint8_t data[6];
> + } s_data;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t rsvd:6;
> + uint64_t version:48;
> + } s_version;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t rsvd:6;
> + uint8_t mac_addr[6];
> + } s_set_mac;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t rsvd:6;
> + uint64_t mtu:48;
> + } s_set_mtu;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t rsvd:6;
> + uint64_t mtu:48;
> + } s_get_mtu;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t state:1;
> + uint64_t rsvd:53;
> + } s_link_state;
> + struct {
> + uint64_t opcode:8;
> + uint64_t type:2;
> + uint64_t status:1;
> + uint64_t rsvd:53;
> + } s_link_status;
> +} __rte_packed;
> +
> +/* Hardware interface link state information. */
> +struct otx_ep_iface_link_info {
> + /* Bitmap of Supported link speeds/modes. */
> + uint64_t supported_modes;
> +
> + /* Bitmap of Advertised link speeds/modes. */
> + uint64_t advertised_modes;
> +
> + /* Negotiated link speed in Mbps. */
> + uint32_t speed;
> +
> + /* MTU */
> + uint16_t mtu;
> +
> + /* Autonegotiation state. */
> +#define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
> +#define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
> + uint8_t autoneg;
> +
> + /* Pause frames setting. */
> +#define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0)
> +#define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1)
> + uint8_t pause;
> +
> + /* Admin state of the link (ifconfig <iface> up/down */
> + uint8_t admin_up;
> +
> + /* Operational state of the link: physical link is up down */
> + uint8_t oper_up;
> +};
> +
> +int otx_ep_mbox_set_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
> +int otx_ep_mbox_set_mac_addr(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr);
> +int otx_ep_mbox_get_mac_addr(struct rte_eth_dev *eth_dev,
> + struct rte_ether_addr *mac_addr);
> +int otx_ep_mbox_get_link_status(struct rte_eth_dev *eth_dev,
> + uint8_t *oper_up);
> +int otx_ep_mbox_get_link_info(struct rte_eth_dev *eth_dev, struct rte_eth_link *link);
> +void otx_ep_mbox_enable_interrupt(struct otx_ep_device *otx_ep);
> +void otx_ep_mbox_disable_interrupt(struct otx_ep_device *otx_ep);
> +int otx_ep_mbox_get_max_pkt_len(struct rte_eth_dev *eth_dev);
> +int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev);
> +int otx_ep_mbox_send_dev_exit(struct rte_eth_dev *eth_dev);
> +#endif
> --
> 2.31.1
>
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v2 09/10] net/octeon_ep: set watermark for output queues
2023-04-05 14:25 ` [PATCH v2 00/10] extend octeon ep driver functionality Sathesh Edara
` (7 preceding siblings ...)
2023-04-05 14:25 ` [PATCH v2 08/10] net/octeon_ep: support Mailbox between VF and PF Sathesh Edara
@ 2023-04-05 14:25 ` Sathesh Edara
2023-04-05 14:25 ` [PATCH v2 10/10] net/octeon_ep: set secondary process dev ops Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-05 14:25 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
This patch sets the watermark level for SDP
output queues to send backpressure to NIX
when available Rx buffers fall below the watermark.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.c | 7 ++++++-
drivers/net/octeon_ep/otx_ep_common.h | 1 +
2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index cadb4ecbf9..92c2d2ca5c 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -245,7 +245,12 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
}
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
- return 0;
+
+ /* Set Watermark for backpressure */
+ oct_ep_write64(OTX_EP_OQ_WMARK_MIN,
+ otx_ep->hw_addr + CNXK_EP_R_OUT_WMARK(oq_no));
+
+ return 0;
}
static int
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 0bf5454a39..81d6857c52 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -23,6 +23,7 @@
#define OTX_EP_MAX_OQ_DESCRIPTORS (8192)
#define OTX_EP_OQ_BUF_SIZE (2048)
#define OTX_EP_MIN_RX_BUF_SIZE (64)
+#define OTX_EP_OQ_WMARK_MIN (256)
#define OTX_EP_OQ_INFOPTR_MODE (0)
#define OTX_EP_OQ_REFIL_THRESHOLD (16)
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v2 10/10] net/octeon_ep: set secondary process dev ops
2023-04-05 14:25 ` [PATCH v2 00/10] extend octeon ep driver functionality Sathesh Edara
` (8 preceding siblings ...)
2023-04-05 14:25 ` [PATCH v2 09/10] net/octeon_ep: set watermark for output queues Sathesh Edara
@ 2023-04-05 14:25 ` Sathesh Edara
2023-04-21 5:03 ` Jerin Jacob
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
10 siblings, 1 reply; 50+ messages in thread
From: Sathesh Edara @ 2023-04-05 14:25 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla,
Veerasenareddy Burru, Anatoly Burakov
Cc: dev
This patch sets the dev ops and transmit/receive
callbacks for the secondary process.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx_ep_ethdev.c | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 885fbb475f..a9868909f8 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -527,9 +527,17 @@ otx_ep_dev_stats_get(struct rte_eth_dev *eth_dev,
static int
otx_ep_dev_close(struct rte_eth_dev *eth_dev)
{
- struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
+ struct otx_ep_device *otx_epvf;
uint32_t num_queues, q_no;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ return 0;
+ }
+
+ otx_epvf = OTX_EP_DEV(eth_dev);
otx_ep_mbox_send_dev_exit(eth_dev);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
num_queues = otx_epvf->nb_rx_queues;
@@ -593,8 +601,12 @@ static const struct eth_dev_ops otx_ep_eth_dev_ops = {
static int
otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
return 0;
+ }
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
@@ -642,8 +654,12 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
struct rte_ether_addr vf_mac_addr;
/* Single process support */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev->dev_ops = &otx_ep_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
+ eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
return 0;
+ }
rte_eth_copy_pci_info(eth_dev, pdev);
otx_epvf->eth_dev = eth_dev;
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* Re: [PATCH v2 10/10] net/octeon_ep: set secondary process dev ops
2023-04-05 14:25 ` [PATCH v2 10/10] net/octeon_ep: set secondary process dev ops Sathesh Edara
@ 2023-04-21 5:03 ` Jerin Jacob
0 siblings, 0 replies; 50+ messages in thread
From: Jerin Jacob @ 2023-04-21 5:03 UTC (permalink / raw)
To: Sathesh Edara
Cc: sburla, jerinj, Radha Mohan Chintakuntla, Veerasenareddy Burru,
Anatoly Burakov, dev
On Wed, Apr 5, 2023 at 7:57 PM Sathesh Edara <sedara@marvell.com> wrote:
>
> This patch sets the dev ops and transmit/receive
> callbacks for secondary process.
Change the message as "fix ..." and fixes: tag if it just bug fixes.
BTW, "Multiprocess aware" is missing in doc/guides/nics/features/octeon_ep.ini
>
> Signed-off-by: Sathesh Edara <sedara@marvell.com>
> ---
> drivers/net/octeon_ep/otx_ep_ethdev.c | 22 +++++++++++++++++++---
> 1 file changed, 19 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
> index 885fbb475f..a9868909f8 100644
> --- a/drivers/net/octeon_ep/otx_ep_ethdev.c
> +++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
> @@ -527,9 +527,17 @@ otx_ep_dev_stats_get(struct rte_eth_dev *eth_dev,
> static int
> otx_ep_dev_close(struct rte_eth_dev *eth_dev)
> {
> - struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
> + struct otx_ep_device *otx_epvf;
> uint32_t num_queues, q_no;
>
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
> + eth_dev->dev_ops = NULL;
> + eth_dev->rx_pkt_burst = NULL;
> + eth_dev->tx_pkt_burst = NULL;
> + return 0;
> + }
> +
> + otx_epvf = OTX_EP_DEV(eth_dev);
> otx_ep_mbox_send_dev_exit(eth_dev);
> otx_epvf->fn_list.disable_io_queues(otx_epvf);
> num_queues = otx_epvf->nb_rx_queues;
> @@ -593,8 +601,12 @@ static const struct eth_dev_ops otx_ep_eth_dev_ops = {
> static int
> otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
> {
> - if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
> + eth_dev->dev_ops = NULL;
> + eth_dev->rx_pkt_burst = NULL;
> + eth_dev->tx_pkt_burst = NULL;
> return 0;
> + }
>
> eth_dev->dev_ops = NULL;
> eth_dev->rx_pkt_burst = NULL;
> @@ -642,8 +654,12 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
> struct rte_ether_addr vf_mac_addr;
>
> /* Single process support */
> - if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
> + eth_dev->dev_ops = &otx_ep_eth_dev_ops;
> + eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
> + eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
> return 0;
> + }
>
> rte_eth_copy_pci_info(eth_dev, pdev);
> otx_epvf->eth_dev = eth_dev;
> --
> 2.31.1
>
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 00/11] extend octeon ep driver functionality
2023-04-05 14:25 ` [PATCH v2 00/10] extend octeon ep driver functionality Sathesh Edara
` (9 preceding siblings ...)
2023-04-05 14:25 ` [PATCH v2 10/10] net/octeon_ep: set secondary process dev ops Sathesh Edara
@ 2023-04-24 12:55 ` Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 01/11] net/octeon_ep: support cnf95n and cnf95o SoC Sathesh Edara
` (10 more replies)
10 siblings, 11 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:55 UTC (permalink / raw)
To: sburla, jerinj, sedara; +Cc: dev
This patch set adds the following support to
the octeon_ep driver
- extends support for newer SoCs
- support new features like IQ/OQ reset, ISM,
mailbox between VF and PF and sets the watermark
level for output queues.
V3 changes:
- Addresses V2 review comments
Sathesh Edara (11):
net/octeon_ep: support cnf95n and cnf95o SoC
net/octeon_ep: support CNX10K series SoC
net/octeon_ep: support error propagation
net/octeon_ep: support IQ/OQ reset
devtools: add acronym in dictionary for commit checks
net/octeon_ep: support ISM
net/octeon_ep: flush pending DMA operations
net/octeon_ep: update queue size checks
net/octeon_ep: support mailbox between VF and PF
net/octeon_ep: set watermark for output queues
net/octeon_ep: set secondary process dev ops
devtools/words-case.txt | 1 +
doc/guides/nics/features/octeon_ep.ini | 2 +
drivers/net/octeon_ep/cnxk_ep_vf.c | 92 ++++++--
drivers/net/octeon_ep/cnxk_ep_vf.h | 29 ++-
drivers/net/octeon_ep/meson.build | 1 +
drivers/net/octeon_ep/otx2_ep_vf.c | 279 ++++++++++++++++++++--
drivers/net/octeon_ep/otx2_ep_vf.h | 77 +++++-
drivers/net/octeon_ep/otx_ep_common.h | 71 +++++-
drivers/net/octeon_ep/otx_ep_ethdev.c | 264 ++++++++++++++++++---
drivers/net/octeon_ep/otx_ep_mbox.c | 309 +++++++++++++++++++++++++
drivers/net/octeon_ep/otx_ep_mbox.h | 163 +++++++++++++
drivers/net/octeon_ep/otx_ep_rxtx.c | 111 +++++----
drivers/net/octeon_ep/otx_ep_rxtx.h | 4 +-
drivers/net/octeon_ep/otx_ep_vf.c | 71 ++++--
14 files changed, 1319 insertions(+), 155 deletions(-)
create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.c
create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.h
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 01/11] net/octeon_ep: support cnf95n and cnf95o SoC
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
@ 2023-04-24 12:55 ` Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 02/11] net/octeon_ep: support CNX10K series SoC Sathesh Edara
` (9 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:55 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Adds the required functionality in the Octeon endpoint
driver to support the cnf95n and cnf95o endpoint device.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx2_ep_vf.h | 2 ++
drivers/net/octeon_ep/otx_ep_ethdev.c | 13 +++++++++++--
2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.h b/drivers/net/octeon_ep/otx2_ep_vf.h
index 757eeae9f0..8f00acd737 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.h
+++ b/drivers/net/octeon_ep/otx2_ep_vf.h
@@ -115,6 +115,8 @@
#define PCI_DEVID_CN9K_EP_NET_VF 0xB203 /* OCTEON 9 EP mode */
#define PCI_DEVID_CN98XX_EP_NET_VF 0xB103
+#define PCI_DEVID_CNF95N_EP_NET_VF 0xB403
+#define PCI_DEVID_CNF95O_EP_NET_VF 0xB603
int
otx2_ep_vf_setup_device(struct otx_ep_device *sdpvf);
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index f43db1e398..24f62c3e49 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -105,6 +105,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
break;
case PCI_DEVID_CN9K_EP_NET_VF:
case PCI_DEVID_CN98XX_EP_NET_VF:
+ case PCI_DEVID_CNF95N_EP_NET_VF:
+ case PCI_DEVID_CNF95O_EP_NET_VF:
otx_epvf->chip_id = dev_id;
ret = otx2_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
@@ -144,7 +146,9 @@ otx_epdev_init(struct otx_ep_device *otx_epvf)
if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx_ep_xmit_pkts;
else if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF)
+ otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
else if (otx_epvf->chip_id == PCI_DEVID_CNXK_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
@@ -494,7 +498,10 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
otx_epvf->pdev = pdev;
otx_epdev_init(otx_epvf);
- if (pdev->id.device_id == PCI_DEVID_CN9K_EP_NET_VF)
+ if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
otx_epvf->pkind = SDP_OTX2_PKIND_FS0;
else
otx_epvf->pkind = SDP_PKIND;
@@ -524,6 +531,8 @@ static const struct rte_pci_id pci_id_otx_ep_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95N_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95O_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_EP_NET_VF) },
{ .vendor_id = 0, /* sentinel */ }
};
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 02/11] net/octeon_ep: support CNX10K series SoC
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 01/11] net/octeon_ep: support cnf95n and cnf95o SoC Sathesh Edara
@ 2023-04-24 12:55 ` Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 03/11] net/octeon_ep: support error propagation Sathesh Edara
` (8 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:55 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Adds the required functionality in the Octeon endpoint driver
to support the following CNX10K series endpoint devices.
- CN10KA
- CN10KB
- CNF10KA
- CNF10KB
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.h | 5 ++++-
drivers/net/octeon_ep/otx_ep_ethdev.c | 21 +++++++++++++++++----
2 files changed, 21 insertions(+), 5 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
index 7162461dd9..aaa5774552 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.h
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
@@ -134,7 +134,10 @@
#define CNXK_EP_R_OUT_CTL_ROR_P (1ULL << 24)
#define CNXK_EP_R_OUT_CTL_IMODE (1ULL << 23)
-#define PCI_DEVID_CNXK_EP_NET_VF 0xB903
+#define PCI_DEVID_CN10KA_EP_NET_VF 0xB903
+#define PCI_DEVID_CNF10KA_EP_NET_VF 0xBA03
+#define PCI_DEVID_CNF10KB_EP_NET_VF 0xBC03
+#define PCI_DEVID_CN10KB_EP_NET_VF 0xBD03
int
cnxk_ep_vf_setup_device(struct otx_ep_device *sdpvf);
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 24f62c3e49..b23d52ff84 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -111,7 +111,10 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
ret = otx2_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
break;
- case PCI_DEVID_CNXK_EP_NET_VF:
+ case PCI_DEVID_CN10KA_EP_NET_VF:
+ case PCI_DEVID_CN10KB_EP_NET_VF:
+ case PCI_DEVID_CNF10KA_EP_NET_VF:
+ case PCI_DEVID_CNF10KB_EP_NET_VF:
otx_epvf->chip_id = dev_id;
ret = cnxk_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
@@ -150,7 +153,10 @@ otx_epdev_init(struct otx_ep_device *otx_epvf)
otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
- else if (otx_epvf->chip_id == PCI_DEVID_CNXK_EP_NET_VF)
+ else if (otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF)
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
otx_epvf->max_rx_queues = ethdev_queues;
@@ -501,7 +507,11 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
+ otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF)
otx_epvf->pkind = SDP_OTX2_PKIND_FS0;
else
otx_epvf->pkind = SDP_PKIND;
@@ -533,7 +543,10 @@ static const struct rte_pci_id pci_id_otx_ep_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95N_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF95O_EP_NET_VF) },
- { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KA_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF10KA_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNF10KB_EP_NET_VF) },
{ .vendor_id = 0, /* sentinel */ }
};
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 03/11] net/octeon_ep: support error propagation
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 01/11] net/octeon_ep: support cnf95n and cnf95o SoC Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 02/11] net/octeon_ep: support CNX10K series SoC Sathesh Edara
@ 2023-04-24 12:55 ` Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 04/11] net/octeon_ep: support IQ/OQ reset Sathesh Edara
` (7 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:55 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Adds detection of loop limits being hit,
and propagates errors up the call chain
when this happens.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.c | 51 +++++++++++--------
drivers/net/octeon_ep/otx2_ep_vf.c | 49 ++++++++++--------
drivers/net/octeon_ep/otx_ep_common.h | 6 +--
drivers/net/octeon_ep/otx_ep_ethdev.c | 27 +++++++---
drivers/net/octeon_ep/otx_ep_rxtx.c | 51 +++++++++----------
drivers/net/octeon_ep/otx_ep_vf.c | 71 +++++++++++++++++++--------
6 files changed, 155 insertions(+), 100 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 3427fb213b..1a92887109 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -47,36 +47,43 @@ cnxk_ep_vf_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
oct_ep_write64(reg_val, otx_ep->hw_addr + CNXK_EP_R_OUT_CONTROL(q_no));
}
-static void
+static int
cnxk_ep_vf_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
cnxk_ep_vf_setup_global_iq_reg(otx_ep, q_no);
+ return 0;
}
-static void
+static int
cnxk_ep_vf_setup_global_output_regs(struct otx_ep_device *otx_ep)
{
uint32_t q_no;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
cnxk_ep_vf_setup_global_oq_reg(otx_ep, q_no);
+ return 0;
}
-static void
+static int
cnxk_ep_vf_setup_device_regs(struct otx_ep_device *otx_ep)
{
- cnxk_ep_vf_setup_global_input_regs(otx_ep);
- cnxk_ep_vf_setup_global_output_regs(otx_ep);
+ int ret;
+
+ ret = cnxk_ep_vf_setup_global_input_regs(otx_ep);
+ if (ret)
+ return ret;
+ ret = cnxk_ep_vf_setup_global_output_regs(otx_ep);
+ return ret;
}
-static void
+static int
cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
volatile uint64_t reg_val = 0ull;
reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(iq_no));
@@ -91,9 +98,9 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
} while ((!(reg_val & CNXK_EP_R_IN_CTL_IDLE)) && loop--);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("IDLE bit is not set\n");
- return;
+ return -EIO;
}
/* Write the start of the input queue's ring and its size */
@@ -115,9 +122,9 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
rte_delay_ms(1);
} while (reg_val != 0 && loop--);
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("INST CNT REGISTER is not zero\n");
- return;
+ return -EIO;
}
/* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR
@@ -125,14 +132,15 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + CNXK_EP_R_IN_INT_LEVELS(iq_no));
+ return 0;
}
-static void
+static int
cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
/* Wait on IDLE to set to 1, supposed to configure BADDR
@@ -145,9 +153,9 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("OUT CNT REGISTER value is zero\n");
- return;
+ return -EIO;
}
oct_ep_write64(droq->desc_ring_dma, otx_ep->hw_addr + CNXK_EP_R_OUT_SLIST_BADDR(oq_no));
@@ -181,9 +189,9 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("Packets credit register value is not cleared\n");
- return;
+ return -EIO;
}
otx_ep_dbg("SDP_R[%d]_credit:%x", oq_no, rte_read32(droq->pkts_credit_reg));
@@ -201,18 +209,19 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("Packets sent register value is not cleared\n");
- return;
+ return -EIO;
}
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
+ return 0;
}
static int
cnxk_ep_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
uint64_t reg_val = 0ull;
/* Resetting doorbells during IQ enabling also to handle abrupt
@@ -225,7 +234,7 @@ cnxk_ep_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("INSTR DBELL not coming back to 0\n");
return -EIO;
}
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 3c9a70157e..3ffc7275c7 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -49,32 +49,39 @@ otx2_vf_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
}
-static void
+static int
otx2_vf_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
otx2_vf_setup_global_iq_reg(otx_ep, q_no);
+ return 0;
}
-static void
+static int
otx2_vf_setup_global_output_regs(struct otx_ep_device *otx_ep)
{
uint32_t q_no;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
otx2_vf_setup_global_oq_reg(otx_ep, q_no);
+ return 0;
}
-static void
+static int
otx2_vf_setup_device_regs(struct otx_ep_device *otx_ep)
{
- otx2_vf_setup_global_input_regs(otx_ep);
- otx2_vf_setup_global_output_regs(otx_ep);
+ int ret;
+
+ ret = otx2_vf_setup_global_input_regs(otx_ep);
+ if (ret)
+ return ret;
+ ret = otx2_vf_setup_global_output_regs(otx_ep);
+ return ret;
}
-static void
+static int
otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
@@ -92,9 +99,9 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
} while ((!(reg_val & SDP_VF_R_IN_CTL_IDLE)) && loop--);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("IDLE bit is not set\n");
- return;
+ return -EIO;
}
/* Write the start of the input queue's ring and its size */
@@ -115,9 +122,9 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
rte_write32(reg_val, iq->inst_cnt_reg);
} while (reg_val != 0 && loop--);
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("INST CNT REGISTER is not zero\n");
- return;
+ return -EIO;
}
/* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR
@@ -125,14 +132,15 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
+ return 0;
}
-static void
+static int
otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
/* Wait on IDLE to set to 1, supposed to configure BADDR
@@ -145,9 +153,9 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("OUT CNT REGISTER value is zero\n");
- return;
+ return -EIO;
}
oct_ep_write64(droq->desc_ring_dma, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_BADDR(oq_no));
@@ -181,9 +189,9 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("Packets credit register value is not cleared\n");
- return;
+ return -EIO;
}
otx_ep_dbg("SDP_R[%d]_credit:%x", oq_no, rte_read32(droq->pkts_credit_reg));
@@ -200,17 +208,18 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("Packets sent register value is not cleared\n");
- return;
+ return -EIO;
}
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
+ return 0;
}
static int
otx2_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
- uint64_t loop = SDP_VF_BUSY_LOOP_COUNT;
+ int loop = SDP_VF_BUSY_LOOP_COUNT;
uint64_t reg_val = 0ull;
/* Resetting doorbells during IQ enabling also to handle abrupt
@@ -223,7 +232,7 @@ otx2_vf_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
rte_delay_ms(1);
}
- if (!loop) {
+ if (loop < 0) {
otx_ep_err("INSTR DBELL not coming back to 0\n");
return -EIO;
}
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index e4c92270d4..479bb1a1a0 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -394,11 +394,11 @@ struct otx_ep_sriov_info {
/* Required functions for each VF device */
struct otx_ep_fn_list {
- void (*setup_iq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
+ int (*setup_iq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
- void (*setup_oq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
+ int (*setup_oq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
- void (*setup_device_regs)(struct otx_ep_device *otx_ep);
+ int (*setup_device_regs)(struct otx_ep_device *otx_ep);
int (*enable_io_queues)(struct otx_ep_device *otx_ep);
void (*disable_io_queues)(struct otx_ep_device *otx_ep);
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index b23d52ff84..5677a2d6a6 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -151,13 +151,17 @@ otx_epdev_init(struct otx_ep_device *otx_epvf)
else if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF)
- otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
- else if (otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CNF95O_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF)
+ otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF) {
otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
+ } else {
+ otx_ep_err("Invalid chip_id\n");
+ ret = -EINVAL;
+ goto setup_fail;
+ }
ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
otx_epvf->max_rx_queues = ethdev_queues;
otx_epvf->max_tx_queues = ethdev_queues;
@@ -489,6 +493,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ rte_eth_copy_pci_info(eth_dev, pdev);
otx_epvf->eth_dev = eth_dev;
otx_epvf->port_id = eth_dev->data->port_id;
eth_dev->dev_ops = &otx_ep_eth_dev_ops;
@@ -503,7 +508,8 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
otx_epvf->hw_addr = pdev->mem_resource[0].addr;
otx_epvf->pdev = pdev;
- otx_epdev_init(otx_epvf);
+ if (otx_epdev_init(otx_epvf))
+ return -ENOMEM;
if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF95N_EP_NET_VF ||
@@ -511,11 +517,16 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
otx_epvf->chip_id == PCI_DEVID_CN10KA_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CN10KB_EP_NET_VF ||
otx_epvf->chip_id == PCI_DEVID_CNF10KA_EP_NET_VF ||
- otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF)
+ otx_epvf->chip_id == PCI_DEVID_CNF10KB_EP_NET_VF) {
otx_epvf->pkind = SDP_OTX2_PKIND_FS0;
- else
+ otx_ep_info("using pkind %d\n", otx_epvf->pkind);
+ } else if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF) {
otx_epvf->pkind = SDP_PKIND;
- otx_ep_info("using pkind %d\n", otx_epvf->pkind);
+ otx_ep_info("Using pkind %d.\n", otx_epvf->pkind);
+ } else {
+ otx_ep_err("Invalid chip id\n");
+ return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 6912ca2401..9712e6cce6 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -3,7 +3,7 @@
*/
#include <unistd.h>
-
+#include <assert.h>
#include <rte_eal.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
@@ -81,6 +81,7 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
const struct otx_ep_config *conf;
struct otx_ep_instr_queue *iq;
uint32_t q_size;
+ int ret;
conf = otx_ep->conf;
iq = otx_ep->instr_queue[iq_no];
@@ -140,7 +141,9 @@ otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
iq->iqcmd_64B = (conf->iq.instr_type == 64);
/* Set up IQ registers */
- otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);
+ ret = otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);
+ if (ret)
+ return ret;
return 0;
@@ -271,6 +274,7 @@ otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
uint32_t c_refill_threshold;
struct otx_ep_droq *droq;
uint32_t desc_ring_size;
+ int ret;
otx_ep_info("OQ[%d] Init start\n", q_no);
@@ -318,7 +322,9 @@ otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
droq->refill_threshold = c_refill_threshold;
/* Set up OQ registers */
- otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
+ ret = otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
+ if (ret)
+ return ret;
otx_ep->io_qmask.oq |= (1ull << q_no);
@@ -852,19 +858,15 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
* droq->pkts_pending);
*/
droq->stats.pkts_delayed_data++;
- while (retry && !info->length)
+ while (retry && !info->length) {
retry--;
+ rte_delay_us_block(50);
+ }
if (!retry && !info->length) {
otx_ep_err("OCTEON DROQ[%d]: read_idx: %d; Retry failed !!\n",
droq->q_no, droq->read_idx);
/* May be zero length packet; drop it */
- rte_pktmbuf_free(droq_pkt);
- droq->recv_buf_list[droq->read_idx] = NULL;
- droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
- droq->nb_desc);
- droq->stats.dropped_zlp++;
- droq->refill_count++;
- goto oq_read_fail;
+ assert(0);
}
}
if (next_fetch) {
@@ -938,6 +940,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
last_buf = droq_pkt;
} else {
otx_ep_err("no buf\n");
+ assert(0);
}
pkt_len += cpy_len;
@@ -953,16 +956,7 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
droq_pkt->l3_len = hdr_lens.l3_len;
droq_pkt->l4_len = hdr_lens.l4_len;
- if (droq_pkt->nb_segs > 1 &&
- !(otx_ep->rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) {
- rte_pktmbuf_free(droq_pkt);
- goto oq_read_fail;
- }
-
return droq_pkt;
-
-oq_read_fail:
- return NULL;
}
static inline uint32_t
@@ -992,6 +986,7 @@ otx_ep_recv_pkts(void *rx_queue,
struct rte_mbuf *oq_pkt;
uint32_t pkts = 0;
+ uint32_t valid_pkts = 0;
uint32_t new_pkts = 0;
int next_fetch;
@@ -1019,14 +1014,15 @@ otx_ep_recv_pkts(void *rx_queue,
"last_pkt_count %" PRIu64 "new_pkts %d.\n",
droq->pkts_pending, droq->last_pkt_count,
new_pkts);
- droq->pkts_pending -= pkts;
droq->stats.rx_err++;
- goto finish;
+ continue;
+ } else {
+ rx_pkts[valid_pkts] = oq_pkt;
+ valid_pkts++;
+ /* Stats */
+ droq->stats.pkts_received++;
+ droq->stats.bytes_received += oq_pkt->pkt_len;
}
- rx_pkts[pkts] = oq_pkt;
- /* Stats */
- droq->stats.pkts_received++;
- droq->stats.bytes_received += oq_pkt->pkt_len;
}
droq->pkts_pending -= pkts;
@@ -1053,6 +1049,5 @@ otx_ep_recv_pkts(void *rx_queue,
rte_write32(0, droq->pkts_credit_reg);
}
-finish:
- return pkts;
+ return valid_pkts;
}
diff --git a/drivers/net/octeon_ep/otx_ep_vf.c b/drivers/net/octeon_ep/otx_ep_vf.c
index 96366b2a7f..4f3538146b 100644
--- a/drivers/net/octeon_ep/otx_ep_vf.c
+++ b/drivers/net/octeon_ep/otx_ep_vf.c
@@ -12,10 +12,11 @@
#include "otx_ep_vf.h"
-static void
+static int
otx_ep_setup_global_iq_reg(struct otx_ep_device *otx_ep, int q_no)
{
volatile uint64_t reg_val = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
/* Select ES, RO, NS, RDSIZE,DPTR Format#0 for IQs
* IS_64B is by default enabled.
@@ -33,8 +34,11 @@ otx_ep_setup_global_iq_reg(struct otx_ep_device *otx_ep, int q_no)
do {
reg_val = rte_read64(otx_ep->hw_addr +
OTX_EP_R_IN_CONTROL(q_no));
- } while (!(reg_val & OTX_EP_R_IN_CTL_IDLE));
+ } while (!(reg_val & OTX_EP_R_IN_CTL_IDLE) && loop--);
+ if (loop < 0)
+ return -EIO;
}
+ return 0;
}
static void
@@ -60,13 +64,18 @@ otx_ep_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
otx_ep_write64(reg_val, otx_ep->hw_addr, OTX_EP_R_OUT_CONTROL(q_no));
}
-static void
+static int
otx_ep_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
+ int ret = 0;
- for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
- otx_ep_setup_global_iq_reg(otx_ep, q_no);
+ for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++) {
+ ret = otx_ep_setup_global_iq_reg(otx_ep, q_no);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static void
@@ -78,18 +87,24 @@ otx_ep_setup_global_output_regs(struct otx_ep_device *otx_ep)
otx_ep_setup_global_oq_reg(otx_ep, q_no);
}
-static void
+static int
otx_ep_setup_device_regs(struct otx_ep_device *otx_ep)
{
- otx_ep_setup_global_input_regs(otx_ep);
+ int ret;
+
+ ret = otx_ep_setup_global_input_regs(otx_ep);
+ if (ret)
+ return ret;
otx_ep_setup_global_output_regs(otx_ep);
+ return 0;
}
-static void
+static int
otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
volatile uint64_t reg_val = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_IN_CONTROL(iq_no));
@@ -100,7 +115,9 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
do {
reg_val = rte_read64(otx_ep->hw_addr +
OTX_EP_R_IN_CONTROL(iq_no));
- } while (!(reg_val & OTX_EP_R_IN_CTL_IDLE));
+ } while (!(reg_val & OTX_EP_R_IN_CTL_IDLE) && loop--);
+ if (loop < 0)
+ return -EIO;
}
/* Write the start of the input queue's ring and its size */
@@ -120,10 +137,13 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p\n",
iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+ loop = OTX_EP_BUSY_LOOP_COUNT;
do {
reg_val = rte_read32(iq->inst_cnt_reg);
rte_write32(reg_val, iq->inst_cnt_reg);
- } while (reg_val != 0);
+ } while ((reg_val != 0) && loop--);
+ if (loop < 0)
+ return -EIO;
/* IN INTR_THRESHOLD is set to max(FFFFFFFF) which disable the IN INTR
* to raise
@@ -133,13 +153,15 @@ otx_ep_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
otx_ep_write64(OTX_EP_CLEAR_IN_INT_LVLS, otx_ep->hw_addr,
OTX_EP_R_IN_INT_LEVELS(iq_no));
+ return 0;
}
-static void
+static int
otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
@@ -150,10 +172,12 @@ otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
reg_val = rte_read64(otx_ep->hw_addr + OTX_EP_R_OUT_CONTROL(oq_no));
- while (!(reg_val & OTX_EP_R_OUT_CTL_IDLE)) {
+ while (!(reg_val & OTX_EP_R_OUT_CTL_IDLE) && loop--) {
reg_val = rte_read64(otx_ep->hw_addr +
OTX_EP_R_OUT_CONTROL(oq_no));
}
+ if (loop < 0)
+ return -EIO;
otx_ep_write64(droq->desc_ring_dma, otx_ep->hw_addr,
OTX_EP_R_OUT_SLIST_BADDR(oq_no));
@@ -180,11 +204,14 @@ otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
OTX_EP_R_OUT_INT_LEVELS(oq_no));
/* Clear the OQ doorbell */
+ loop = OTX_EP_BUSY_LOOP_COUNT;
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
- while ((rte_read32(droq->pkts_credit_reg) != 0ull)) {
+ while ((rte_read32(droq->pkts_credit_reg) != 0ull) && loop--) {
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
rte_delay_ms(1);
}
+ if (loop < 0)
+ return -EIO;
otx_ep_dbg("OTX_EP_R[%d]_credit:%x\n", oq_no,
rte_read32(droq->pkts_credit_reg));
@@ -195,18 +222,22 @@ otx_ep_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
otx_ep_dbg("OTX_EP_R[%d]_sent: %x\n", oq_no,
rte_read32(droq->pkts_sent_reg));
- while (((rte_read32(droq->pkts_sent_reg)) != 0ull)) {
+ loop = OTX_EP_BUSY_LOOP_COUNT;
+ while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
rte_delay_ms(1);
}
+ if (loop < 0)
+ return -EIO;
+ return 0;
}
static int
otx_ep_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
- uint64_t reg_val = 0ull;
+ volatile uint64_t reg_val = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
/* Resetting doorbells during IQ enabling also to handle abrupt
* guest reboot. IQ reset does not clear the doorbells.
@@ -219,7 +250,7 @@ otx_ep_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
rte_delay_ms(1);
}
- if (loop == 0) {
+ if (loop < 0) {
otx_ep_err("dbell reset failed\n");
return -EIO;
}
@@ -238,8 +269,8 @@ otx_ep_enable_iq(struct otx_ep_device *otx_ep, uint32_t q_no)
static int
otx_ep_enable_oq(struct otx_ep_device *otx_ep, uint32_t q_no)
{
- uint64_t reg_val = 0ull;
- uint64_t loop = OTX_EP_BUSY_LOOP_COUNT;
+ volatile uint64_t reg_val = 0ull;
+ int loop = OTX_EP_BUSY_LOOP_COUNT;
/* Resetting doorbells during IQ enabling also to handle abrupt
* guest reboot. IQ reset does not clear the doorbells.
@@ -250,7 +281,7 @@ otx_ep_enable_oq(struct otx_ep_device *otx_ep, uint32_t q_no)
OTX_EP_R_OUT_SLIST_DBELL(q_no))) != 0ull) && loop--) {
rte_delay_ms(1);
}
- if (loop == 0) {
+ if (loop < 0) {
otx_ep_err("dbell reset failed\n");
return -EIO;
}
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 04/11] net/octeon_ep: support IQ/OQ reset
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (2 preceding siblings ...)
2023-04-24 12:55 ` [PATCH v3 03/11] net/octeon_ep: support error propagation Sathesh Edara
@ 2023-04-24 12:55 ` Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 05/11] devtools: add acronym in dictionary for commit checks Sathesh Edara
` (6 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:55 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Adds input and output queue reset functionality,
as well as receive queue interrupt enable and
disable functionality.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx2_ep_vf.c | 193 +++++++++++++++++++++++++-
drivers/net/octeon_ep/otx2_ep_vf.h | 61 ++++++--
drivers/net/octeon_ep/otx_ep_common.h | 5 +-
3 files changed, 244 insertions(+), 15 deletions(-)
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 3ffc7275c7..3e4895862b 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -9,6 +9,117 @@
#include "otx_ep_common.h"
#include "otx2_ep_vf.h"
+static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no);
+
+static int
+otx2_vf_reset_iq(struct otx_ep_device *otx_ep, int q_no)
+{
+ int loop = SDP_VF_BUSY_LOOP_COUNT;
+ volatile uint64_t d64 = 0ull;
+
+ /* There is no RST for a ring.
+ * Clear all registers one by one after disabling the ring
+ */
+
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_ENABLE(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_BADDR(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_RSIZE(q_no));
+
+ d64 = 0xFFFFFFFF; /* ~0ull */
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no));
+ d64 = otx2_read64(otx_ep->hw_addr + SDP_VF_R_IN_INSTR_DBELL(q_no));
+
+ while ((d64 != 0) && loop--) {
+ rte_delay_ms(1);
+ d64 = otx2_read64(otx_ep->hw_addr +
+ SDP_VF_R_IN_INSTR_DBELL(q_no));
+ }
+ if (loop < 0) {
+ otx_ep_err("%s: doorbell init retry limit exceeded.\n", __func__);
+ return -EIO;
+ }
+
+ loop = SDP_VF_BUSY_LOOP_COUNT;
+ do {
+ d64 = otx2_read64(otx_ep->hw_addr + SDP_VF_R_IN_CNTS(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_CNTS(q_no));
+ rte_delay_ms(1);
+ } while ((d64 & ~SDP_VF_R_IN_CNTS_OUT_INT) != 0 && loop--);
+ if (loop < 0) {
+ otx_ep_err("%s: in_cnts init retry limit exceeded.\n", __func__);
+ return -EIO;
+ }
+
+ d64 = 0ull;
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_PKT_CNT(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_IN_BYTE_CNT(q_no));
+
+ return 0;
+}
+
+static int
+otx2_vf_reset_oq(struct otx_ep_device *otx_ep, int q_no)
+{
+ int loop = SDP_VF_BUSY_LOOP_COUNT;
+ volatile uint64_t d64 = 0ull;
+
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_ENABLE(q_no));
+
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_BADDR(q_no));
+
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_RSIZE(q_no));
+
+ d64 = 0xFFFFFFFF;
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(q_no));
+ d64 = otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_SLIST_DBELL(q_no));
+ while ((d64 != 0) && loop--) {
+ rte_delay_ms(1);
+ d64 = otx2_read64(otx_ep->hw_addr +
+ SDP_VF_R_OUT_SLIST_DBELL(q_no));
+ }
+ if (loop < 0) {
+ otx_ep_err("%s: doorbell init retry limit exceeded.\n", __func__);
+ return -EIO;
+ }
+
+ if (otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CNTS(q_no))
+ & SDP_VF_R_OUT_CNTS_OUT_INT) {
+ /*
+ * The OUT_INT bit is set. This interrupt must be enabled in
+ * order to clear the interrupt. Interrupts are disabled
+ * at the end of this function.
+ */
+ union out_int_lvl_t out_int_lvl;
+
+ out_int_lvl.d64 = otx2_read64(otx_ep->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.time_cnt_en = 1;
+ out_int_lvl.s.cnt = 0;
+ otx2_write64(out_int_lvl.d64, otx_ep->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ }
+
+ loop = SDP_VF_BUSY_LOOP_COUNT;
+ do {
+ d64 = otx2_read64(otx_ep->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+ rte_delay_ms(1);
+ } while ((d64 & ~SDP_VF_R_OUT_CNTS_IN_INT) != 0 && loop--);
+ if (loop < 0) {
+ otx_ep_err("%s: out_cnts init retry limit exceeded.\n", __func__);
+ return -EIO;
+ }
+
+ d64 = 0ull;
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_INT_LEVELS(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_PKT_CNT(q_no));
+ otx2_write64(d64, otx_ep->hw_addr + SDP_VF_R_OUT_BYTE_CNT(q_no));
+
+ return 0;
+}
+
static void
otx2_vf_setup_global_iq_reg(struct otx_ep_device *otx_ep, int q_no)
{
@@ -49,24 +160,63 @@ otx2_vf_setup_global_oq_reg(struct otx_ep_device *otx_ep, int q_no)
oct_ep_write64(reg_val, otx_ep->hw_addr + SDP_VF_R_OUT_CONTROL(q_no));
}
+static int
+otx2_vf_reset_input_queues(struct otx_ep_device *otx_ep)
+{
+ uint32_t q_no = 0;
+ int ret = 0;
+
+ for (q_no = 0; q_no < otx_ep->sriov_info.rings_per_vf; q_no++) {
+ ret = otx2_vf_reset_iq(otx_ep, q_no);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+otx2_vf_reset_output_queues(struct otx_ep_device *otx_ep)
+{
+ uint64_t q_no = 0ull;
+ int ret = 0;
+
+ for (q_no = 0; q_no < otx_ep->sriov_info.rings_per_vf; q_no++) {
+ ret = otx2_vf_reset_oq(otx_ep, q_no);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static int
otx2_vf_setup_global_input_regs(struct otx_ep_device *otx_ep)
{
uint64_t q_no = 0ull;
+ int ret = 0;
+
+ ret = otx2_vf_reset_input_queues(otx_ep);
+ if (ret)
+ return ret;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
otx2_vf_setup_global_iq_reg(otx_ep, q_no);
- return 0;
+ return ret;
}
static int
otx2_vf_setup_global_output_regs(struct otx_ep_device *otx_ep)
{
uint32_t q_no;
+ int ret = 0;
+ ret = otx2_vf_reset_output_queues(otx_ep);
+ if (ret)
+ return ret;
for (q_no = 0; q_no < (otx_ep->sriov_info.rings_per_vf); q_no++)
otx2_vf_setup_global_oq_reg(otx_ep, q_no);
- return 0;
+ return ret;
}
static int
@@ -181,8 +331,8 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_write64(OTX_EP_CLEAR_SDP_OUT_PKT_CNT, (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_OUT_PKT_CNT(oq_no));
- loop = OTX_EP_BUSY_LOOP_COUNT;
/* Clear the OQ doorbell */
+ loop = OTX_EP_BUSY_LOOP_COUNT;
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
while ((rte_read32(droq->pkts_credit_reg) != 0ull) && loop--) {
rte_write32(OTX_EP_CLEAR_SLIST_DBELL, droq->pkts_credit_reg);
@@ -344,6 +494,40 @@ otx2_ep_get_defconf(struct otx_ep_device *otx_ep_dev __rte_unused)
return default_conf;
}
+static int otx2_vf_enable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no)
+{
+ union out_int_lvl_t out_int_lvl;
+ union out_cnts_t out_cnts;
+
+ out_int_lvl.d64 = otx2_read64(otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.time_cnt_en = 1;
+ out_int_lvl.s.cnt = 0;
+ otx2_write64(out_int_lvl.d64, otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_cnts.d64 = 0;
+ out_cnts.s.resend = 1;
+ otx2_write64(out_cnts.d64, otx_epvf->hw_addr + SDP_VF_R_OUT_CNTS(q_no));
+ return 0;
+}
+
+static int otx2_vf_disable_rxq_intr(struct otx_ep_device *otx_epvf,
+ uint16_t q_no)
+{
+ union out_int_lvl_t out_int_lvl;
+
+ /* Disable the interrupt for this queue */
+ out_int_lvl.d64 = otx2_read64(otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+ out_int_lvl.s.time_cnt_en = 0;
+ out_int_lvl.s.cnt = 0;
+ otx2_write64(out_int_lvl.d64, otx_epvf->hw_addr +
+ SDP_VF_R_OUT_INT_LEVELS(q_no));
+
+ return 0;
+}
+
int
otx2_ep_vf_setup_device(struct otx_ep_device *otx_ep)
{
@@ -381,5 +565,8 @@ otx2_ep_vf_setup_device(struct otx_ep_device *otx_ep)
otx_ep->fn_list.enable_oq = otx2_vf_enable_oq;
otx_ep->fn_list.disable_oq = otx2_vf_disable_oq;
+ otx_ep->fn_list.enable_rxq_intr = otx2_vf_enable_rxq_intr;
+ otx_ep->fn_list.disable_rxq_intr = otx2_vf_disable_rxq_intr;
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.h b/drivers/net/octeon_ep/otx2_ep_vf.h
index 8f00acd737..36c0b25dea 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.h
+++ b/drivers/net/octeon_ep/otx2_ep_vf.h
@@ -14,17 +14,20 @@
#define SDP_VF_BUSY_LOOP_COUNT (10000)
/* SDP VF OQ Masks */
-#define SDP_VF_R_OUT_CTL_IDLE (1ull << 40)
-#define SDP_VF_R_OUT_CTL_ES_I (1ull << 34)
-#define SDP_VF_R_OUT_CTL_NSR_I (1ull << 33)
-#define SDP_VF_R_OUT_CTL_ROR_I (1ull << 32)
-#define SDP_VF_R_OUT_CTL_ES_D (1ull << 30)
-#define SDP_VF_R_OUT_CTL_NSR_D (1ull << 29)
-#define SDP_VF_R_OUT_CTL_ROR_D (1ull << 28)
-#define SDP_VF_R_OUT_CTL_ES_P (1ull << 26)
-#define SDP_VF_R_OUT_CTL_NSR_P (1ull << 25)
-#define SDP_VF_R_OUT_CTL_ROR_P (1ull << 24)
-#define SDP_VF_R_OUT_CTL_IMODE (1ull << 23)
+#define SDP_VF_R_OUT_CTL_IDLE (0x1ull << 40)
+#define SDP_VF_R_OUT_CTL_ES_I (0x1ull << 34)
+#define SDP_VF_R_OUT_CTL_NSR_I (0x1ull << 33)
+#define SDP_VF_R_OUT_CTL_ROR_I (0x1ull << 32)
+#define SDP_VF_R_OUT_CTL_ES_D (0x1ull << 30)
+#define SDP_VF_R_OUT_CTL_NSR_D (0x1ull << 29)
+#define SDP_VF_R_OUT_CTL_ROR_D (0x1ull << 28)
+#define SDP_VF_R_OUT_CTL_ES_P (0x1ull << 26)
+#define SDP_VF_R_OUT_CTL_NSR_P (0x1ull << 25)
+#define SDP_VF_R_OUT_CTL_ROR_P (0x1ull << 24)
+#define SDP_VF_R_OUT_CTL_IMODE (0x1ull << 23)
+#define SDP_VF_R_OUT_CNTS_OUT_INT (0x1ull << 62)
+#define SDP_VF_R_OUT_CNTS_IN_INT (0x1ull << 61)
+#define SDP_VF_R_IN_CNTS_OUT_INT (0x1ull << 62)
/* SDP VF Register definitions */
#define SDP_VF_RING_OFFSET (0x1ull << 17)
@@ -140,4 +143,40 @@ struct otx2_ep_instr_64B {
uint64_t exhdr[4];
};
+union out_int_lvl_t {
+ uint64_t d64;
+ struct {
+ uint64_t cnt:32;
+ uint64_t timet:22;
+ uint64_t max_len:7;
+ uint64_t max_len_en:1;
+ uint64_t time_cnt_en:1;
+ uint64_t bmode:1;
+ } s;
+};
+
+union out_cnts_t {
+ uint64_t d64;
+ struct {
+ uint64_t cnt:32;
+ uint64_t timer:22;
+ uint64_t rsvd:5;
+ uint64_t resend:1;
+ uint64_t mbox_int:1;
+ uint64_t in_int:1;
+ uint64_t out_int:1;
+ uint64_t send_ism:1;
+ } s;
+};
+
+#define OTX2_EP_64B_INSTR_SIZE (sizeof(otx2_ep_instr_64B))
+
+#define NIX_MAX_HW_FRS 9212
+#define NIX_MAX_VTAG_INS 2
+#define NIX_MAX_VTAG_ACT_SIZE (4 * NIX_MAX_VTAG_INS)
+#define NIX_MAX_FRS \
+ (NIX_MAX_HW_FRS + RTE_ETHER_CRC_LEN - NIX_MAX_VTAG_ACT_SIZE)
+
+#define CN93XX_INTR_R_OUT_INT (1ULL << 62)
+#define CN93XX_INTR_R_IN_INT (1ULL << 61)
#endif /*_OTX2_EP_VF_H_ */
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 479bb1a1a0..a3260d5243 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -408,6 +408,9 @@ struct otx_ep_fn_list {
int (*enable_oq)(struct otx_ep_device *otx_ep, uint32_t q_no);
void (*disable_oq)(struct otx_ep_device *otx_ep, uint32_t q_no);
+
+ int (*enable_rxq_intr)(struct otx_ep_device *otx_epvf, uint16_t q_no);
+ int (*disable_rxq_intr)(struct otx_ep_device *otx_epvf, uint16_t q_no);
};
/* OTX_EP EP VF device data structure */
@@ -498,7 +501,7 @@ struct otx_ep_buf_free_info {
struct otx_ep_gather g;
};
-#define OTX_EP_MAX_PKT_SZ 64000U
+#define OTX_EP_MAX_PKT_SZ 65498U
#define OTX_EP_MAX_MAC_ADDRS 1
#define OTX_EP_SG_ALIGN 8
#define OTX_EP_CLEAR_ISIZE_BSIZE 0x7FFFFFULL
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 05/11] devtools: add acronym in dictionary for commit checks
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (3 preceding siblings ...)
2023-04-24 12:55 ` [PATCH v3 04/11] net/octeon_ep: support IQ/OQ reset Sathesh Edara
@ 2023-04-24 12:55 ` Sathesh Edara
2023-05-03 7:16 ` Jerin Jacob
2023-04-24 12:55 ` [PATCH v3 06/11] net/octeon_ep: support ISM Sathesh Edara
` (5 subsequent siblings)
10 siblings, 1 reply; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:55 UTC (permalink / raw)
To: sburla, jerinj, sedara, Thomas Monjalon; +Cc: dev
ISM -> Interrupt Status Messages
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
devtools/words-case.txt | 1 +
1 file changed, 1 insertion(+)
diff --git a/devtools/words-case.txt b/devtools/words-case.txt
index 53e029a958..3a7af902bd 100644
--- a/devtools/words-case.txt
+++ b/devtools/words-case.txt
@@ -35,6 +35,7 @@ IP
IPsec
IPv4
IPv6
+ISM
L2
L3
L4
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* Re: [PATCH v3 05/11] devtools: add acronym in dictionary for commit checks
2023-04-24 12:55 ` [PATCH v3 05/11] devtools: add acronym in dictionary for commit checks Sathesh Edara
@ 2023-05-03 7:16 ` Jerin Jacob
0 siblings, 0 replies; 50+ messages in thread
From: Jerin Jacob @ 2023-05-03 7:16 UTC (permalink / raw)
To: Sathesh Edara; +Cc: sburla, jerinj, Thomas Monjalon, dev
On Mon, Apr 24, 2023 at 8:07 PM Sathesh Edara <sedara@marvell.com> wrote:
>
> ISM -> Interrupt Status Messages
>
> Signed-off-by: Sathesh Edara <sedara@marvell.com>
> ---
> devtools/words-case.txt | 1 +
Squashed this patch with 6/11.
Series applied to dpdk-next-net-mrvl/for-next-net. Thanks
> 1 file changed, 1 insertion(+)
>
> diff --git a/devtools/words-case.txt b/devtools/words-case.txt
> index 53e029a958..3a7af902bd 100644
> --- a/devtools/words-case.txt
> +++ b/devtools/words-case.txt
> @@ -35,6 +35,7 @@ IP
> IPsec
> IPv4
> IPv6
> +ISM
> L2
> L3
> L4
> --
> 2.31.1
>
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 06/11] net/octeon_ep: support ISM
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (4 preceding siblings ...)
2023-04-24 12:55 ` [PATCH v3 05/11] devtools: add acronym in dictionary for commit checks Sathesh Edara
@ 2023-04-24 12:55 ` Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 07/11] net/octeon_ep: flush pending DMA operations Sathesh Edara
` (4 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:55 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Adds the ISM-specific functionality.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.c | 35 +++++++++++++++--
drivers/net/octeon_ep/cnxk_ep_vf.h | 12 ++++++
drivers/net/octeon_ep/otx2_ep_vf.c | 45 ++++++++++++++++++---
drivers/net/octeon_ep/otx2_ep_vf.h | 14 +++++++
drivers/net/octeon_ep/otx_ep_common.h | 16 ++++++++
drivers/net/octeon_ep/otx_ep_ethdev.c | 36 +++++++++++++++++
drivers/net/octeon_ep/otx_ep_rxtx.c | 56 +++++++++++++++++++++------
7 files changed, 194 insertions(+), 20 deletions(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index 1a92887109..a437ae68cb 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -2,11 +2,12 @@
* Copyright(C) 2022 Marvell.
*/
+#include <inttypes.h>
#include <errno.h>
#include <rte_common.h>
#include <rte_cycles.h>
-
+#include <rte_memzone.h>
#include "cnxk_ep_vf.h"
static void
@@ -85,6 +86,7 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
int loop = OTX_EP_BUSY_LOOP_COUNT;
volatile uint64_t reg_val = 0ull;
+ uint64_t ism_addr;
reg_val = oct_ep_read64(otx_ep->hw_addr + CNXK_EP_R_IN_CONTROL(iq_no));
@@ -132,6 +134,19 @@ cnxk_ep_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + CNXK_EP_R_IN_INT_LEVELS(iq_no));
+ /* Set up IQ ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | CNXK_EP_ISM_EN
+ | CNXK_EP_ISM_MSIX_DIS)
+ + CNXK_EP_IQ_ISM_OFFSET(iq_no);
+ rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_IN_CNTS_ISM(iq_no));
+ iq->inst_cnt_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + CNXK_EP_IQ_ISM_OFFSET(iq_no));
+ otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%" PRIX64, iq_no,
+ (void *)iq->inst_cnt_ism, ism_addr);
+ *iq->inst_cnt_ism = 0;
+ iq->inst_cnt_ism_prev = 0;
return 0;
}
@@ -142,6 +157,7 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
uint64_t oq_ctl = 0ull;
int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
+ uint64_t ism_addr;
/* Wait on IDLE to set to 1, supposed to configure BADDR
* as long as IDLE is 0
@@ -201,9 +217,22 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
- loop = OTX_EP_BUSY_LOOP_COUNT;
+ /* Set up ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | CNXK_EP_ISM_EN
+ | CNXK_EP_ISM_MSIX_DIS)
+ + CNXK_EP_OQ_ISM_OFFSET(oq_no);
+ rte_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_OUT_CNTS_ISM(oq_no));
+ droq->pkts_sent_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + CNXK_EP_OQ_ISM_OFFSET(oq_no));
+ otx_ep_err("SDP_R[%d] OQ ISM virt: %p dma: 0x%" PRIX64,
+ oq_no, (void *)droq->pkts_sent_ism, ism_addr);
+ *droq->pkts_sent_ism = 0;
+ droq->pkts_sent_ism_prev = 0;
- while (((rte_read32(droq->pkts_sent_reg)) != 0ull)) {
+ loop = OTX_EP_BUSY_LOOP_COUNT;
+ while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
rte_delay_ms(1);
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
index aaa5774552..072b38ea15 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.h
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
@@ -27,6 +27,7 @@
#define CNXK_EP_R_IN_INT_LEVELS_START 0x10060
#define CNXK_EP_R_IN_PKT_CNT_START 0x10080
#define CNXK_EP_R_IN_BYTE_CNT_START 0x10090
+#define CNXK_EP_R_IN_CNTS_ISM_START 0x10520
#define CNXK_EP_R_IN_CONTROL(ring) \
(CNXK_EP_R_IN_CONTROL_START + ((ring) * CNXK_EP_RING_OFFSET))
@@ -55,6 +56,8 @@
#define CNXK_EP_R_IN_BYTE_CNT(ring) \
(CNXK_EP_R_IN_BYTE_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))
+#define CNXK_EP_R_IN_CNTS_ISM(ring) \
+ (CNXK_EP_R_IN_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
/** Rings per Virtual Function **/
#define CNXK_EP_R_IN_CTL_RPVF_MASK (0xF)
@@ -87,6 +90,7 @@
#define CNXK_EP_R_OUT_ENABLE_START 0x10170
#define CNXK_EP_R_OUT_PKT_CNT_START 0x10180
#define CNXK_EP_R_OUT_BYTE_CNT_START 0x10190
+#define CNXK_EP_R_OUT_CNTS_ISM_START 0x10510
#define CNXK_EP_R_OUT_CNTS(ring) \
(CNXK_EP_R_OUT_CNTS_START + ((ring) * CNXK_EP_RING_OFFSET))
@@ -118,6 +122,9 @@
#define CNXK_EP_R_OUT_BYTE_CNT(ring) \
(CNXK_EP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_EP_RING_OFFSET))
+#define CNXK_EP_R_OUT_CNTS_ISM(ring) \
+ (CNXK_EP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
+
/*------------------ R_OUT Masks ----------------*/
#define CNXK_EP_R_OUT_INT_LEVELS_BMODE (1ULL << 63)
#define CNXK_EP_R_OUT_INT_LEVELS_TIMET (32)
@@ -161,4 +168,9 @@ struct cnxk_ep_instr_64B {
uint64_t exhdr[4];
};
+#define CNXK_EP_IQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue) + 4)
+#define CNXK_EP_OQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue))
+#define CNXK_EP_ISM_EN (0x1)
+#define CNXK_EP_ISM_MSIX_DIS (0x2)
+
#endif /*_CNXK_EP_VF_H_ */
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.c b/drivers/net/octeon_ep/otx2_ep_vf.c
index 3e4895862b..ced3a415a5 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.c
+++ b/drivers/net/octeon_ep/otx2_ep_vf.c
@@ -6,6 +6,7 @@
#include <rte_common.h>
#include <rte_cycles.h>
+#include <rte_memzone.h>
#include "otx_ep_common.h"
#include "otx2_ep_vf.h"
@@ -236,6 +237,7 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
{
struct otx_ep_instr_queue *iq = otx_ep->instr_queue[iq_no];
volatile uint64_t reg_val = 0ull;
+ uint64_t ism_addr;
int loop = SDP_VF_BUSY_LOOP_COUNT;
reg_val = oct_ep_read64(otx_ep->hw_addr + SDP_VF_R_IN_CONTROL(iq_no));
@@ -282,6 +284,22 @@ otx2_vf_setup_iq_regs(struct otx_ep_device *otx_ep, uint32_t iq_no)
*/
oct_ep_write64(OTX_EP_CLEAR_SDP_IN_INT_LVLS,
otx_ep->hw_addr + SDP_VF_R_IN_INT_LEVELS(iq_no));
+
+ /* Set up IQ ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | OTX2_EP_ISM_EN
+ | OTX2_EP_ISM_MSIX_DIS)
+ + OTX2_EP_IQ_ISM_OFFSET(iq_no);
+ oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ SDP_VF_R_IN_CNTS_ISM(iq_no));
+ iq->inst_cnt_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + OTX2_EP_IQ_ISM_OFFSET(iq_no));
+ otx_ep_err("SDP_R[%d] INST Q ISM virt: %p, dma: 0x%x", iq_no,
+ (void *)iq->inst_cnt_ism,
+ (unsigned int)ism_addr);
+ *iq->inst_cnt_ism = 0;
+ iq->inst_cnt_ism_prev = 0;
+
return 0;
}
@@ -290,6 +308,7 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
{
volatile uint64_t reg_val = 0ull;
uint64_t oq_ctl = 0ull;
+ uint64_t ism_addr;
int loop = OTX_EP_BUSY_LOOP_COUNT;
struct otx_ep_droq *droq = otx_ep->droq[oq_no];
@@ -351,18 +370,32 @@ otx2_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
- loop = OTX_EP_BUSY_LOOP_COUNT;
+ /* Set up ISM registers and structures */
+ ism_addr = (otx_ep->ism_buffer_mz->iova | OTX2_EP_ISM_EN
+ | OTX2_EP_ISM_MSIX_DIS)
+ + OTX2_EP_OQ_ISM_OFFSET(oq_no);
+ oct_ep_write64(ism_addr, (uint8_t *)otx_ep->hw_addr +
+ SDP_VF_R_OUT_CNTS_ISM(oq_no));
+ droq->pkts_sent_ism =
+ (uint32_t *)((uint8_t *)otx_ep->ism_buffer_mz->addr
+ + OTX2_EP_OQ_ISM_OFFSET(oq_no));
+ otx_ep_err("SDP_R[%d] OQ ISM virt: %p, dma: 0x%x", oq_no,
+ (void *)droq->pkts_sent_ism,
+ (unsigned int)ism_addr);
+ *droq->pkts_sent_ism = 0;
+ droq->pkts_sent_ism_prev = 0;
+
+ loop = SDP_VF_BUSY_LOOP_COUNT;
while (((rte_read32(droq->pkts_sent_reg)) != 0ull) && loop--) {
reg_val = rte_read32(droq->pkts_sent_reg);
rte_write32((uint32_t)reg_val, droq->pkts_sent_reg);
rte_delay_ms(1);
}
-
- if (loop < 0) {
- otx_ep_err("Packets sent register value is not cleared\n");
+ if (loop < 0)
return -EIO;
- }
- otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
+ otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no,
+ rte_read32(droq->pkts_sent_reg));
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx2_ep_vf.h b/drivers/net/octeon_ep/otx2_ep_vf.h
index 36c0b25dea..7c799475ab 100644
--- a/drivers/net/octeon_ep/otx2_ep_vf.h
+++ b/drivers/net/octeon_ep/otx2_ep_vf.h
@@ -42,6 +42,7 @@
#define SDP_VF_R_IN_INT_LEVELS_START (0x10060)
#define SDP_VF_R_IN_PKT_CNT_START (0x10080)
#define SDP_VF_R_IN_BYTE_CNT_START (0x10090)
+#define SDP_VF_R_IN_CNTS_ISM_START (0x10520)
#define SDP_VF_R_IN_CONTROL(ring) \
(SDP_VF_R_IN_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
@@ -70,6 +71,9 @@
#define SDP_VF_R_IN_BYTE_CNT(ring) \
(SDP_VF_R_IN_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+#define SDP_VF_R_IN_CNTS_ISM(ring) \
+ (SDP_VF_R_IN_CNTS_ISM_START + (SDP_VF_RING_OFFSET * (ring)))
+
/* SDP VF OQ Registers */
#define SDP_VF_R_OUT_CNTS_START (0x10100)
#define SDP_VF_R_OUT_INT_LEVELS_START (0x10110)
@@ -80,6 +84,7 @@
#define SDP_VF_R_OUT_ENABLE_START (0x10160)
#define SDP_VF_R_OUT_PKT_CNT_START (0x10180)
#define SDP_VF_R_OUT_BYTE_CNT_START (0x10190)
+#define SDP_VF_R_OUT_CNTS_ISM_START (0x10510)
#define SDP_VF_R_OUT_CONTROL(ring) \
(SDP_VF_R_OUT_CONTROL_START + ((ring) * SDP_VF_RING_OFFSET))
@@ -108,6 +113,9 @@
#define SDP_VF_R_OUT_BYTE_CNT(ring) \
(SDP_VF_R_OUT_BYTE_CNT_START + ((ring) * SDP_VF_RING_OFFSET))
+#define SDP_VF_R_OUT_CNTS_ISM(ring) \
+ (SDP_VF_R_OUT_CNTS_ISM_START + (SDP_VF_RING_OFFSET * (ring)))
+
/* SDP VF IQ Masks */
#define SDP_VF_R_IN_CTL_RPVF_MASK (0xF)
#define SDP_VF_R_IN_CTL_RPVF_POS (48)
@@ -143,6 +151,12 @@ struct otx2_ep_instr_64B {
uint64_t exhdr[4];
};
+#define OTX2_EP_IQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue) + 4)
+#define OTX2_EP_OQ_ISM_OFFSET(queue) (RTE_CACHE_LINE_SIZE * (queue))
+#define OTX2_EP_ISM_EN (0x1)
+#define OTX2_EP_ISM_MSIX_DIS (0x2)
+#define OTX2_EP_MAX_RX_PKT_LEN (16384)
+
union out_int_lvl_t {
uint64_t d64;
struct {
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index a3260d5243..76528ed49d 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -185,6 +185,9 @@ struct otx_ep_instr_queue {
*/
uint32_t flush_index;
+ /* Free-running/wrapping instruction counter for IQ. */
+ uint32_t inst_cnt;
+
/* This keeps track of the instructions pending in this queue. */
uint64_t instr_pending;
@@ -211,6 +214,12 @@ struct otx_ep_instr_queue {
/* Memory zone */
const struct rte_memzone *iq_mz;
+
+ /* Location in memory updated by SDP ISM */
+ uint32_t *inst_cnt_ism;
+
+ /* track inst count locally to consolidate HW counter updates */
+ uint32_t inst_cnt_ism_prev;
};
/** Descriptor format.
@@ -355,6 +364,10 @@ struct otx_ep_droq {
const struct rte_memzone *desc_ring_mz;
const struct rte_memzone *info_mz;
+
+ /* Pointer to host memory copy of output packet count, set by ISM */
+ uint32_t *pkts_sent_ism;
+ uint32_t pkts_sent_ism_prev;
};
#define OTX_EP_DROQ_SIZE (sizeof(struct otx_ep_droq))
@@ -459,6 +472,9 @@ struct otx_ep_device {
uint64_t rx_offloads;
uint64_t tx_offloads;
+
+ /* DMA buffer for SDP ISM messages */
+ const struct rte_memzone *ism_buffer_mz;
};
int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 5677a2d6a6..30a7a450fb 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -2,6 +2,7 @@
* Copyright(C) 2021 Marvell.
*/
+#include <inttypes.h>
#include <ethdev_pci.h>
#include "otx_ep_common.h"
@@ -90,6 +91,32 @@ otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
return 0;
}
+/*
+ * We only need 2 uint32_t locations per IOQ, but separate these so
+ * each IOQ has the variables on its own cache line.
+ */
+#define OTX_EP_ISM_BUFFER_SIZE (OTX_EP_MAX_IOQS_PER_VF * RTE_CACHE_LINE_SIZE)
+static int
+otx_ep_ism_setup(struct otx_ep_device *otx_epvf)
+{
+ otx_epvf->ism_buffer_mz =
+ rte_eth_dma_zone_reserve(otx_epvf->eth_dev, "ism",
+ 0, OTX_EP_ISM_BUFFER_SIZE,
+ OTX_EP_PCI_RING_ALIGN, 0);
+
+ /* Same DMA buffer is shared by OQ and IQ, clear it at start */
+ memset(otx_epvf->ism_buffer_mz->addr, 0, OTX_EP_ISM_BUFFER_SIZE);
+ if (otx_epvf->ism_buffer_mz == NULL) {
+ otx_ep_err("Failed to allocate ISM buffer\n");
+ return(-1);
+ }
+ otx_ep_dbg("ISM: virt: 0x%p, dma: 0x%" PRIX64,
+ (void *)otx_epvf->ism_buffer_mz->addr,
+ otx_epvf->ism_buffer_mz->iova);
+
+ return 0;
+}
+
static int
otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
{
@@ -110,6 +137,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
otx_epvf->chip_id = dev_id;
ret = otx2_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
+ if (otx_ep_ism_setup(otx_epvf))
+ ret = -EINVAL;
break;
case PCI_DEVID_CN10KA_EP_NET_VF:
case PCI_DEVID_CN10KB_EP_NET_VF:
@@ -118,6 +147,8 @@ otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
otx_epvf->chip_id = dev_id;
ret = cnxk_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
+ if (otx_ep_ism_setup(otx_epvf))
+ ret = -EINVAL;
break;
default:
otx_ep_err("Unsupported device\n");
@@ -434,6 +465,11 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
}
otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);
+ if (rte_eth_dma_zone_free(eth_dev, "ism", 0)) {
+ otx_ep_err("Failed to delete ISM buffer\n");
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index 9712e6cce6..c4153bd583 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -20,6 +20,7 @@
#define OTX_EP_INFO_SIZE 8
#define OTX_EP_FSZ_FS0 0
#define DROQ_REFILL_THRESHOLD 16
+#define OTX2_SDP_REQUEST_ISM (0x1ULL << 63)
static void
otx_ep_dmazone_free(const struct rte_memzone *mz)
@@ -412,15 +413,32 @@ otx_ep_iqreq_add(struct otx_ep_instr_queue *iq, void *buf,
static uint32_t
otx_vf_update_read_index(struct otx_ep_instr_queue *iq)
{
- uint32_t new_idx = rte_read32(iq->inst_cnt_reg);
- if (unlikely(new_idx == 0xFFFFFFFFU))
- rte_write32(new_idx, iq->inst_cnt_reg);
+ uint32_t val;
+
+ /*
+ * Batch subtractions from the HW counter to reduce PCIe traffic
+ * This adds an extra local variable, but almost halves the
+ * number of PCIe writes.
+ */
+ val = *iq->inst_cnt_ism;
+ iq->inst_cnt += val - iq->inst_cnt_ism_prev;
+ iq->inst_cnt_ism_prev = val;
+
+ if (val > (uint32_t)(1 << 31)) {
+ /*
+ * Only subtract the packet count in the HW counter
+ * when count above halfway to saturation.
+ */
+ rte_write32(val, iq->inst_cnt_reg);
+ *iq->inst_cnt_ism = 0;
+ iq->inst_cnt_ism_prev = 0;
+ }
+ rte_write64(OTX2_SDP_REQUEST_ISM, iq->inst_cnt_reg);
+
/* Modulo of the new index with the IQ size will give us
* the new index.
*/
- new_idx &= (iq->nb_desc - 1);
-
- return new_idx;
+ return iq->inst_cnt & (iq->nb_desc - 1);
}
static void
@@ -962,14 +980,30 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
static inline uint32_t
otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
{
- volatile uint64_t pkt_count;
uint32_t new_pkts;
+ uint32_t val;
+
+ /*
+ * Batch subtractions from the HW counter to reduce PCIe traffic
+ * This adds an extra local variable, but almost halves the
+ * number of PCIe writes.
+ */
+ val = *droq->pkts_sent_ism;
+ new_pkts = val - droq->pkts_sent_ism_prev;
+ droq->pkts_sent_ism_prev = val;
- /* Latest available OQ packets */
- pkt_count = rte_read32(droq->pkts_sent_reg);
- rte_write32(pkt_count, droq->pkts_sent_reg);
- new_pkts = pkt_count;
+ if (val > (uint32_t)(1 << 31)) {
+ /*
+ * Only subtract the packet count in the HW counter
+ * when count above halfway to saturation.
+ */
+ rte_write32(val, droq->pkts_sent_reg);
+ *droq->pkts_sent_ism = 0;
+ droq->pkts_sent_ism_prev = 0;
+ }
+ rte_write64(OTX2_SDP_REQUEST_ISM, droq->pkts_sent_reg);
droq->pkts_pending += new_pkts;
+
return new_pkts;
}
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 07/11] net/octeon_ep: flush pending DMA operations
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (5 preceding siblings ...)
2023-04-24 12:55 ` [PATCH v3 06/11] net/octeon_ep: support ISM Sathesh Edara
@ 2023-04-24 12:55 ` Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 08/11] net/octeon_ep: update queue size checks Sathesh Edara
` (3 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:55 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Flushes pending DMA operations while reading
packets, by issuing a read of the control and status register.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx_ep_common.h | 8 ++++++++
drivers/net/octeon_ep/otx_ep_rxtx.c | 4 ++++
2 files changed, 12 insertions(+)
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 76528ed49d..444136923f 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -345,6 +345,14 @@ struct otx_ep_droq {
*/
void *pkts_sent_reg;
+ /** Handle DMA incompletion during pkt reads.
+ * This variable is used to initiate a sent_reg_read
+ * that completes pending dma
+ * this variable is used as lvalue so compiler cannot optimize
+ * the reads.
+ */
+ uint32_t sent_reg_val;
+
/* Statistics for this DROQ. */
struct otx_ep_droq_stats stats;
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.c b/drivers/net/octeon_ep/otx_ep_rxtx.c
index c4153bd583..ca968f6fe7 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.c
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.c
@@ -917,6 +917,10 @@ otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
struct rte_mbuf *first_buf = NULL;
struct rte_mbuf *last_buf = NULL;
+ /* csr read helps to flush pending dma */
+ droq->sent_reg_val = rte_read32(droq->pkts_sent_reg);
+ rte_rmb();
+
while (pkt_len < total_pkt_len) {
int cpy_len = 0;
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 08/11] net/octeon_ep: update queue size checks
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (6 preceding siblings ...)
2023-04-24 12:55 ` [PATCH v3 07/11] net/octeon_ep: flush pending DMA operations Sathesh Edara
@ 2023-04-24 12:55 ` Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 09/11] net/octeon_ep: support mailbox between VF and PF Sathesh Edara
` (2 subsequent siblings)
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:55 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Updates the output queue size checks to ensure
that the queue is larger than the backpressure watermark.
Adds setting of the default queue sizes to the minimum
so that applications like testpmd can be started
without explicit queue size arguments.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/otx_ep_common.h | 9 +++++++--
drivers/net/octeon_ep/otx_ep_ethdev.c | 12 ++++++++++--
drivers/net/octeon_ep/otx_ep_rxtx.h | 4 ++--
3 files changed, 19 insertions(+), 6 deletions(-)
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 444136923f..3582f3087b 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -11,8 +11,13 @@
#define OTX_EP_MAX_RINGS_PER_VF (8)
#define OTX_EP_CFG_IO_QUEUES OTX_EP_MAX_RINGS_PER_VF
#define OTX_EP_64BYTE_INSTR (64)
-#define OTX_EP_MIN_IQ_DESCRIPTORS (128)
-#define OTX_EP_MIN_OQ_DESCRIPTORS (128)
+/*
+ * Backpressure for SDP is configured on Octeon, and the minimum queue sizes
+ * must be much larger than the backpressure watermark configured in the Octeon
+ * SDP driver. IQ and OQ backpressure configurations are separate.
+ */
+#define OTX_EP_MIN_IQ_DESCRIPTORS (2048)
+#define OTX_EP_MIN_OQ_DESCRIPTORS (2048)
#define OTX_EP_MAX_IQ_DESCRIPTORS (8192)
#define OTX_EP_MAX_OQ_DESCRIPTORS (8192)
#define OTX_EP_OQ_BUF_SIZE (2048)
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 30a7a450fb..0f710b1ffa 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -48,6 +48,9 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
devinfo->tx_desc_lim = otx_ep_tx_desc_lim;
+ devinfo->default_rxportconf.ring_size = OTX_EP_MIN_OQ_DESCRIPTORS;
+ devinfo->default_txportconf.ring_size = OTX_EP_MIN_IQ_DESCRIPTORS;
+
return 0;
}
@@ -274,8 +277,8 @@ otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
return -EINVAL;
}
if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
- otx_ep_err("Invalid rx desc number should at least be greater than 8xwmark %u\n",
- num_rx_descs);
+ otx_ep_err("Invalid rx desc number(%u) should at least be greater than 8xwmark %u\n",
+ num_rx_descs, (SDP_GBL_WMARK * 8));
return -EINVAL;
}
@@ -357,6 +360,11 @@ otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
num_tx_descs);
return -EINVAL;
}
+ if (num_tx_descs < (SDP_GBL_WMARK * 8)) {
+ otx_ep_err("Invalid tx desc number(%u) should at least be greater than 8*wmark(%u)\n",
+ num_tx_descs, (SDP_GBL_WMARK * 8));
+ return -EINVAL;
+ }
retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id);
diff --git a/drivers/net/octeon_ep/otx_ep_rxtx.h b/drivers/net/octeon_ep/otx_ep_rxtx.h
index 1527d350b5..7012888100 100644
--- a/drivers/net/octeon_ep/otx_ep_rxtx.h
+++ b/drivers/net/octeon_ep/otx_ep_rxtx.h
@@ -7,8 +7,8 @@
#include <rte_byteorder.h>
-#define OTX_EP_RXD_ALIGN 1
-#define OTX_EP_TXD_ALIGN 1
+#define OTX_EP_RXD_ALIGN 2
+#define OTX_EP_TXD_ALIGN 2
#define OTX_EP_IQ_SEND_FAILED (-1)
#define OTX_EP_IQ_SEND_SUCCESS (0)
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 09/11] net/octeon_ep: support mailbox between VF and PF
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (7 preceding siblings ...)
2023-04-24 12:55 ` [PATCH v3 08/11] net/octeon_ep: update queue size checks Sathesh Edara
@ 2023-04-24 12:55 ` Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 10/11] net/octeon_ep: set watermark for output queues Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 11/11] net/octeon_ep: set secondary process dev ops Sathesh Edara
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:55 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Adds mailbox communication between the VF and
the PF, supporting the following mailbox messages.
- Get and set MAC address
- Get link information
- Get stats
- Set and get MTU
- Send notification to PF
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
doc/guides/nics/features/octeon_ep.ini | 1 +
drivers/net/octeon_ep/cnxk_ep_vf.c | 1 +
drivers/net/octeon_ep/cnxk_ep_vf.h | 12 +-
drivers/net/octeon_ep/meson.build | 1 +
drivers/net/octeon_ep/otx_ep_common.h | 26 +++
drivers/net/octeon_ep/otx_ep_ethdev.c | 143 +++++++++++-
drivers/net/octeon_ep/otx_ep_mbox.c | 309 +++++++++++++++++++++++++
drivers/net/octeon_ep/otx_ep_mbox.h | 163 +++++++++++++
8 files changed, 643 insertions(+), 13 deletions(-)
create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.c
create mode 100644 drivers/net/octeon_ep/otx_ep_mbox.h
diff --git a/doc/guides/nics/features/octeon_ep.ini b/doc/guides/nics/features/octeon_ep.ini
index 305e219262..f3b821c89e 100644
--- a/doc/guides/nics/features/octeon_ep.ini
+++ b/doc/guides/nics/features/octeon_ep.ini
@@ -10,4 +10,5 @@ Linux = Y
x86-64 = Y
Basic stats = Y
Link status = Y
+MTU update = Y
Usage doc = Y
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index a437ae68cb..cadb4ecbf9 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -8,6 +8,7 @@
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_memzone.h>
+#include "otx_ep_common.h"
#include "cnxk_ep_vf.h"
static void
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.h b/drivers/net/octeon_ep/cnxk_ep_vf.h
index 072b38ea15..86277449ea 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.h
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.h
@@ -5,7 +5,7 @@
#define _CNXK_EP_VF_H_
#include <rte_io.h>
-#include "otx_ep_common.h"
+
#define CNXK_CONFIG_XPANSION_BAR 0x38
#define CNXK_CONFIG_PCIE_CAP 0x70
#define CNXK_CONFIG_PCIE_DEVCAP 0x74
@@ -92,6 +92,10 @@
#define CNXK_EP_R_OUT_BYTE_CNT_START 0x10190
#define CNXK_EP_R_OUT_CNTS_ISM_START 0x10510
+#define CNXK_EP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CNXK_EP_R_MBOX_VF_PF_DATA_START 0x10230
+#define CNXK_EP_R_MBOX_PF_VF_INT_START 0x10220
+
#define CNXK_EP_R_OUT_CNTS(ring) \
(CNXK_EP_R_OUT_CNTS_START + ((ring) * CNXK_EP_RING_OFFSET))
@@ -125,6 +129,12 @@
#define CNXK_EP_R_OUT_CNTS_ISM(ring) \
(CNXK_EP_R_OUT_CNTS_ISM_START + ((ring) * CNXK_EP_RING_OFFSET))
+#define CNXK_EP_R_MBOX_VF_PF_DATA(ring) \
+ (CNXK_EP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_EP_RING_OFFSET))
+
+#define CNXK_EP_R_MBOX_PF_VF_INT(ring) \
+ (CNXK_EP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_EP_RING_OFFSET))
+
/*------------------ R_OUT Masks ----------------*/
#define CNXK_EP_R_OUT_INT_LEVELS_BMODE (1ULL << 63)
#define CNXK_EP_R_OUT_INT_LEVELS_TIMET (32)
diff --git a/drivers/net/octeon_ep/meson.build b/drivers/net/octeon_ep/meson.build
index a267b60290..e698bf9792 100644
--- a/drivers/net/octeon_ep/meson.build
+++ b/drivers/net/octeon_ep/meson.build
@@ -8,4 +8,5 @@ sources = files(
'otx_ep_vf.c',
'otx2_ep_vf.c',
'cnxk_ep_vf.c',
+ 'otx_ep_mbox.c',
)
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index 3582f3087b..dadc8d1579 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -4,6 +4,7 @@
#ifndef _OTX_EP_COMMON_H_
#define _OTX_EP_COMMON_H_
+#include <rte_spinlock.h>
#define OTX_EP_NW_PKT_OP 0x1220
#define OTX_EP_NW_CMD_OP 0x1221
@@ -67,6 +68,9 @@
#define oct_ep_read64(addr) rte_read64_relaxed((void *)(addr))
#define oct_ep_write64(val, addr) rte_write64_relaxed((val), (void *)(addr))
+/* Mailbox maximum data size */
+#define MBOX_MAX_DATA_BUF_SIZE 320
+
/* Input Request Header format */
union otx_ep_instr_irh {
uint64_t u64;
@@ -488,6 +492,18 @@ struct otx_ep_device {
/* DMA buffer for SDP ISM messages */
const struct rte_memzone *ism_buffer_mz;
+
+ /* Mailbox lock */
+ rte_spinlock_t mbox_lock;
+
+ /* Mailbox data */
+ uint8_t mbox_data_buf[MBOX_MAX_DATA_BUF_SIZE];
+
+ /* Mailbox data index */
+ int32_t mbox_data_index;
+
+ /* Mailbox receive message length */
+ int32_t mbox_rcv_message_len;
};
int otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no,
@@ -541,6 +557,16 @@ struct otx_ep_buf_free_info {
#define OTX_EP_CLEAR_SLIST_DBELL 0xFFFFFFFF
#define OTX_EP_CLEAR_SDP_OUT_PKT_CNT 0xFFFFFFFFF
+/* Max overhead includes
+ * - Ethernet hdr
+ * - CRC
+ * - nested VLANs
+ * - octeon rx info
+ */
+#define OTX_EP_ETH_OVERHEAD \
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
+ (2 * RTE_VLAN_HLEN) + OTX_EP_DROQ_INFO_SIZE)
+
/* PCI IDs */
#define PCI_VENDOR_ID_CAVIUM 0x177D
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 0f710b1ffa..885fbb475f 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -10,6 +10,7 @@
#include "otx2_ep_vf.h"
#include "cnxk_ep_vf.h"
#include "otx_ep_rxtx.h"
+#include "otx_ep_mbox.h"
#define OTX_EP_DEV(_eth_dev) \
((struct otx_ep_device *)(_eth_dev)->data->dev_private)
@@ -31,15 +32,24 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *devinfo)
{
struct otx_ep_device *otx_epvf;
+ int max_rx_pktlen;
otx_epvf = OTX_EP_DEV(eth_dev);
+ max_rx_pktlen = otx_ep_mbox_get_max_pkt_len(eth_dev);
+ if (!max_rx_pktlen) {
+ otx_ep_err("Failed to get Max Rx packet length");
+ return -EINVAL;
+ }
+
devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
devinfo->max_rx_queues = otx_epvf->max_rx_queues;
devinfo->max_tx_queues = otx_epvf->max_tx_queues;
devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
- devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
+ devinfo->max_rx_pktlen = max_rx_pktlen;
+ devinfo->max_mtu = devinfo->max_rx_pktlen - OTX_EP_ETH_OVERHEAD;
+ devinfo->min_mtu = RTE_ETHER_MIN_LEN;
devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
@@ -54,6 +64,71 @@ otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
return 0;
}
+static int
+otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
+{
+ RTE_SET_USED(wait_to_complete);
+
+ if (!eth_dev->data->dev_started)
+ return 0;
+ struct rte_eth_link link;
+ int ret = 0;
+
+ memset(&link, 0, sizeof(link));
+ ret = otx_ep_mbox_get_link_info(eth_dev, &link);
+ if (ret)
+ return -EINVAL;
+ otx_ep_dbg("link status resp link %d duplex %d autoneg %d link_speed %d\n",
+ link.link_status, link.link_duplex, link.link_autoneg, link.link_speed);
+ return rte_eth_linkstatus_set(eth_dev, &link);
+}
+
+static int
+otx_ep_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct rte_eth_dev_info devinfo;
+ int32_t ret = 0;
+
+ if (otx_ep_dev_info_get(eth_dev, &devinfo)) {
+ otx_ep_err("Cannot set MTU to %u: failed to get device info", mtu);
+ return -EPERM;
+ }
+
+ /* Check if MTU is within the allowed range */
+ if (mtu < devinfo.min_mtu) {
+ otx_ep_err("Invalid MTU %u: lower than minimum MTU %u", mtu, devinfo.min_mtu);
+ return -EINVAL;
+ }
+
+ if (mtu > devinfo.max_mtu) {
+ otx_ep_err("Invalid MTU %u; higher than maximum MTU %u", mtu, devinfo.max_mtu);
+ return -EINVAL;
+ }
+
+ ret = otx_ep_mbox_set_mtu(eth_dev, mtu);
+ if (ret)
+ return -EINVAL;
+
+ otx_ep_dbg("MTU is set to %u", mtu);
+
+ return 0;
+}
+
+static int
+otx_ep_dev_set_default_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
+{
+ int ret;
+
+ ret = otx_ep_mbox_set_mac_addr(eth_dev, mac_addr);
+ if (ret)
+ return -EINVAL;
+ otx_ep_dbg("Default MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
+ return 0;
+}
+
static int
otx_ep_dev_start(struct rte_eth_dev *eth_dev)
{
@@ -78,6 +153,7 @@ otx_ep_dev_start(struct rte_eth_dev *eth_dev)
rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
}
+ otx_ep_dev_link_update(eth_dev, 0);
otx_ep_info("dev started\n");
return 0;
@@ -454,6 +530,7 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
uint32_t num_queues, q_no;
+ otx_ep_mbox_send_dev_exit(eth_dev);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
num_queues = otx_epvf->nb_rx_queues;
for (q_no = 0; q_no < num_queues; q_no++) {
@@ -482,19 +559,17 @@ otx_ep_dev_close(struct rte_eth_dev *eth_dev)
}
static int
-otx_ep_dev_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
+otx_ep_dev_get_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
{
- RTE_SET_USED(wait_to_complete);
-
- if (!eth_dev->data->dev_started)
- return 0;
- struct rte_eth_link link;
+ int ret;
- memset(&link, 0, sizeof(link));
- link.link_status = RTE_ETH_LINK_UP;
- link.link_speed = RTE_ETH_SPEED_NUM_10G;
- link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
- return rte_eth_linkstatus_set(eth_dev, &link);
+ ret = otx_ep_mbox_get_mac_addr(eth_dev, mac_addr);
+ if (ret)
+ return -EINVAL;
+ otx_ep_dbg("Get MAC address " RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ return 0;
}
/* Define our ethernet definitions */
@@ -511,6 +586,8 @@ static const struct eth_dev_ops otx_ep_eth_dev_ops = {
.stats_reset = otx_ep_dev_stats_reset,
.link_update = otx_ep_dev_link_update,
.dev_close = otx_ep_dev_close,
+ .mtu_set = otx_ep_dev_mtu_set,
+ .mac_addr_set = otx_ep_dev_set_default_mac_addr,
};
static int
@@ -526,6 +603,37 @@ otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+static int otx_ep_eth_dev_query_set_vf_mac(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
+{
+ int ret_val;
+
+ memset(mac_addr, 0, sizeof(struct rte_ether_addr));
+ ret_val = otx_ep_dev_get_mac_addr(eth_dev, mac_addr);
+ if (!ret_val) {
+ if (!rte_is_valid_assigned_ether_addr(mac_addr)) {
+ otx_ep_dbg("PF doesn't have valid VF MAC addr" RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ rte_eth_random_addr(mac_addr->addr_bytes);
+ otx_ep_dbg("Setting Random MAC address" RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ ret_val = otx_ep_dev_set_default_mac_addr(eth_dev, mac_addr);
+ if (ret_val) {
+ otx_ep_err("Setting MAC address " RTE_ETHER_ADDR_PRT_FMT "fails\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ return ret_val;
+ }
+ }
+ otx_ep_dbg("Received valid MAC addr from PF" RTE_ETHER_ADDR_PRT_FMT "\n",
+ RTE_ETHER_ADDR_BYTES(mac_addr));
+ } else {
+ otx_ep_err("Getting MAC address from PF via Mbox fails with ret_val: %d\n",
+ ret_val);
+ return ret_val;
+ }
+ return 0;
+}
+
static int
otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
{
@@ -541,6 +649,7 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
otx_epvf->eth_dev = eth_dev;
otx_epvf->port_id = eth_dev->data->port_id;
eth_dev->dev_ops = &otx_ep_eth_dev_ops;
+ rte_spinlock_init(&otx_epvf->mbox_lock);
eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
otx_ep_err("MAC addresses memory allocation failed\n");
@@ -572,6 +681,16 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
return -EINVAL;
}
+ if (otx_ep_mbox_version_check(eth_dev))
+ return -EINVAL;
+
+ if (otx_ep_eth_dev_query_set_vf_mac(eth_dev,
+ (struct rte_ether_addr *)&vf_mac_addr)) {
+ otx_ep_err("set mac addr failed\n");
+ return -ENODEV;
+ }
+ rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
+
return 0;
}
diff --git a/drivers/net/octeon_ep/otx_ep_mbox.c b/drivers/net/octeon_ep/otx_ep_mbox.c
new file mode 100644
index 0000000000..1ad36e14c8
--- /dev/null
+++ b/drivers/net/octeon_ep/otx_ep_mbox.c
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <ethdev_pci.h>
+#include <rte_ether.h>
+#include <rte_kvargs.h>
+
+#include "otx_ep_common.h"
+#include "otx_ep_vf.h"
+#include "otx2_ep_vf.h"
+#include "cnxk_ep_vf.h"
+#include "otx_ep_mbox.h"
+
+static int
+__otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep,
+ union otx_ep_mbox_word cmd,
+ union otx_ep_mbox_word *rsp)
+{
+ volatile uint64_t reg_val = 0ull;
+ int count = 0;
+
+ cmd.s.type = OTX_EP_MBOX_TYPE_CMD;
+ otx2_write64(cmd.u64, otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));
+
+ /* No response for notification messages */
+ if (!rsp)
+ return 0;
+
+ for (count = 0; count < OTX_EP_MBOX_TIMEOUT_MS; count++) {
+ rte_delay_ms(1);
+ reg_val = otx2_read64(otx_ep->hw_addr + CNXK_EP_R_MBOX_VF_PF_DATA(0));
+ if (reg_val != cmd.u64) {
+ rsp->u64 = reg_val;
+ break;
+ }
+ }
+ if (count == OTX_EP_MBOX_TIMEOUT_MS) {
+ otx_ep_err("mbox send Timeout count:%d\n", count);
+ return OTX_EP_MBOX_TIMEOUT_MS;
+ }
+ if (rsp->s.type != OTX_EP_MBOX_TYPE_RSP_ACK) {
+ otx_ep_err("mbox received NACK from PF\n");
+ return OTX_EP_MBOX_CMD_STATUS_NACK;
+ }
+
+ rsp->u64 = reg_val;
+ return 0;
+}
+
+static int
+otx_ep_send_mbox_cmd(struct otx_ep_device *otx_ep,
+ union otx_ep_mbox_word cmd,
+ union otx_ep_mbox_word *rsp)
+{
+ int ret;
+
+ rte_spinlock_lock(&otx_ep->mbox_lock);
+ ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, rsp);
+ rte_spinlock_unlock(&otx_ep->mbox_lock);
+ return ret;
+}
+
+static int
+otx_ep_mbox_bulk_read(struct otx_ep_device *otx_ep,
+ enum otx_ep_mbox_opcode opcode,
+ uint8_t *data, int32_t *size)
+{
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int read_cnt, i = 0, ret;
+ int data_len = 0, tmp_len = 0;
+
+ rte_spinlock_lock(&otx_ep->mbox_lock);
+ cmd.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 0;
+ /* Send cmd to read data from PF */
+ ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("mbox bulk read data request failed\n");
+ rte_spinlock_unlock(&otx_ep->mbox_lock);
+ return ret;
+ }
+ /* PF sends the data length of requested CMD
+ * in ACK
+ */
+ memcpy(&data_len, rsp.s_data.data, sizeof(data_len));
+ tmp_len = data_len;
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
+ while (data_len) {
+ ret = __otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("mbox bulk read data request failed\n");
+ otx_ep->mbox_data_index = 0;
+ memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE);
+ rte_spinlock_unlock(&otx_ep->mbox_lock);
+ return ret;
+ }
+ if (data_len > OTX_EP_MBOX_MAX_DATA_SIZE) {
+ data_len -= OTX_EP_MBOX_MAX_DATA_SIZE;
+ read_cnt = OTX_EP_MBOX_MAX_DATA_SIZE;
+ } else {
+ read_cnt = data_len;
+ data_len = 0;
+ }
+ for (i = 0; i < read_cnt; i++) {
+ otx_ep->mbox_data_buf[otx_ep->mbox_data_index] =
+ rsp.s_data.data[i];
+ otx_ep->mbox_data_index++;
+ }
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
+ }
+ memcpy(data, otx_ep->mbox_data_buf, tmp_len);
+ *size = tmp_len;
+ otx_ep->mbox_data_index = 0;
+ memset(otx_ep->mbox_data_buf, 0, OTX_EP_MBOX_MAX_DATA_BUF_SIZE);
+ rte_spinlock_unlock(&otx_ep->mbox_lock);
+ return 0;
+}
+
+int
+otx_ep_mbox_set_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int ret = 0;
+
+ cmd.u64 = 0;
+ cmd.s_set_mtu.opcode = OTX_EP_MBOX_CMD_SET_MTU;
+ cmd.s_set_mtu.mtu = mtu;
+
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("set MTU failed\n");
+ return -EINVAL;
+ }
+ otx_ep_dbg("mtu set success mtu %u\n", mtu);
+
+ return 0;
+}
+
+int
+otx_ep_mbox_set_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_SET_MAC_ADDR;
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
+ cmd.s_set_mac.mac_addr[i] = mac_addr->addr_bytes[i];
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("set MAC address failed\n");
+ return -EINVAL;
+ }
+ otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT "\n",
+ __func__, RTE_ETHER_ADDR_BYTES(mac_addr));
+ rte_ether_addr_copy(mac_addr, eth_dev->data->mac_addrs);
+ return 0;
+}
+
+int
+otx_ep_mbox_get_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OTX_EP_MBOX_CMD_GET_MAC_ADDR;
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("get MAC address failed\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; i++)
+ mac_addr->addr_bytes[i] = rsp.s_set_mac.mac_addr[i];
+ otx_ep_dbg("%s VF MAC " RTE_ETHER_ADDR_PRT_FMT "\n",
+ __func__, RTE_ETHER_ADDR_BYTES(mac_addr));
+ return 0;
+}
+
+int otx_ep_mbox_get_link_status(struct rte_eth_dev *eth_dev,
+ uint8_t *oper_up)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OTX_EP_MBOX_CMD_GET_LINK_STATUS;
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret) {
+ otx_ep_err("Get link status failed\n");
+ return -EINVAL;
+ }
+ *oper_up = rsp.s_link_status.status;
+ return 0;
+}
+
+int otx_ep_mbox_get_link_info(struct rte_eth_dev *eth_dev,
+ struct rte_eth_link *link)
+{
+ int32_t ret, size;
+ struct otx_ep_iface_link_info link_info;
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ memset(&link_info, 0, sizeof(struct otx_ep_iface_link_info));
+ ret = otx_ep_mbox_bulk_read(otx_ep, OTX_EP_MBOX_CMD_GET_LINK_INFO,
+ (uint8_t *)&link_info, (int32_t *)&size);
+ if (ret) {
+ otx_ep_err("Get link info failed\n");
+ return ret;
+ }
+ link->link_status = RTE_ETH_LINK_UP;
+ link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+ link->link_autoneg = (link_info.autoneg ==
+ OTX_EP_LINK_AUTONEG) ? RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED;
+
+ link->link_autoneg = link_info.autoneg;
+ link->link_speed = link_info.speed;
+ return 0;
+}
+
+void
+otx_ep_mbox_enable_interrupt(struct otx_ep_device *otx_ep)
+{
+ rte_write64(0x2, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_MBOX_PF_VF_INT(0));
+}
+
+void
+otx_ep_mbox_disable_interrupt(struct otx_ep_device *otx_ep)
+{
+ rte_write64(0x00, (uint8_t *)otx_ep->hw_addr +
+ CNXK_EP_R_MBOX_PF_VF_INT(0));
+}
+
+int
+otx_ep_mbox_get_max_pkt_len(struct rte_eth_dev *eth_dev)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int ret;
+
+ rsp.u64 = 0;
+ cmd.u64 = 0;
+ cmd.s_get_mtu.opcode = OTX_EP_MBOX_CMD_GET_MTU;
+
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (ret)
+ return ret;
+ return rsp.s_get_mtu.mtu;
+}
+
+int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ union otx_ep_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_version.opcode = OTX_EP_MBOX_CMD_VERSION;
+ cmd.s_version.version = OTX_EP_MBOX_VERSION;
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, &rsp);
+ if (!ret)
+ return 0;
+ if (ret == OTX_EP_MBOX_CMD_STATUS_NACK) {
+ otx_ep_err("VF Mbox version:%u is not compatible with PF\n",
+ (uint32_t)cmd.s_version.version);
+ }
+ return ret;
+}
+
+int otx_ep_mbox_send_dev_exit(struct rte_eth_dev *eth_dev)
+{
+ struct otx_ep_device *otx_ep =
+ (struct otx_ep_device *)(eth_dev)->data->dev_private;
+ union otx_ep_mbox_word cmd;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_version.opcode = OTX_EP_MBOX_CMD_DEV_REMOVE;
+ ret = otx_ep_send_mbox_cmd(otx_ep, cmd, NULL);
+ return ret;
+}
diff --git a/drivers/net/octeon_ep/otx_ep_mbox.h b/drivers/net/octeon_ep/otx_ep_mbox.h
new file mode 100644
index 0000000000..9df3c53edd
--- /dev/null
+++ b/drivers/net/octeon_ep/otx_ep_mbox.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _OTX_EP_MBOX_H_
+#define _OTX_EP_MBOX_H_
+
+
+#define OTX_EP_MBOX_VERSION 1
+
+enum otx_ep_mbox_opcode {
+ OTX_EP_MBOX_CMD_VERSION,
+ OTX_EP_MBOX_CMD_SET_MTU,
+ OTX_EP_MBOX_CMD_SET_MAC_ADDR,
+ OTX_EP_MBOX_CMD_GET_MAC_ADDR,
+ OTX_EP_MBOX_CMD_GET_LINK_INFO,
+ OTX_EP_MBOX_CMD_GET_STATS,
+ OTX_EP_MBOX_CMD_SET_RX_STATE,
+ OTX_EP_MBOX_CMD_SET_LINK_STATUS,
+ OTX_EP_MBOX_CMD_GET_LINK_STATUS,
+ OTX_EP_MBOX_CMD_GET_MTU,
+ OTX_EP_MBOX_CMD_DEV_REMOVE,
+ OTX_EP_MBOX_CMD_LAST,
+};
+
+enum otx_ep_mbox_word_type {
+ OTX_EP_MBOX_TYPE_CMD,
+ OTX_EP_MBOX_TYPE_RSP_ACK,
+ OTX_EP_MBOX_TYPE_RSP_NACK,
+};
+
+enum otx_ep_mbox_cmd_status {
+ OTX_EP_MBOX_CMD_STATUS_NOT_SETUP = 1,
+ OTX_EP_MBOX_CMD_STATUS_TIMEDOUT = 2,
+ OTX_EP_MBOX_CMD_STATUS_NACK = 3,
+ OTX_EP_MBOX_CMD_STATUS_BUSY = 4
+};
+
+enum otx_ep_mbox_state {
+ OTX_EP_MBOX_STATE_IDLE = 0,
+ OTX_EP_MBOX_STATE_BUSY = 1,
+};
+
+enum otx_ep_link_status {
+ OTX_EP_LINK_STATUS_DOWN,
+ OTX_EP_LINK_STATUS_UP,
+};
+
+enum otx_ep_link_duplex {
+ OTX_EP_LINK_HALF_DUPLEX,
+ OTX_EP_LINK_FULL_DUPLEX,
+};
+
+enum otx_ep_link_autoneg {
+ OTX_EP_LINK_FIXED,
+ OTX_EP_LINK_AUTONEG,
+};
+
+#define OTX_EP_MBOX_TIMEOUT_MS 1200
+#define OTX_EP_MBOX_MAX_RETRIES 2
+#define OTX_EP_MBOX_MAX_DATA_SIZE 6
+#define OTX_EP_MBOX_MAX_DATA_BUF_SIZE 256
+#define OTX_EP_MBOX_MORE_FRAG_FLAG 1
+#define OTX_EP_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
+
+union otx_ep_mbox_word {
+ uint64_t u64;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint64_t data:48;
+ } s;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t frag:1;
+ uint64_t rsvd:5;
+ uint8_t data[6];
+ } s_data;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint64_t version:48;
+ } s_version;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint8_t mac_addr[6];
+ } s_set_mac;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint64_t mtu:48;
+ } s_set_mtu;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t rsvd:6;
+ uint64_t mtu:48;
+ } s_get_mtu;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t state:1;
+ uint64_t rsvd:53;
+ } s_link_state;
+ struct {
+ uint64_t opcode:8;
+ uint64_t type:2;
+ uint64_t status:1;
+ uint64_t rsvd:53;
+ } s_link_status;
+} __rte_packed;
+
+/* Hardware interface link state information. */
+struct otx_ep_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ uint64_t supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ uint64_t advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ uint32_t speed;
+
+ /* MTU */
+ uint16_t mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ uint8_t autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ uint8_t pause;
+
+ /* Admin state of the link (ifconfig <iface> up/down */
+ uint8_t admin_up;
+
+ /* Operational state of the link: physical link is up down */
+ uint8_t oper_up;
+};
+
+int otx_ep_mbox_set_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
+int otx_ep_mbox_set_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr);
+int otx_ep_mbox_get_mac_addr(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mac_addr);
+int otx_ep_mbox_get_link_status(struct rte_eth_dev *eth_dev,
+ uint8_t *oper_up);
+int otx_ep_mbox_get_link_info(struct rte_eth_dev *eth_dev, struct rte_eth_link *link);
+void otx_ep_mbox_enable_interrupt(struct otx_ep_device *otx_ep);
+void otx_ep_mbox_disable_interrupt(struct otx_ep_device *otx_ep);
+int otx_ep_mbox_get_max_pkt_len(struct rte_eth_dev *eth_dev);
+int otx_ep_mbox_version_check(struct rte_eth_dev *eth_dev);
+int otx_ep_mbox_send_dev_exit(struct rte_eth_dev *eth_dev);
+#endif
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 10/11] net/octeon_ep: set watermark for output queues
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (8 preceding siblings ...)
2023-04-24 12:55 ` [PATCH v3 09/11] net/octeon_ep: support mailbox between VF and PF Sathesh Edara
@ 2023-04-24 12:55 ` Sathesh Edara
2023-04-24 12:55 ` [PATCH v3 11/11] net/octeon_ep: set secondary process dev ops Sathesh Edara
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:55 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla, Veerasenareddy Burru
Cc: dev
Sets the watermark level for SDP output queues
to send backpressure to NIX when the available
Rx buffers fall below the watermark.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
drivers/net/octeon_ep/cnxk_ep_vf.c | 7 ++++++-
drivers/net/octeon_ep/otx_ep_common.h | 1 +
2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/drivers/net/octeon_ep/cnxk_ep_vf.c b/drivers/net/octeon_ep/cnxk_ep_vf.c
index cadb4ecbf9..92c2d2ca5c 100644
--- a/drivers/net/octeon_ep/cnxk_ep_vf.c
+++ b/drivers/net/octeon_ep/cnxk_ep_vf.c
@@ -245,7 +245,12 @@ cnxk_ep_vf_setup_oq_regs(struct otx_ep_device *otx_ep, uint32_t oq_no)
}
otx_ep_dbg("SDP_R[%d]_sent: %x", oq_no, rte_read32(droq->pkts_sent_reg));
- return 0;
+
+ /* Set Watermark for backpressure */
+ oct_ep_write64(OTX_EP_OQ_WMARK_MIN,
+ otx_ep->hw_addr + CNXK_EP_R_OUT_WMARK(oq_no));
+
+ return 0;
}
static int
diff --git a/drivers/net/octeon_ep/otx_ep_common.h b/drivers/net/octeon_ep/otx_ep_common.h
index dadc8d1579..0305079af9 100644
--- a/drivers/net/octeon_ep/otx_ep_common.h
+++ b/drivers/net/octeon_ep/otx_ep_common.h
@@ -23,6 +23,7 @@
#define OTX_EP_MAX_OQ_DESCRIPTORS (8192)
#define OTX_EP_OQ_BUF_SIZE (2048)
#define OTX_EP_MIN_RX_BUF_SIZE (64)
+#define OTX_EP_OQ_WMARK_MIN (256)
#define OTX_EP_OQ_INFOPTR_MODE (0)
#define OTX_EP_OQ_REFIL_THRESHOLD (16)
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread
* [PATCH v3 11/11] net/octeon_ep: set secondary process dev ops
2023-04-24 12:55 ` [PATCH v3 00/11] extend octeon ep driver functionality Sathesh Edara
` (9 preceding siblings ...)
2023-04-24 12:55 ` [PATCH v3 10/11] net/octeon_ep: set watermark for output queues Sathesh Edara
@ 2023-04-24 12:55 ` Sathesh Edara
10 siblings, 0 replies; 50+ messages in thread
From: Sathesh Edara @ 2023-04-24 12:55 UTC (permalink / raw)
To: sburla, jerinj, sedara, Radha Mohan Chintakuntla,
Veerasenareddy Burru, Anatoly Burakov
Cc: dev
Sets the dev ops and transmit/receive callbacks
for the secondary process.
Signed-off-by: Sathesh Edara <sedara@marvell.com>
---
doc/guides/nics/features/octeon_ep.ini | 1 +
drivers/net/octeon_ep/otx_ep_ethdev.c | 22 +++++++++++++++++++---
2 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/doc/guides/nics/features/octeon_ep.ini b/doc/guides/nics/features/octeon_ep.ini
index f3b821c89e..d52491afa3 100644
--- a/doc/guides/nics/features/octeon_ep.ini
+++ b/doc/guides/nics/features/octeon_ep.ini
@@ -11,4 +11,5 @@ x86-64 = Y
Basic stats = Y
Link status = Y
MTU update = Y
+Multiprocess aware = Y
Usage doc = Y
diff --git a/drivers/net/octeon_ep/otx_ep_ethdev.c b/drivers/net/octeon_ep/otx_ep_ethdev.c
index 885fbb475f..a9868909f8 100644
--- a/drivers/net/octeon_ep/otx_ep_ethdev.c
+++ b/drivers/net/octeon_ep/otx_ep_ethdev.c
@@ -527,9 +527,17 @@ otx_ep_dev_stats_get(struct rte_eth_dev *eth_dev,
static int
otx_ep_dev_close(struct rte_eth_dev *eth_dev)
{
- struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
+ struct otx_ep_device *otx_epvf;
uint32_t num_queues, q_no;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ return 0;
+ }
+
+ otx_epvf = OTX_EP_DEV(eth_dev);
otx_ep_mbox_send_dev_exit(eth_dev);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
num_queues = otx_epvf->nb_rx_queues;
@@ -593,8 +601,12 @@ static const struct eth_dev_ops otx_ep_eth_dev_ops = {
static int
otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
return 0;
+ }
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
@@ -642,8 +654,12 @@ otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
struct rte_ether_addr vf_mac_addr;
/* Single process support */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ eth_dev->dev_ops = &otx_ep_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
+ eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
return 0;
+ }
rte_eth_copy_pci_info(eth_dev, pdev);
otx_epvf->eth_dev = eth_dev;
--
2.31.1
^ permalink raw reply [flat|nested] 50+ messages in thread