From: Ajit Khaparde <ajit.khaparde@broadcom.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com,
Kalesh AP <kalesh-anakkur.purayil@broadcom.com>,
Santoshkumar Karanappa Rastapur <santosh.rastapur@broadcom.com>,
Somnath Kotur <somnath.kotur@broadcom.com>
Subject: [dpdk-dev] [PATCH v3 02/15] net/bnxt: prevent device access when device is in reset
Date: Tue, 1 Oct 2019 18:23:22 -0700
Message-ID: <20191002012335.85324-3-ajit.khaparde@broadcom.com>
In-Reply-To: <20191002012335.85324-1-ajit.khaparde@broadcom.com>
From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Refactor the init and uninit functions so that the driver can fail
eth_dev_ops callbacks and Tx/Rx queue accesses when the device is
in reset or in an error state.
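As an illustration of the pattern (a minimal sketch only; bnxt_example_ctrl_op
is a placeholder name and the PMD's internal bnxt.h is assumed), every
control-path callback touched by this patch gains the same early-return check:

	/* is_bnxt_in_error() returns -EBUSY while a firmware reset is in
	 * progress and -EIO after a fatal error, 0 otherwise.
	 */
	static int bnxt_example_ctrl_op(struct rte_eth_dev *eth_dev)
	{
		struct bnxt *bp = eth_dev->data->dev_private;
		int rc;

		rc = is_bnxt_in_error(bp);
		if (rc)
			return rc;	/* refuse device access until recovery */

		/* ... normal callback processing ... */
		return 0;
	}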
Transmit and receive queues are freed during reset cleanup and
reallocated during recovery, so all data path handling is blocked
while the device is in this state. The eth_dev dev_started field is
updated according to the device status.
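For the data path, stopping the port (or hitting an error) first detaches the
real burst handlers so an application still polling the queues cannot touch
freed rings; a condensed view of the change in bnxt_dev_stop_op() and the new
dummy Rx handler from the diff below:

	/* In bnxt_dev_stop_op(): park the burst handlers before teardown. */
	eth_dev->data->dev_started = 0;
	eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
	eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;

	/* Dummy handler: drops the poll until recovery restores the real
	 * callback (bnxt_dummy_xmit_pkts is the Tx counterpart).
	 */
	uint16_t
	bnxt_dummy_recv_pkts(void *rx_queue __rte_unused,
			     struct rte_mbuf **rx_pkts __rte_unused,
			     uint16_t nb_pkts __rte_unused)
	{
		return 0;
	}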
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Santoshkumar Karanappa Rastapur <santosh.rastapur@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
---
drivers/net/bnxt/bnxt.h | 3 +
drivers/net/bnxt/bnxt_cpr.c | 3 +
drivers/net/bnxt/bnxt_ethdev.c | 461 ++++++++++++++++++++++-----------
drivers/net/bnxt/bnxt_hwrm.c | 2 -
drivers/net/bnxt/bnxt_ring.c | 32 +++
drivers/net/bnxt/bnxt_ring.h | 1 +
drivers/net/bnxt/bnxt_rxq.c | 25 ++
drivers/net/bnxt/bnxt_rxr.c | 17 ++
drivers/net/bnxt/bnxt_rxr.h | 2 +
drivers/net/bnxt/bnxt_stats.c | 36 ++-
drivers/net/bnxt/bnxt_txq.c | 7 +
drivers/net/bnxt/bnxt_txr.c | 27 ++
drivers/net/bnxt/bnxt_txr.h | 2 +
13 files changed, 465 insertions(+), 153 deletions(-)
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 0c9f994eaa..37b4c717d6 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -358,6 +358,8 @@ struct bnxt {
#define BNXT_FLAG_DFLT_VNIC_SET (1 << 12)
#define BNXT_FLAG_THOR_CHIP (1 << 13)
#define BNXT_FLAG_STINGRAY (1 << 14)
+#define BNXT_FLAG_FW_RESET (1 << 15)
+#define BNXT_FLAG_FATAL_ERROR (1 << 16)
#define BNXT_FLAG_EXT_STATS_SUPPORTED (1 << 29)
#define BNXT_FLAG_NEW_RM (1 << 30)
#define BNXT_FLAG_INIT_DONE (1U << 31)
@@ -465,6 +467,7 @@ struct bnxt {
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
+int is_bnxt_in_error(struct bnxt *bp);
bool is_bnxt_supported(struct rte_eth_dev *dev);
bool bnxt_stratus_device(struct bnxt *bp);
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 655bcf1a8d..bbcdb42f10 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -142,6 +142,9 @@ int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
return evt;
}
+ if (unlikely(is_bnxt_in_error(bp)))
+ return 0;
+
switch (CMP_TYPE(cmp)) {
case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
/* Handle any async event */
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index eb8701131a..d90a6e4202 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -167,6 +167,16 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
+int is_bnxt_in_error(struct bnxt *bp)
+{
+ if (bp->flags & BNXT_FLAG_FATAL_ERROR)
+ return -EIO;
+ if (bp->flags & BNXT_FLAG_FW_RESET)
+ return -EBUSY;
+
+ return 0;
+}
+
/***********************/
/*
@@ -207,6 +217,10 @@ static int bnxt_alloc_mem(struct bnxt *bp)
{
int rc;
+ rc = bnxt_alloc_ring_grps(bp);
+ if (rc)
+ goto alloc_mem_err;
+
rc = bnxt_alloc_async_ring_struct(bp);
if (rc)
goto alloc_mem_err;
@@ -500,6 +514,11 @@ static int bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
struct bnxt *bp = eth_dev->data->dev_private;
uint16_t max_vnics, i, j, vpool, vrxq;
unsigned int max_rx_rings;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
/* MAC Specifics */
dev_info->max_mac_addrs = bp->max_l2_ctx;
@@ -604,6 +623,10 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
rc = bnxt_hwrm_check_vf_rings(bp);
if (rc) {
@@ -793,8 +816,10 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = bnxt_receive_function(eth_dev);
eth_dev->tx_pkt_burst = bnxt_transmit_function(eth_dev);
+
bnxt_enable_int(bp);
bp->flags |= BNXT_FLAG_INIT_DONE;
+ eth_dev->data->dev_started = 1;
bp->dev_stopped = 0;
return 0;
@@ -837,6 +862,11 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ eth_dev->data->dev_started = 0;
+ /* Prevent crashes when queues are still in use */
+ eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
+ eth_dev->tx_pkt_burst = &bnxt_dummy_xmit_pkts;
+
bnxt_disable_int(bp);
/* disable uio/vfio intr/eventfd mapping */
@@ -891,6 +921,9 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
struct bnxt_filter_info *filter, *temp_filter;
uint32_t i;
+ if (is_bnxt_in_error(bp))
+ return;
+
/*
* Loop through all VNICs from the specified filter flow pools to
* remove the corresponding MAC addr filter
@@ -926,6 +959,10 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
struct bnxt_filter_info *filter;
int rc = 0;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp)) {
PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
return -ENOTSUP;
@@ -971,6 +1008,10 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
struct rte_eth_link new;
unsigned int cnt = BNXT_LINK_WAIT_CNT;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
memset(&new, 0, sizeof(new));
do {
/* Retrieve link info from hardware */
@@ -1013,6 +1054,10 @@ static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
uint32_t old_flags;
int rc;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (bp->vnic_info == NULL)
return 0;
@@ -1034,6 +1079,10 @@ static int bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev)
uint32_t old_flags;
int rc;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (bp->vnic_info == NULL)
return 0;
@@ -1055,6 +1104,10 @@ static int bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev)
uint32_t old_flags;
int rc;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (bp->vnic_info == NULL)
return 0;
@@ -1076,6 +1129,10 @@ static int bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev)
uint32_t old_flags;
int rc;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (bp->vnic_info == NULL)
return 0;
@@ -1130,7 +1187,11 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
uint16_t idx, sft;
- int i;
+ int i, rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
if (!vnic->rss_table)
return -EINVAL;
@@ -1186,6 +1247,11 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
uint16_t tbl_size = bnxt_rss_hash_tbl_size(bp);
uint16_t idx, sft, i;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
/* Retrieve from the default VNIC */
if (!vnic)
@@ -1232,6 +1298,11 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
struct bnxt_vnic_info *vnic;
uint16_t hash_type = 0;
unsigned int i;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
/*
* If RSS enablement were different than dev_configure,
@@ -1285,9 +1356,13 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
{
struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
- int len;
+ int len, rc;
uint32_t hash_types;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
/* RSS configuration is the same for all VNICs */
if (vnic && vnic->rss_hash_key) {
if (rss_conf->rss_key) {
@@ -1345,6 +1420,10 @@ static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev,
struct rte_eth_link link_info;
int rc;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
rc = bnxt_get_hwrm_link_config(bp, &link_info);
if (rc)
return rc;
@@ -1374,6 +1453,11 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf)
{
struct bnxt *bp = dev->data->dev_private;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
@@ -1433,6 +1517,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
uint16_t tunnel_type = 0;
int rc = 0;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (bp->vxlan_port_cnt) {
@@ -1482,6 +1570,10 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
uint16_t port = 0;
int rc = 0;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (!bp->vxlan_port_cnt) {
@@ -1635,6 +1727,11 @@ static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
uint16_t vlan_id, int on)
{
struct bnxt *bp = eth_dev->data->dev_private;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
/* These operations apply to ALL existing MAC/VLAN filters */
if (on)
@@ -1649,6 +1746,11 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
struct bnxt *bp = dev->data->dev_private;
uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
unsigned int i;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
if (mask & ETH_VLAN_FILTER_MASK) {
if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
@@ -1690,6 +1792,10 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev,
struct bnxt_filter_info *filter;
int rc;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
return -EPERM;
@@ -1729,6 +1835,11 @@ bnxt_dev_set_mc_addr_list_op(struct rte_eth_dev *eth_dev,
char *mc_addr_list = (char *)mc_addr_set;
struct bnxt_vnic_info *vnic;
uint32_t off = 0, i = 0;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
vnic = &bp->vnic_info[0];
@@ -1814,6 +1925,10 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
uint32_t rc = 0;
uint32_t i;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
VLAN_TAG_SIZE * BNXT_NUM_VLANS;
@@ -1891,6 +2006,10 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
uint16_t vlan = bp->vlan;
int rc;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
PMD_DRV_LOG(ERR,
"PVID cannot be modified for this function\n");
@@ -1908,6 +2027,11 @@ static int
bnxt_dev_led_on_op(struct rte_eth_dev *dev)
{
struct bnxt *bp = dev->data->dev_private;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
return bnxt_hwrm_port_led_cfg(bp, true);
}
@@ -1916,6 +2040,11 @@ static int
bnxt_dev_led_off_op(struct rte_eth_dev *dev)
{
struct bnxt *bp = dev->data->dev_private;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
return bnxt_hwrm_port_led_cfg(bp, false);
}
@@ -1923,6 +2052,7 @@ bnxt_dev_led_off_op(struct rte_eth_dev *dev)
static uint32_t
bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
uint32_t desc = 0, raw_cons = 0, cons;
struct bnxt_cp_ring_info *cpr;
struct bnxt_rx_queue *rxq;
@@ -1930,6 +2060,11 @@ bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
uint16_t cmp_type;
uint8_t cmp = 1;
bool valid;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
rxq = dev->data->rx_queues[rx_queue_id];
cpr = rxq->cp_ring;
@@ -1974,10 +2109,15 @@ bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
struct bnxt_sw_rx_bd *rx_buf;
struct rx_pkt_cmpl *rxcmp;
uint32_t cons, cp_cons;
+ int rc;
if (!rxq)
return -EINVAL;
+ rc = is_bnxt_in_error(rxq->bp);
+ if (rc)
+ return rc;
+
cpr = rxq->cp_ring;
rxr = rxq->rx_ring;
@@ -2012,10 +2152,15 @@ bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
struct bnxt_sw_tx_bd *tx_buf;
struct tx_pkt_cmpl *txcmp;
uint32_t cons, cp_cons;
+ int rc;
if (!txq)
return -EINVAL;
+ rc = is_bnxt_in_error(txq->bp);
+ if (rc)
+ return rc;
+
cpr = txq->cp_ring;
txr = txq->tx_ring;
@@ -2845,6 +2990,10 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
{
int ret = 0;
+ ret = is_bnxt_in_error(dev->data->dev_private);
+ if (ret)
+ return ret;
+
switch (filter_type) {
case RTE_ETH_FILTER_TUNNEL:
PMD_DRV_LOG(ERR,
@@ -3160,6 +3309,10 @@ bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
uint32_t dir_entries;
uint32_t entry_length;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function);
@@ -3178,6 +3331,11 @@ bnxt_get_eeprom_op(struct rte_eth_dev *dev,
struct bnxt *bp = dev->data->dev_private;
uint32_t index;
uint32_t offset;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
"len = %d\n", bp->pdev->addr.domain,
@@ -3249,6 +3407,11 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev,
struct bnxt *bp = dev->data->dev_private;
uint8_t index, dir_op;
uint16_t type, ext, ordinal, attr;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
"len = %d\n", bp->pdev->addr.domain,
@@ -3802,19 +3965,139 @@ static int bnxt_setup_mac_addr(struct rte_eth_dev *eth_dev)
return rc;
}
+static void bnxt_config_vf_req_fwd(struct bnxt *bp)
+{
+ if (!BNXT_PF(bp))
+ return;
+
#define ALLOW_FUNC(x) \
{ \
uint32_t arg = (x); \
bp->pf.vf_req_fwd[((arg) >> 5)] &= \
~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
}
+
+ /* Forward all requests if firmware is new enough */
+ if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
+ (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
+ ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
+ memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
+ } else {
+ PMD_DRV_LOG(WARNING,
+ "Firmware too old for VF mailbox functionality\n");
+ memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
+ }
+
+ /*
+ * The following are used for driver cleanup. If we disallow these,
+ * VF drivers can't clean up cleanly.
+ */
+ ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
+ ALLOW_FUNC(HWRM_VNIC_FREE);
+ ALLOW_FUNC(HWRM_RING_FREE);
+ ALLOW_FUNC(HWRM_RING_GRP_FREE);
+ ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
+ ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
+ ALLOW_FUNC(HWRM_STAT_CTX_FREE);
+ ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
+ ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
+}
+
+static int bnxt_init_fw(struct bnxt *bp)
+{
+ uint16_t mtu;
+ int rc = 0;
+
+ rc = bnxt_hwrm_ver_get(bp);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc)
+ return -EIO;
+
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc)
+ return rc;
+
+ /* Get the MAX capabilities for this function */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_func_qcfg(bp, &mtu);
+ if (rc)
+ return rc;
+
+ if (mtu >= RTE_ETHER_MIN_MTU && mtu <= BNXT_MAX_MTU &&
+ mtu != bp->eth_dev->data->mtu)
+ bp->eth_dev->data->mtu = mtu;
+
+ bnxt_hwrm_port_led_qcaps(bp);
+
+ return 0;
+}
+
+static int bnxt_init_resources(struct bnxt *bp)
+{
+ int rc;
+
+ rc = bnxt_init_fw(bp);
+ if (rc)
+ return rc;
+
+ rc = bnxt_setup_mac_addr(bp->eth_dev);
+ if (rc)
+ return rc;
+
+ bnxt_config_vf_req_fwd(bp);
+
+ rc = bnxt_hwrm_func_driver_register(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to register driver");
+ return -EBUSY;
+ }
+
+ if (BNXT_PF(bp)) {
+ if (bp->pdev->max_vfs) {
+ rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
+ return rc;
+ }
+ } else {
+ rc = bnxt_hwrm_allocate_pf_only(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate PF resources");
+ return rc;
+ }
+ }
+ }
+
+ rc = bnxt_alloc_mem(bp);
+ if (rc)
+ return rc;
+
+ rc = bnxt_setup_int(bp);
+ if (rc)
+ return rc;
+
+ bnxt_init_nic(bp);
+
+ rc = bnxt_request_int(bp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
static int version_printed;
struct bnxt *bp;
- uint16_t mtu;
int rc;
if (version_printed++ == 0)
@@ -3856,166 +4139,50 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
rc = bnxt_init_board(eth_dev);
if (rc) {
PMD_DRV_LOG(ERR,
- "Board initialization failed rc: %x\n", rc);
- goto error;
+ "Failed to initialize board rc: %x\n", rc);
+ return rc;
}
rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
PMD_DRV_LOG(ERR,
- "hwrm resource allocation failure rc: %x\n", rc);
+ "Failed to allocate hwrm resource rc: %x\n", rc);
goto error_free;
}
- rc = bnxt_hwrm_ver_get(bp);
+ rc = bnxt_init_resources(bp);
if (rc)
goto error_free;
- rc = bnxt_hwrm_func_reset(bp);
- if (rc) {
- PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
- rc = -EIO;
- goto error_free;
- }
-
- rc = bnxt_hwrm_queue_qportcfg(bp);
- if (rc) {
- PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
- goto error_free;
- }
- /* Get the MAX capabilities for this function */
- rc = bnxt_hwrm_func_qcaps(bp);
- if (rc) {
- PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
- goto error_free;
- }
-
rc = bnxt_alloc_stats_mem(bp);
if (rc)
goto error_free;
- if (bp->max_tx_rings == 0) {
- PMD_DRV_LOG(ERR, "No TX rings available!\n");
- rc = -EBUSY;
- goto error_free;
- }
-
- rc = bnxt_setup_mac_addr(eth_dev);
- if (rc)
- goto error_free;
-
- /* THOR does not support ring groups.
- * But we will use the array to save RSS context IDs.
- */
- if (BNXT_CHIP_THOR(bp)) {
- bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
- } else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
- /* 1 ring is for default completion ring */
- PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
- rc = -ENOSPC;
- goto error_free;
- }
-
- if (BNXT_HAS_RING_GRPS(bp)) {
- bp->grp_info = rte_zmalloc("bnxt_grp_info",
- sizeof(*bp->grp_info) *
- bp->max_ring_grps, 0);
- if (!bp->grp_info) {
- PMD_DRV_LOG(ERR,
- "Failed to alloc %zu bytes for grp info tbl.\n",
- sizeof(*bp->grp_info) * bp->max_ring_grps);
- rc = -ENOMEM;
- goto error_free;
- }
- }
-
- /* Forward all requests if firmware is new enough */
- if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
- (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
- ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
- memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
- } else {
- PMD_DRV_LOG(WARNING,
- "Firmware too old for VF mailbox functionality\n");
- memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
- }
-
- /*
- * The following are used for driver cleanup. If we disallow these,
- * VF drivers can't clean up cleanly.
- */
- ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
- ALLOW_FUNC(HWRM_VNIC_FREE);
- ALLOW_FUNC(HWRM_RING_FREE);
- ALLOW_FUNC(HWRM_RING_GRP_FREE);
- ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
- ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
- ALLOW_FUNC(HWRM_STAT_CTX_FREE);
- ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
- ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
- rc = bnxt_hwrm_func_driver_register(bp);
- if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to register driver");
- rc = -EBUSY;
- goto error_free;
- }
-
PMD_DRV_LOG(INFO,
- DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
- pci_dev->mem_resource[0].phys_addr,
- pci_dev->mem_resource[0].addr);
-
- rc = bnxt_hwrm_func_qcfg(bp, &mtu);
- if (rc) {
- PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
- goto error_free;
- }
-
- if (mtu >= RTE_ETHER_MIN_MTU && mtu <= BNXT_MAX_MTU &&
- mtu != eth_dev->data->mtu)
- eth_dev->data->mtu = mtu;
-
- if (BNXT_PF(bp)) {
- //if (bp->pf.active_vfs) {
- // TODO: Deallocate VF resources?
- //}
- if (bp->pdev->max_vfs) {
- rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
- if (rc) {
- PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
- goto error_free;
- }
- } else {
- rc = bnxt_hwrm_allocate_pf_only(bp);
- if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to allocate PF resources\n");
- goto error_free;
- }
- }
- }
-
- bnxt_hwrm_port_led_qcaps(bp);
-
- rc = bnxt_setup_int(bp);
- if (rc)
- goto error_free;
-
- rc = bnxt_alloc_mem(bp);
- if (rc)
- goto error_free;
-
- bnxt_init_nic(bp);
-
- rc = bnxt_request_int(bp);
- if (rc)
- goto error_free;
+ DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
+ pci_dev->mem_resource[0].phys_addr,
+ pci_dev->mem_resource[0].addr);
return 0;
error_free:
bnxt_dev_uninit(eth_dev);
-error:
+ return rc;
+}
+
+static int
+bnxt_uninit_resources(struct bnxt *bp)
+{
+ int rc;
+
+ bnxt_disable_int(bp);
+ bnxt_free_int(bp);
+ bnxt_free_mem(bp);
+ bnxt_hwrm_func_buf_unrgtr(bp);
+ rc = bnxt_hwrm_func_driver_unregister(bp, 0);
+ bp->flags &= ~BNXT_FLAG_REGISTERED;
+ bnxt_free_ctx_mem(bp);
+ bnxt_free_hwrm_resources(bp);
+
return rc;
}
@@ -4029,18 +4196,13 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
return -EPERM;
PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
- bnxt_disable_int(bp);
- bnxt_free_int(bp);
- bnxt_free_mem(bp);
- bnxt_hwrm_func_buf_unrgtr(bp);
+ rc = bnxt_uninit_resources(bp);
if (bp->grp_info != NULL) {
rte_free(bp->grp_info);
bp->grp_info = NULL;
}
- rc = bnxt_hwrm_func_driver_unregister(bp, 0);
- bnxt_free_hwrm_resources(bp);
if (bp->tx_mem_zone) {
rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
@@ -4056,7 +4218,6 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
bnxt_dev_close_op(eth_dev);
if (bp->pf.vf_info)
rte_free(bp->pf.vf_info);
- bnxt_free_ctx_mem(bp);
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 9883fb5063..24a5a09147 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -964,8 +964,6 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- bp->flags &= ~BNXT_FLAG_REGISTERED;
-
return rc;
}
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index be15b4bd14..f19865c832 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -50,6 +50,38 @@ int bnxt_init_ring_grps(struct bnxt *bp)
return 0;
}
+int bnxt_alloc_ring_grps(struct bnxt *bp)
+{
+ if (bp->max_tx_rings == 0) {
+ PMD_DRV_LOG(ERR, "No TX rings available!\n");
+ return -EBUSY;
+ }
+
+ /* THOR does not support ring groups.
+ * But we will use the array to save RSS context IDs.
+ */
+ if (BNXT_CHIP_THOR(bp)) {
+ bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_THOR;
+ } else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
+ /* 1 ring is for default completion ring */
+ PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
+ return -ENOSPC;
+ }
+
+ if (BNXT_HAS_RING_GRPS(bp)) {
+ bp->grp_info = rte_zmalloc("bnxt_grp_info",
+ sizeof(*bp->grp_info) *
+ bp->max_ring_grps, 0);
+ if (!bp->grp_info) {
+ PMD_DRV_LOG(ERR,
+ "Failed to alloc grp info tbl.\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
/*
* Allocates a completion ring with vmem and stats optionally also allocating
* a TX and/or RX ring. Passing NULL as tx_ring_info and/or rx_ring_info
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index 04c7b04b82..a31d59ea39 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -67,6 +67,7 @@ struct bnxt_rx_ring_info;
struct bnxt_cp_ring_info;
void bnxt_free_ring(struct bnxt_ring *ring);
int bnxt_init_ring_grps(struct bnxt *bp);
+int bnxt_alloc_ring_grps(struct bnxt *bp);
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
struct bnxt_tx_queue *txq,
struct bnxt_rx_queue *rxq,
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 1d95f11394..d5fc5268db 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -263,6 +263,9 @@ void bnxt_rx_queue_release_op(void *rx_queue)
struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
if (rxq) {
+ if (is_bnxt_in_error(rxq->bp))
+ return;
+
bnxt_rx_queue_release_mbufs(rxq);
/* Free RX ring hardware descriptors */
@@ -294,6 +297,10 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
int rc = 0;
uint8_t queue_state;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (queue_idx >= bp->max_rx_rings) {
PMD_DRV_LOG(ERR,
"Cannot create Rx ring %d. Only %d rings available\n",
@@ -363,10 +370,15 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
int
bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_rx_queue *rxq;
struct bnxt_cp_ring_info *cpr;
int rc = 0;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (eth_dev->data->rx_queues) {
rxq = eth_dev->data->rx_queues[queue_id];
if (!rxq) {
@@ -382,10 +394,15 @@ bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
int
bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
+ struct bnxt *bp = eth_dev->data->dev_private;
struct bnxt_rx_queue *rxq;
struct bnxt_cp_ring_info *cpr;
int rc = 0;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (eth_dev->data->rx_queues) {
rxq = eth_dev->data->rx_queues[queue_id];
if (!rxq) {
@@ -406,6 +423,10 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct bnxt_vnic_info *vnic = NULL;
int rc = 0;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (rxq == NULL) {
PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
return -EINVAL;
@@ -458,6 +479,10 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct bnxt_rx_queue *rxq = NULL;
int rc = 0;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
/* For the stingray platform and other platforms needing tighter
* control of resource utilization, Rx CQ 0 also works as
* Default CQ for async notifications
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 185a0e376b..12313dd53c 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -539,6 +539,9 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
int rc = 0;
bool evt = false;
+ if (unlikely(is_bnxt_in_error(rxq->bp)))
+ return 0;
+
/* If Rx Q was stopped return. RxQ0 cannot be stopped. */
if (unlikely(((rxq->rx_deferred_start ||
!rte_spinlock_trylock(&rxq->lock)) &&
@@ -625,6 +628,20 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx_pkts;
}
+/*
+ * Dummy DPDK callback for RX.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ */
+uint16_t
+bnxt_dummy_recv_pkts(void *rx_queue __rte_unused,
+ struct rte_mbuf **rx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
void bnxt_free_rx_rings(struct bnxt *bp)
{
int i;
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index 6a80c37c81..493b754066 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -185,6 +185,8 @@ struct bnxt_rx_ring_info {
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t bnxt_dummy_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 049ad9e398..21012e1fee 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -353,6 +353,10 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
struct bnxt *bp = eth_dev->data->dev_private;
unsigned int num_q_stats;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
memset(bnxt_stats, 0, sizeof(*bnxt_stats));
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
@@ -398,6 +402,10 @@ int bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
unsigned int i;
int ret;
+ ret = is_bnxt_in_error(bp);
+ if (ret)
+ return ret;
+
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
return -EINVAL;
@@ -417,13 +425,17 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
struct bnxt *bp = eth_dev->data->dev_private;
-
unsigned int count, i;
uint64_t tx_drop_pkts;
unsigned int rx_port_stats_ext_cnt;
unsigned int tx_port_stats_ext_cnt;
unsigned int stat_size = sizeof(uint64_t);
unsigned int stat_count;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
memset(xstats, 0, sizeof(*xstats));
@@ -502,7 +514,13 @@ int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev,
RTE_DIM(bnxt_tx_stats_strings) + 1 +
RTE_DIM(bnxt_rx_ext_stats_strings) +
RTE_DIM(bnxt_tx_ext_stats_strings);
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
unsigned int i, count;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
if (xstats_names != NULL) {
count = 0;
@@ -551,6 +569,10 @@ int bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
struct bnxt *bp = eth_dev->data->dev_private;
int ret;
+ ret = is_bnxt_in_error(bp);
+ if (ret)
+ return ret;
+
if (bp->flags & BNXT_FLAG_PORT_STATS && BNXT_SINGLE_PF(bp)) {
ret = bnxt_hwrm_port_clr_stats(bp);
if (ret != 0) {
@@ -586,9 +608,15 @@ int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
RTE_DIM(bnxt_tx_stats_strings) + 1 +
RTE_DIM(bnxt_rx_ext_stats_strings) +
RTE_DIM(bnxt_tx_ext_stats_strings);
+ struct bnxt *bp = dev->data->dev_private;
struct rte_eth_xstat xstats[stat_cnt];
uint64_t values_copy[stat_cnt];
uint16_t i;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
if (!ids)
return bnxt_dev_xstats_get_op(dev, xstats, stat_cnt);
@@ -614,7 +642,13 @@ int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,
RTE_DIM(bnxt_rx_ext_stats_strings) +
RTE_DIM(bnxt_tx_ext_stats_strings);
struct rte_eth_xstat_name xstats_names_copy[stat_cnt];
+ struct bnxt *bp = dev->data->dev_private;
uint16_t i;
+ int rc;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
if (!ids)
return bnxt_dev_xstats_get_names_op(dev, xstats_names,
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index 43b3496c1e..0901324793 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -58,6 +58,9 @@ void bnxt_tx_queue_release_op(void *tx_queue)
struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
if (txq) {
+ if (is_bnxt_in_error(txq->bp))
+ return;
+
/* Free TX ring hardware descriptors */
bnxt_tx_queue_release_mbufs(txq);
bnxt_free_ring(txq->tx_ring->tx_ring_struct);
@@ -84,6 +87,10 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
struct bnxt_tx_queue *txq;
int rc = 0;
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
+
if (queue_idx >= bp->max_tx_rings) {
PMD_DRV_LOG(ERR,
"Cannot create Tx ring %d. Only %d rings available\n",
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index c71e6f1892..35e7166bed 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -148,6 +148,9 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
TX_BD_LONG_FLAGS_LHINT_LT2K
};
+ if (unlikely(is_bnxt_in_error(txq->bp)))
+ return -EIO;
+
if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
@@ -485,10 +488,29 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx_pkts;
}
+/*
+ * Dummy DPDK callback for TX.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ */
+uint16_t
+bnxt_dummy_xmit_pkts(void *tx_queue __rte_unused,
+ struct rte_mbuf **tx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct bnxt *bp = dev->data->dev_private;
struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+ int rc = 0;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
txq->tx_deferred_start = false;
@@ -501,6 +523,11 @@ int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct bnxt *bp = dev->data->dev_private;
struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+ int rc = 0;
+
+ rc = is_bnxt_in_error(bp);
+ if (rc)
+ return rc;
/* Handle TX completions */
bnxt_handle_tx_cp(txq);
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 08fd2e0142..e7f43f9d1d 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -57,6 +57,8 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t bnxt_dummy_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
#ifdef RTE_ARCH_X86
uint16_t bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
--
2.20.1 (Apple Git-117)