* [dpdk-dev] [PATCH v2 1/4] net/hns3: fix the statistics problems about Tx/Rx functions
From: Wei Hu (Xavier) @ 2019-10-25 12:37 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, linuxarm, xavier_huwei, xavier.huwei, forest.zhouchang
From: Hao Chen <chenhao164@huawei.com>
This patch fixes the following statistics problems for transmitted and
received packets:
1. In the Rx direction, packets with FCS errors should not be counted
in the rte_eth_stats.ipackets statistics.
2. In the Tx direction, for packets with an illegal length (longer than
the maximum frame length or equal to 0), the driver should not notify
the NIC hardware to send them, should not continue sending the
remaining packets of the burst, and should not count them in the
rte_eth_stats.opackets statistics.
Fixes: 8839c5e202f3 ("net/hns3: support device stats")
Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
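Note (kept below the '---' marker so it is not part of the commit
message): the following standalone sketch illustrates the Rx accounting
that this patch moves into hns3_stats_get(). The structures, helper and
queue count below are illustrative assumptions rather than the driver's
actual definitions; the point is only that the hardware per-ring packet
counters still include FCS/truncated packets, so the driver-kept error
counters have to be subtracted before ipackets/q_ipackets are reported.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NB_RXQ 2 /* illustrative queue count */

/* Illustrative per-queue software error counters (assumed names). */
struct rxq_sw_stats {
	uint64_t l2_errors;      /* e.g. FCS errors seen by the driver */
	uint64_t pkt_len_errors; /* zero-length or truncated packets */
};

/* Illustrative subset of the rte_eth_stats fields involved. */
struct port_stats {
	uint64_t ipackets;
	uint64_t ierrors;
	uint64_t q_ipackets[NB_RXQ];
	uint64_t q_errors[NB_RXQ];
};

static void
fill_rx_stats(struct port_stats *st,
	      const uint64_t hw_ring_pktnum[NB_RXQ],
	      uint64_t hw_total_pktnum,
	      const struct rxq_sw_stats rxq[NB_RXQ])
{
	uint16_t i;

	st->ierrors = 0;
	for (i = 0; i < NB_RXQ; i++) {
		uint64_t errs = rxq[i].l2_errors + rxq[i].pkt_len_errors;

		st->q_errors[i] = errs;
		/* The HW ring counter still includes the bad packets. */
		st->q_ipackets[i] = hw_ring_pktnum[i] - errs;
		st->ierrors += errs;
	}
	/* Good packets = HW total minus driver-recorded errors. */
	st->ipackets = hw_total_pktnum - st->ierrors;
}

int
main(void)
{
	const uint64_t hw_ring_pktnum[NB_RXQ] = { 100, 50 };
	const struct rxq_sw_stats rxq[NB_RXQ] = { { 3, 1 }, { 0, 2 } };
	struct port_stats st = { 0 };

	fill_rx_stats(&st, hw_ring_pktnum, 150, rxq);
	printf("ipackets=%" PRIu64 " ierrors=%" PRIu64 "\n",
	       st.ipackets, st.ierrors);
	return 0;
}

Built with a plain C compiler and the example numbers above, the sketch
prints "ipackets=144 ierrors=6": the six bad packets are reported only
via ierrors/q_errors, which is the behaviour this patch restores.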
drivers/net/hns3/hns3_rxtx.c | 10 ++----
drivers/net/hns3/hns3_rxtx.h | 3 --
drivers/net/hns3/hns3_stats.c | 84 ++++++++++---------------------------------
3 files changed, 21 insertions(+), 76 deletions(-)
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index fdac55a..1e8283a 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -661,7 +661,6 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
rxq->l4_csum_erros = 0;
rxq->ol3_csum_erros = 0;
rxq->ol4_csum_erros = 0;
- rxq->errors = 0;
rte_spinlock_lock(&hw->lock);
dev->data->rx_queues[idx] = rxq;
@@ -817,14 +816,12 @@ hns3_handle_bdinfo(struct hns3_rx_queue *rxq, struct rte_mbuf *rxm,
if (unlikely(l234_info & BIT(HNS3_RXD_L2E_B))) {
rxq->l2_errors++;
- rxq->errors++;
return -EINVAL;
}
if (unlikely(rxm->pkt_len == 0 ||
(l234_info & BIT(HNS3_RXD_TRUNCAT_B)))) {
rxq->pkt_len_errors++;
- rxq->errors++;
return -EINVAL;
}
@@ -1098,7 +1095,6 @@ hns3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
txq->next_to_clean = 0;
txq->tx_bd_ready = txq->nb_tx_desc;
txq->port_id = dev->data->port_id;
- txq->pkt_len_errors = 0;
txq->configured = true;
txq->io_base = (void *)((char *)hw->io_base + HNS3_TQP_REG_OFFSET +
idx * HNS3_TQP_REG_SIZE);
@@ -1605,10 +1601,8 @@ hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* will be ignored.
*/
if (unlikely(tx_pkt->pkt_len > HNS3_MAX_FRAME_LEN ||
- tx_pkt->pkt_len == 0)) {
- txq->pkt_len_errors++;
- continue;
- }
+ tx_pkt->pkt_len == 0))
+ break;
m_seg = tx_pkt;
if (unlikely(nb_buf > HNS3_MAX_TX_BD_PER_PKT)) {
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 358f129..daf51f4 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -252,7 +252,6 @@ struct hns3_rx_queue {
uint64_t l4_csum_erros;
uint64_t ol3_csum_erros;
uint64_t ol4_csum_erros;
- uint64_t errors; /* num of error rx packets recorded by driver */
};
struct hns3_tx_queue {
@@ -272,8 +271,6 @@ struct hns3_tx_queue {
bool tx_deferred_start; /* don't start this queue in dev start */
bool configured; /* indicate if tx queue has been configured */
-
- uint64_t pkt_len_errors;
};
#define HNS3_TX_CKSUM_OFFLOAD_MASK ( \
diff --git a/drivers/net/hns3/hns3_stats.c b/drivers/net/hns3/hns3_stats.c
index a0252ea..9948beb 100644
--- a/drivers/net/hns3/hns3_stats.c
+++ b/drivers/net/hns3/hns3_stats.c
@@ -235,12 +235,6 @@ static const struct hns3_xstats_name_offset hns3_rx_bd_error_strings[] = {
HNS3_RX_BD_ERROR_STATS_FIELD_OFFSET(ol4_csum_erros)}
};
-/* The statistic of errors in Tx */
-static const struct hns3_xstats_name_offset hns3_tx_error_strings[] = {
- {"TX_PKT_LEN_ERRORS",
- HNS3_TX_ERROR_STATS_FIELD_OFFSET(pkt_len_errors)}
-};
-
#define HNS3_NUM_MAC_STATS (sizeof(hns3_mac_strings) / \
sizeof(hns3_mac_strings[0]))
@@ -253,9 +247,6 @@ static const struct hns3_xstats_name_offset hns3_tx_error_strings[] = {
#define HNS3_NUM_RX_BD_ERROR_XSTATS (sizeof(hns3_rx_bd_error_strings) / \
sizeof(hns3_rx_bd_error_strings[0]))
-#define HNS3_NUM_TX_ERROR_XSTATS (sizeof(hns3_tx_error_strings) / \
- sizeof(hns3_tx_error_strings[0]))
-
#define HNS3_FIX_NUM_STATS (HNS3_NUM_MAC_STATS + HNS3_NUM_ERROR_INT_XSTATS + \
HNS3_NUM_RESET_XSTATS)
@@ -434,6 +425,7 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
struct hns3_hw *hw = &hns->hw;
struct hns3_tqp_stats *stats = &hw->tqp_stats;
struct hns3_rx_queue *rxq;
+ struct hns3_tx_queue *txq;
uint64_t cnt;
uint64_t num;
uint16_t i;
@@ -446,25 +438,32 @@ hns3_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *rte_stats)
return ret;
}
- rte_stats->ipackets = stats->rcb_rx_ring_pktnum_rcd;
- rte_stats->opackets = stats->rcb_tx_ring_pktnum_rcd;
- rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
-
- num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, hw->tqps_num);
- for (i = 0; i < num; i++) {
- rte_stats->q_ipackets[i] = stats->rcb_rx_ring_pktnum[i];
- rte_stats->q_opackets[i] = stats->rcb_tx_ring_pktnum[i];
- }
-
+ /* Get the error stats of received packets */
num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, eth_dev->data->nb_rx_queues);
for (i = 0; i != num; ++i) {
rxq = eth_dev->data->rx_queues[i];
if (rxq) {
- cnt = rxq->errors;
+ cnt = rxq->l2_errors + rxq->pkt_len_errors;
rte_stats->q_errors[i] = cnt;
+ rte_stats->q_ipackets[i] =
+ stats->rcb_rx_ring_pktnum[i] - cnt;
rte_stats->ierrors += cnt;
}
}
+ /* Get the error stats of transmitted packets */
+ num = RTE_MIN(RTE_ETHDEV_QUEUE_STAT_CNTRS, eth_dev->data->nb_tx_queues);
+ for (i = 0; i < num; i++) {
+ txq = eth_dev->data->tx_queues[i];
+ if (txq)
+ rte_stats->q_opackets[i] = stats->rcb_tx_ring_pktnum[i];
+ }
+
+ rte_stats->oerrors = 0;
+ rte_stats->ipackets = stats->rcb_rx_ring_pktnum_rcd -
+ rte_stats->ierrors;
+ rte_stats->opackets = stats->rcb_tx_ring_pktnum_rcd -
+ rte_stats->oerrors;
+ rte_stats->rx_nombuf = eth_dev->data->rx_mbuf_alloc_failed;
return 0;
}
@@ -477,7 +476,6 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
struct hns3_tqp_stats *stats = &hw->tqp_stats;
struct hns3_cmd_desc desc_reset;
struct hns3_rx_queue *rxq;
- struct hns3_tx_queue *txq;
uint16_t i;
int ret;
@@ -518,12 +516,7 @@ hns3_stats_reset(struct rte_eth_dev *eth_dev)
rxq->l4_csum_erros = 0;
rxq->ol3_csum_erros = 0;
rxq->ol4_csum_erros = 0;
- rxq->errors = 0;
}
-
- txq = eth_dev->data->tx_queues[i];
- if (txq)
- txq->pkt_len_errors = 0;
}
memset(stats, 0, sizeof(struct hns3_tqp_stats));
@@ -554,11 +547,9 @@ hns3_xstats_calc_num(struct rte_eth_dev *dev)
if (hns->is_vf)
return dev->data->nb_rx_queues * HNS3_NUM_RX_BD_ERROR_XSTATS +
- dev->data->nb_tx_queues * HNS3_NUM_TX_ERROR_XSTATS +
HNS3_NUM_RESET_XSTATS;
else
return dev->data->nb_rx_queues * HNS3_NUM_RX_BD_ERROR_XSTATS +
- dev->data->nb_tx_queues * HNS3_NUM_TX_ERROR_XSTATS +
HNS3_FIX_NUM_STATS;
}
@@ -585,7 +576,6 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
struct hns3_mac_stats *mac_stats = &hw->mac_stats;
struct hns3_reset_stats *reset_stats = &hw->reset.stats;
struct hns3_rx_queue *rxq;
- struct hns3_tx_queue *txq;
uint16_t i, j;
char *addr;
int count;
@@ -644,16 +634,6 @@ hns3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
}
}
- /* Get the Tx errors stats */
- for (j = 0; j != dev->data->nb_tx_queues; ++j) {
- for (i = 0; i < HNS3_NUM_TX_ERROR_XSTATS; i++) {
- txq = dev->data->tx_queues[j];
- addr = (char *)txq + hns3_tx_error_strings[i].offset;
- xstats[count].value = *(uint64_t *)addr;
- xstats[count].id = count;
- count++;
- }
- }
return count;
}
@@ -727,14 +707,6 @@ hns3_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
count++;
}
}
- for (j = 0; j < dev->data->nb_tx_queues; j++) {
- for (i = 0; i < HNS3_NUM_TX_ERROR_XSTATS; i++) {
- snprintf(xstats_names[count].name,
- sizeof(xstats_names[count].name),
- "tx_q%u%s", j, hns3_tx_error_strings[i].name);
- count++;
- }
- }
return count;
}
@@ -772,7 +744,6 @@ hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
struct hns3_mac_stats *mac_stats = &hw->mac_stats;
struct hns3_reset_stats *reset_stats = &hw->reset.stats;
struct hns3_rx_queue *rxq;
- struct hns3_tx_queue *txq;
const uint32_t cnt_stats = hns3_xstats_calc_num(dev);
uint64_t *values_copy;
uint64_t len;
@@ -831,15 +802,6 @@ hns3_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
}
}
- for (j = 0; j != dev->data->nb_tx_queues; ++j) {
- for (i = 0; i < HNS3_NUM_TX_ERROR_XSTATS; i++) {
- txq = dev->data->tx_queues[j];
- addr = (char *)txq + hns3_tx_error_strings[i].offset;
- values_copy[count] = *(uint64_t *)addr;
- count++;
- }
- }
-
for (i = 0; i < size; i++) {
if (ids[i] >= cnt_stats) {
hns3_err(hw, "ids[%d] (%" PRIx64 ") is invalid, "
@@ -928,14 +890,6 @@ hns3_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
count_name++;
}
}
- for (j = 0; j != dev->data->nb_rx_queues; ++j) {
- for (i = 0; i < HNS3_NUM_TX_ERROR_XSTATS; i++) {
- snprintf(xstats_names_copy[count_name].name,
- sizeof(xstats_names_copy[count_name].name),
- "tx_q%u%s", j, hns3_tx_error_strings[i].name);
- count_name++;
- }
- }
for (i = 0; i < size; i++) {
if (ids[i] >= cnt_stats) {
--
2.7.4
* [dpdk-dev] [PATCH v2 4/4] net/hns3: fix FLR reset failure
From: Wei Hu (Xavier) @ 2019-10-25 12:37 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, linuxarm, xavier_huwei, xavier.huwei, forest.zhouchang
From: Chunsong Feng <fengchunsong@huawei.com>
A PF FLR resets the PCIe configuration (ECAM) space of all VFs under
the PF and does not restore it automatically. Therefore, the VF driver
needs to restore the ECAM configuration, including bus_master_en and
msix_enable, to avoid FLR reset failure.
Fixes: 2790c6464725 ("net/hns3: support device reset")
Signed-off-by: Chunsong Feng <fengchunsong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
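Note (kept below the '---' marker so it is not part of the commit
message): the sketch below condenses the config-space restore that a VF
needs after a full VF (FLR) reset. The helper name and the assumption
that the MSI-X capability offset has already been located by walking
the capability list are illustrative; only rte_pci_read_config() /
rte_pci_write_config() and the standard <linux/pci_regs.h> definitions
are taken as given.

#include <stdint.h>
#include <linux/pci_regs.h>

#include <rte_bus_pci.h>

/*
 * Restore the two settings that a PF FLR clears in the VF's PCIe config
 * space: Bus Master Enable in the command register, and the MSI-X enable
 * bit in the MSI-X capability. 'msix_cap_pos' is assumed to have been
 * found beforehand (e.g. by walking the capability list, as the patch
 * does in hns3vf_find_pci_capability()).
 */
static void
vf_restore_cfg_after_flr(const struct rte_pci_device *dev, int msix_cap_pos)
{
	uint16_t cmd;
	uint16_t ctrl;

	/* Re-enable bus mastering so the device can issue DMA again. */
	rte_pci_read_config(dev, &cmd, sizeof(cmd), PCI_COMMAND);
	cmd |= PCI_COMMAND_MASTER;
	rte_pci_write_config(dev, &cmd, sizeof(cmd), PCI_COMMAND);

	/* Re-enable MSI-X; FLR cleared the enable bit in the capability. */
	if (msix_cap_pos > 0) {
		rte_pci_read_config(dev, &ctrl, sizeof(ctrl),
				    msix_cap_pos + PCI_MSIX_FLAGS);
		ctrl |= PCI_MSIX_FLAGS_ENABLE;
		rte_pci_write_config(dev, &ctrl, sizeof(ctrl),
				     msix_cap_pos + PCI_MSIX_FLAGS);
	}
}

For devices bound to vfio-pci the MSI-X enable bit is rewritten by
rte_intr_enable(), so the explicit MSI-X restore is only needed for the
UIO drivers; that is why the patch guards it with a pci_dev->kdrv check.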
drivers/net/hns3/hns3_cmd.c | 15 -----
drivers/net/hns3/hns3_ethdev.c | 5 ++
drivers/net/hns3/hns3_ethdev_vf.c | 135 ++++++++++++++++++++++++++++++++++++--
drivers/net/hns3/hns3_intr.c | 5 +-
4 files changed, 139 insertions(+), 21 deletions(-)
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
index 58776c2..65a5af8 100644
--- a/drivers/net/hns3/hns3_cmd.c
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -216,24 +216,9 @@ hns3_cmd_csq_clean(struct hns3_hw *hw)
if (!is_valid_csq_clean_head(csq, head)) {
struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
- uint32_t global;
- uint32_t fun_rst;
hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
csq->next_to_use, csq->next_to_clean);
rte_atomic16_set(&hw->reset.disable_cmd, 1);
- if (hns->is_vf) {
- global = hns3_read_dev(hw, HNS3_VF_RST_ING);
- fun_rst = hns3_read_dev(hw, HNS3_FUN_RST_ING);
- hns3_err(hw, "Delayed VF reset global: %x fun_rst: %x",
- global, fun_rst);
- hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
- } else {
- global = hns3_read_dev(hw, HNS3_GLOBAL_RESET_REG);
- fun_rst = hns3_read_dev(hw, HNS3_FUN_RST_ING);
- hns3_err(hw, "Delayed IMP reset global: %x fun_rst: %x",
- global, fun_rst);
- hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
- }
hns3_schedule_delayed_reset(hns);
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 862a717..3435bce 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4680,6 +4680,11 @@ hns3_reset_service(void *param)
rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
+ reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
+ if (reset_level == HNS3_NONE_RESET) {
+ hns3_err(hw, "No reset level is set, try IMP reset");
+ hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
+ }
}
rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 121beb5..4036749 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -9,6 +9,8 @@
#include <inttypes.h>
#include <unistd.h>
#include <arpa/inet.h>
+#include <linux/pci_regs.h>
+
#include <rte_alarm.h>
#include <rte_atomic.h>
#include <rte_bus_pci.h>
@@ -24,6 +26,7 @@
#include <rte_io.h>
#include <rte_log.h>
#include <rte_pci.h>
+#include <rte_vfio.h>
#include "hns3_ethdev.h"
#include "hns3_logs.h"
@@ -56,6 +59,81 @@ static enum hns3_reset_level hns3vf_get_reset_level(struct hns3_hw *hw,
static int hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3vf_dev_configure_vlan(struct rte_eth_dev *dev);
+/* set PCI bus mastering */
+static void
+hns3vf_set_bus_master(const struct rte_pci_device *device, bool op)
+{
+ uint16_t reg;
+
+ rte_pci_read_config(device, &reg, sizeof(reg), PCI_COMMAND);
+
+ if (op)
+ /* set the master bit */
+ reg |= PCI_COMMAND_MASTER;
+ else
+ reg &= ~(PCI_COMMAND_MASTER);
+
+ rte_pci_write_config(device, &reg, sizeof(reg), PCI_COMMAND);
+}
+
+/**
+ * hns3vf_find_pci_capability - lookup a capability in the PCI capability list
+ * @cap: the capability
+ *
+ * Return the address of the given capability within the PCI capability list.
+ */
+static int
+hns3vf_find_pci_capability(const struct rte_pci_device *device, int cap)
+{
+#define MAX_PCIE_CAPABILITY 48
+ uint16_t status;
+ uint8_t pos;
+ uint8_t id;
+ int ttl;
+
+ rte_pci_read_config(device, &status, sizeof(status), PCI_STATUS);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+
+ ttl = MAX_PCIE_CAPABILITY;
+ rte_pci_read_config(device, &pos, sizeof(pos), PCI_CAPABILITY_LIST);
+ while (ttl-- && pos >= PCI_STD_HEADER_SIZEOF) {
+ rte_pci_read_config(device, &id, sizeof(id),
+ (pos + PCI_CAP_LIST_ID));
+
+ if (id == 0xFF)
+ break;
+
+ if (id == cap)
+ return (int)pos;
+
+ rte_pci_read_config(device, &pos, sizeof(pos),
+ (pos + PCI_CAP_LIST_NEXT));
+ }
+ return 0;
+}
+
+static int
+hns3vf_enable_msix(const struct rte_pci_device *device, bool op)
+{
+ uint16_t control;
+ int pos;
+
+ pos = hns3vf_find_pci_capability(device, PCI_CAP_ID_MSIX);
+ if (pos) {
+ rte_pci_read_config(device, &control, sizeof(control),
+ (pos + PCI_MSIX_FLAGS));
+ if (op)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ else
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ rte_pci_write_config(device, &control, sizeof(control),
+ (pos + PCI_MSIX_FLAGS));
+ return 0;
+ }
+ return -1;
+}
+
static int
hns3vf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
__attribute__ ((unused)) uint32_t idx,
@@ -1308,9 +1386,30 @@ hns3vf_wait_hardware_ready(struct hns3_adapter *hns)
struct hns3_wait_data *wait_data = hw->reset.wait_data;
struct timeval tv;
- if (wait_data->result == HNS3_WAIT_SUCCESS)
- return 0;
- else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
+ if (wait_data->result == HNS3_WAIT_SUCCESS) {
+ /*
+ * After vf reset is ready, the PF may not have completed
+ * the reset processing. The vf sending mbox to PF may fail
+ * during the pf reset, so it is better to add extra delay.
+ */
+ if (hw->reset.level == HNS3_VF_FUNC_RESET ||
+ hw->reset.level == HNS3_FLR_RESET)
+ return 0;
+ /* Reset retry process, no need to add extra delay. */
+ if (hw->reset.attempts)
+ return 0;
+ if (wait_data->check_completion == NULL)
+ return 0;
+
+ wait_data->check_completion = NULL;
+ wait_data->interval = 1 * MSEC_PER_SEC * USEC_PER_MSEC;
+ wait_data->count = 1;
+ wait_data->result = HNS3_WAIT_REQUEST;
+ rte_eal_alarm_set(wait_data->interval, hns3_wait_callback,
+ wait_data);
+ hns3_warn(hw, "hardware is ready, delay 1 sec for PF reset complete");
+ return -EAGAIN;
+ } else if (wait_data->result == HNS3_WAIT_TIMEOUT) {
gettimeofday(&tv, NULL);
hns3_warn(hw, "Reset step4 hardware not ready after reset time=%ld.%.6ld",
tv.tv_sec, tv.tv_usec);
@@ -1473,6 +1572,11 @@ hns3vf_reset_service(void *param)
rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_REQUESTED);
hns3_err(hw, "Handling interrupts in delayed tasks");
hns3vf_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
+ reset_level = hns3vf_get_reset_level(hw, &hw->reset.pending);
+ if (reset_level == HNS3_NONE_RESET) {
+ hns3_err(hw, "No reset level is set, try global reset");
+ hns3_atomic_set_bit(HNS3_VF_RESET, &hw->reset.pending);
+ }
}
rte_atomic16_set(&hns->hw.reset.schedule, SCHEDULE_NONE);
@@ -1498,14 +1602,35 @@ hns3vf_reset_service(void *param)
static int
hns3vf_reinit_dev(struct hns3_adapter *hns)
{
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[hns->hw.data->port_id];
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct hns3_hw *hw = &hns->hw;
int ret;
+ if (hw->reset.level == HNS3_VF_FULL_RESET) {
+ rte_intr_disable(&pci_dev->intr_handle);
+ hns3vf_set_bus_master(pci_dev, true);
+ }
+
/* Firmware command initialize */
ret = hns3_cmd_init(hw);
if (ret) {
hns3_err(hw, "Failed to init cmd: %d", ret);
- return ret;
+ goto err_cmd_init;
+ }
+
+ if (hw->reset.level == HNS3_VF_FULL_RESET) {
+ /*
+ * UIO enables msix by writing the pcie configuration space
+ * vfio_pci enables msix in rte_intr_enable.
+ */
+ if (pci_dev->kdrv == RTE_KDRV_IGB_UIO ||
+ pci_dev->kdrv == RTE_KDRV_UIO_GENERIC) {
+ if (hns3vf_enable_msix(pci_dev, true))
+ hns3_err(hw, "Failed to enable msix");
+ }
+
+ rte_intr_enable(&pci_dev->intr_handle);
}
ret = hns3_reset_all_queues(hns);
@@ -1522,6 +1647,8 @@ hns3vf_reinit_dev(struct hns3_adapter *hns)
return 0;
+err_cmd_init:
+ hns3vf_set_bus_master(pci_dev, false);
err_init:
hns3_cmd_uninit(hw);
return ret;
diff --git a/drivers/net/hns3/hns3_intr.c b/drivers/net/hns3/hns3_intr.c
index 9e2d811..6c3ebd3 100644
--- a/drivers/net/hns3/hns3_intr.c
+++ b/drivers/net/hns3/hns3_intr.c
@@ -890,11 +890,12 @@ hns3_reset_err_handle(struct hns3_adapter *hns)
hns3_warn(hw, "%s reset fail fail_cnt:%" PRIx64 " success_cnt:%" PRIx64
" global_cnt:%" PRIx64 " imp_cnt:%" PRIx64
" request_cnt:%" PRIx64 " exec_cnt:%" PRIx64
- " merge_cnt:%" PRIx64,
+ " merge_cnt:%" PRIx64 "adapter_state:%d",
reset_string[hw->reset.level], hw->reset.stats.fail_cnt,
hw->reset.stats.success_cnt, hw->reset.stats.global_cnt,
hw->reset.stats.imp_cnt, hw->reset.stats.request_cnt,
- hw->reset.stats.exec_cnt, hw->reset.stats.merge_cnt);
+ hw->reset.stats.exec_cnt, hw->reset.stats.merge_cnt,
+ hw->adapter_state);
/* IMP no longer waiting the ready flag */
hns3_notify_reset_ready(hw, true);
--
2.7.4