* [dpdk-dev] [PATCH v2 1/9] net/hns3: support Rx interrupt
From: Wei Hu (Xavier) @ 2019-12-14 10:29 UTC
To: dev
From: Hao Chen <chenhao164@huawei.com>
This patch adds support for receiving packets in interrupt mode to the
hns3 PF/VF driver. The following ops functions defined in struct
eth_dev_ops are implemented:
rx_queue_intr_enable
rx_queue_intr_disable
rx_queue_count
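For background, here is a minimal usage sketch (not part of this patch)
of how an application is expected to consume these ops through the
generic ethdev API, in the style of the l3fwd-power example. It assumes
a port already configured and started with intr_conf.rxq = 1; the
function and buffer names are illustrative only:

    #include <rte_common.h>
    #include <rte_ethdev.h>
    #include <rte_interrupts.h>
    #include <rte_mbuf.h>

    static void
    rx_intr_loop(uint16_t port_id, uint16_t queue_id)
    {
            struct rte_epoll_event event;
            struct rte_mbuf *pkts[32];
            uint16_t nb_rx;

            /* Register the Rx queue interrupt with the per-thread epoll
             * fd; the eventfd behind it was set up by the PMD when the
             * device started. */
            rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
                                      RTE_EPOLL_PER_THREAD,
                                      RTE_INTR_EVENT_ADD, NULL);

            for (;;) {
                    /* Arm the interrupt: calls .rx_queue_intr_enable. */
                    rte_eth_dev_rx_intr_enable(port_id, queue_id);
                    /* Sleep until the NIC signals pending packets. */
                    rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
                    /* Mask it again (.rx_queue_intr_disable) and drain
                     * the queue through the normal polling path. */
                    rte_eth_dev_rx_intr_disable(port_id, queue_id);
                    do {
                            nb_rx = rte_eth_rx_burst(port_id, queue_id,
                                                     pkts, RTE_DIM(pkts));
                            /* ... process and free the mbufs here ... */
                    } while (nb_rx > 0);
            }
    }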
Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
doc/guides/nics/features/hns3.ini | 1 +
doc/guides/nics/features/hns3_vf.ini | 1 +
drivers/net/hns3/hns3_cmd.h | 28 +++++
drivers/net/hns3/hns3_ethdev.c | 160 ++++++++++++++++++++++++--
drivers/net/hns3/hns3_ethdev_vf.c | 166 ++++++++++++++++++++++++---
drivers/net/hns3/hns3_mbx.h | 13 +++
drivers/net/hns3/hns3_regs.h | 3 +
drivers/net/hns3/hns3_rxtx.c | 51 ++++++++
drivers/net/hns3/hns3_rxtx.h | 4 +
9 files changed, 405 insertions(+), 22 deletions(-)
diff --git a/doc/guides/nics/features/hns3.ini b/doc/guides/nics/features/hns3.ini
index 6df789ed1..cd5c08a9d 100644
--- a/doc/guides/nics/features/hns3.ini
+++ b/doc/guides/nics/features/hns3.ini
@@ -5,6 +5,7 @@
;
[Features]
Link status = Y
+Rx interrupt = Y
MTU update = Y
Jumbo frame = Y
Promiscuous mode = Y
diff --git a/doc/guides/nics/features/hns3_vf.ini b/doc/guides/nics/features/hns3_vf.ini
index 41497c4c2..fd00ac3e2 100644
--- a/doc/guides/nics/features/hns3_vf.ini
+++ b/doc/guides/nics/features/hns3_vf.ini
@@ -5,6 +5,7 @@
;
[Features]
Link status = Y
+Rx interrupt = Y
MTU update = Y
Jumbo frame = Y
Unicast MAC filter = Y
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
index be0ecbe86..897dc1420 100644
--- a/drivers/net/hns3/hns3_cmd.h
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -209,6 +209,10 @@ enum hns3_opcode_type {
/* SFP command */
HNS3_OPC_SFP_GET_SPEED = 0x7104,
+ /* Interrupts commands */
+ HNS3_OPC_ADD_RING_TO_VECTOR = 0x1503,
+ HNS3_OPC_DEL_RING_TO_VECTOR = 0x1504,
+
/* Error INT commands */
HNS3_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
HNS3_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
@@ -673,6 +677,30 @@ struct hns3_tqp_map_cmd {
uint8_t rsv[18];
};
+#define HNS3_RING_TYPE_B 0
+#define HNS3_RING_TYPE_TX 0
+#define HNS3_RING_TYPE_RX 1
+#define HNS3_RING_GL_IDX_S 0
+#define HNS3_RING_GL_IDX_M GENMASK(1, 0)
+#define HNS3_RING_GL_RX 0
+#define HNS3_RING_GL_TX 1
+
+#define HNS3_VECTOR_ELEMENTS_PER_CMD 10
+
+#define HNS3_INT_TYPE_S 0
+#define HNS3_INT_TYPE_M GENMASK(1, 0)
+#define HNS3_TQP_ID_S 2
+#define HNS3_TQP_ID_M GENMASK(12, 2)
+#define HNS3_INT_GL_IDX_S 13
+#define HNS3_INT_GL_IDX_M GENMASK(14, 13)
+struct hns3_ctrl_vector_chain_cmd {
+ uint8_t int_vector_id;
+ uint8_t int_cause_num;
+ uint16_t tqp_type_and_id[HNS3_VECTOR_ELEMENTS_PER_CMD];
+ uint8_t vfid;
+ uint8_t rsv;
+};
+
struct hns3_config_max_frm_size_cmd {
uint16_t max_frm_size;
uint8_t min_frm_size;
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 72315718a..bf0ab458f 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2021,6 +2021,40 @@ hns3_check_dcb_cfg(struct rte_eth_dev *dev)
return hns3_check_mq_mode(dev);
}
+static int
+hns3_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
+ bool mmap, uint16_t queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_cmd_desc desc;
+ struct hns3_ctrl_vector_chain_cmd *req =
+ (struct hns3_ctrl_vector_chain_cmd *)desc.data;
+ enum hns3_cmd_status status;
+ enum hns3_opcode_type op;
+ uint16_t tqp_type_and_id = 0;
+
+ op = mmap ? HNS3_OPC_ADD_RING_TO_VECTOR : HNS3_OPC_DEL_RING_TO_VECTOR;
+ hns3_cmd_setup_basic_desc(&desc, op, false);
+ req->int_vector_id = vector_id;
+
+ hns3_set_field(tqp_type_and_id, HNS3_INT_TYPE_M, HNS3_INT_TYPE_S,
+ HNS3_RING_TYPE_RX);
+ hns3_set_field(tqp_type_and_id, HNS3_TQP_ID_M, HNS3_TQP_ID_S, queue_id);
+ hns3_set_field(tqp_type_and_id, HNS3_INT_GL_IDX_M, HNS3_INT_GL_IDX_S,
+ HNS3_RING_GL_RX);
+ req->tqp_type_and_id[0] = rte_cpu_to_le_16(tqp_type_and_id);
+
+ req->int_cause_num = 1;
+ status = hns3_cmd_send(hw, &desc, 1);
+ if (status) {
+ hns3_err(hw, "Failed to map TQP %d to vector %d, status is %d.",
+ queue_id, vector_id, status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int
hns3_dev_configure(struct rte_eth_dev *dev)
{
@@ -4020,15 +4054,83 @@ hns3_do_start(struct hns3_adapter *hns, bool reset_queue)
}
static int
-hns3_dev_start(struct rte_eth_dev *eth_dev)
+hns3_map_rx_interrupt(struct rte_eth_dev *dev)
{
- struct hns3_adapter *hns = eth_dev->data->dev_private;
- struct hns3_hw *hw = &hns->hw;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t intr_vector;
+ uint8_t base = 0;
+ uint8_t vec = 0;
+ uint16_t q_id;
int ret;
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return 0;
+
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* check and configure queue intr-vector mapping */
+ if (rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) {
+ intr_vector = dev->data->nb_rx_queues;
+ /* creates event fd for each intr vector when MSIX is used */
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -EINVAL;
+ }
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ hns3_err(hw, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ ret = -ENOMEM;
+ goto alloc_intr_vec_error;
+ }
+ }
+
+ if (rte_intr_allow_others(intr_handle)) {
+ vec = RTE_INTR_VEC_RXTX_OFFSET;
+ base = RTE_INTR_VEC_RXTX_OFFSET;
+ }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+ ret = hns3_bind_ring_with_vector(dev, vec, true, q_id);
+ if (ret)
+ goto bind_vector_error;
+ intr_handle->intr_vec[q_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+ }
+ rte_intr_enable(intr_handle);
+ return 0;
+
+bind_vector_error:
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+ return ret;
+alloc_intr_vec_error:
+ rte_intr_efd_disable(intr_handle);
+ return ret;
+}
+
+static int
+hns3_dev_start(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret = 0;
+
PMD_INIT_FUNC_TRACE();
if (rte_atomic16_read(&hw->reset.resetting))
return -EBUSY;
+
rte_spinlock_lock(&hw->lock);
hw->adapter_state = HNS3_NIC_STARTING;
@@ -4041,8 +4143,12 @@ hns3_dev_start(struct rte_eth_dev *eth_dev)
hw->adapter_state = HNS3_NIC_STARTED;
rte_spinlock_unlock(&hw->lock);
- hns3_set_rxtx_function(eth_dev);
- hns3_mp_req_start_rxtx(eth_dev);
+
+ ret = hns3_map_rx_interrupt(dev);
+ if (ret)
+ return ret;
+ hns3_set_rxtx_function(dev);
+ hns3_mp_req_start_rxtx(dev);
hns3_info(hw, "hns3 dev start successful!");
return 0;
@@ -4070,18 +4176,50 @@ hns3_do_stop(struct hns3_adapter *hns)
}
static void
-hns3_dev_stop(struct rte_eth_dev *eth_dev)
+hns3_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
- struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint8_t base = 0;
+ uint8_t vec = 0;
+ uint16_t q_id;
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return;
+
+ /* unmap the ring with vector */
+ if (rte_intr_allow_others(intr_handle)) {
+ vec = RTE_INTR_VEC_RXTX_OFFSET;
+ base = RTE_INTR_VEC_RXTX_OFFSET;
+ }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+ (void)hns3_bind_ring_with_vector(dev, vec, false, q_id);
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+ }
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+static void
+hns3_dev_stop(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
PMD_INIT_FUNC_TRACE();
hw->adapter_state = HNS3_NIC_STOPPING;
- hns3_set_rxtx_function(eth_dev);
+ hns3_set_rxtx_function(dev);
rte_wmb();
/* Disable datapath on secondary process. */
- hns3_mp_req_stop_rxtx(eth_dev);
+ hns3_mp_req_stop_rxtx(dev);
/* Prevent crashes when queues are still in use. */
rte_delay_ms(hw->tqps_num);
@@ -4092,6 +4230,7 @@ hns3_dev_stop(struct rte_eth_dev *eth_dev)
hw->adapter_state = HNS3_NIC_CONFIGURED;
}
rte_spinlock_unlock(&hw->lock);
+ hns3_unmap_rx_interrupt(dev);
}
static void
@@ -4748,6 +4887,9 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
.tx_queue_setup = hns3_tx_queue_setup,
.rx_queue_release = hns3_dev_rx_queue_release,
.tx_queue_release = hns3_dev_tx_queue_release,
+ .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
+ .rx_queue_count = hns3_dev_rx_queue_count,
.dev_configure = hns3_dev_configure,
.flow_ctrl_get = hns3_flow_ctrl_get,
.flow_ctrl_set = hns3_flow_ctrl_set,
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index b1736e73a..9b6bc83e4 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1208,6 +1208,36 @@ hns3vf_uninit_vf(struct rte_eth_dev *eth_dev)
hw->io_base = NULL;
}
+static int
+hns3vf_bind_ring_with_vector(struct rte_eth_dev *dev, uint8_t vector_id,
+ bool mmap, uint16_t queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_vf_bind_vector_msg bind_msg;
+ uint16_t code;
+ int ret;
+
+ memset(&bind_msg, 0, sizeof(bind_msg));
+ code = mmap ? HNS3_MBX_MAP_RING_TO_VECTOR :
+ HNS3_MBX_UNMAP_RING_TO_VECTOR;
+ bind_msg.vector_id = vector_id;
+ bind_msg.ring_num = 1;
+ bind_msg.param[0].ring_type = HNS3_RING_TYPE_RX;
+ bind_msg.param[0].tqp_index = queue_id;
+ bind_msg.param[0].int_gl_index = HNS3_RING_GL_RX;
+
+ ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
+ sizeof(bind_msg), false, NULL, 0);
+ if (ret) {
+ hns3_err(hw, "Failed to map TQP %d to vector %d, ret is %d.",
+ queue_id, vector_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int
hns3vf_do_stop(struct hns3_adapter *hns)
{
@@ -1225,18 +1255,51 @@ hns3vf_do_stop(struct hns3_adapter *hns)
}
static void
-hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
+hns3vf_unmap_rx_interrupt(struct rte_eth_dev *dev)
{
- struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint8_t base = 0;
+ uint8_t vec = 0;
+ uint16_t q_id;
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return;
+
+ /* unmap the ring with vector */
+ if (rte_intr_allow_others(intr_handle)) {
+ vec = RTE_INTR_VEC_RXTX_OFFSET;
+ base = RTE_INTR_VEC_RXTX_OFFSET;
+ }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+ (void)hns3vf_bind_ring_with_vector(dev, vec, false,
+ q_id);
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+ }
+ /* Clean datapath event and queue/vec mapping */
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+static void
+hns3vf_dev_stop(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
PMD_INIT_FUNC_TRACE();
hw->adapter_state = HNS3_NIC_STOPPING;
- hns3_set_rxtx_function(eth_dev);
+ hns3_set_rxtx_function(dev);
rte_wmb();
/* Disable datapath on secondary process. */
- hns3_mp_req_stop_rxtx(eth_dev);
+ hns3_mp_req_stop_rxtx(dev);
/* Prevent crashes when queues are still in use. */
rte_delay_ms(hw->tqps_num);
@@ -1246,8 +1309,10 @@ hns3vf_dev_stop(struct rte_eth_dev *eth_dev)
hns3_dev_release_mbufs(hns);
hw->adapter_state = HNS3_NIC_CONFIGURED;
}
- rte_eal_alarm_cancel(hns3vf_service_handler, eth_dev);
+ rte_eal_alarm_cancel(hns3vf_service_handler, dev);
rte_spinlock_unlock(&hw->lock);
+
+ hns3vf_unmap_rx_interrupt(dev);
}
static void
@@ -1329,15 +1394,84 @@ hns3vf_do_start(struct hns3_adapter *hns, bool reset_queue)
}
static int
-hns3vf_dev_start(struct rte_eth_dev *eth_dev)
+hns3vf_map_rx_interrupt(struct rte_eth_dev *dev)
{
- struct hns3_adapter *hns = eth_dev->data->dev_private;
- struct hns3_hw *hw = &hns->hw;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t intr_vector;
+ uint8_t base = 0;
+ uint8_t vec = 0;
+ uint16_t q_id;
int ret;
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return 0;
+
+ /* disable uio/vfio intr/eventfd mapping */
+ rte_intr_disable(intr_handle);
+
+ /* check and configure queue intr-vector mapping */
+ if (rte_intr_cap_multiple(intr_handle) ||
+ !RTE_ETH_DEV_SRIOV(dev).active) {
+ intr_vector = dev->data->nb_rx_queues;
+ /* It creates event fd for each intr vector when MSIX is used */
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -EINVAL;
+ }
+ if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ hns3_err(hw, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ ret = -ENOMEM;
+ goto vf_alloc_intr_vec_error;
+ }
+ }
+
+ if (rte_intr_allow_others(intr_handle)) {
+ vec = RTE_INTR_VEC_RXTX_OFFSET;
+ base = RTE_INTR_VEC_RXTX_OFFSET;
+ }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (q_id = 0; q_id < dev->data->nb_rx_queues; q_id++) {
+ ret = hns3vf_bind_ring_with_vector(dev, vec, true,
+ q_id);
+ if (ret)
+ goto vf_bind_vector_error;
+ intr_handle->intr_vec[q_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+ }
+ rte_intr_enable(intr_handle);
+ return 0;
+
+vf_bind_vector_error:
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+ return ret;
+vf_alloc_intr_vec_error:
+ rte_intr_efd_disable(intr_handle);
+ return ret;
+}
+
+static int
+hns3vf_dev_start(struct rte_eth_dev *dev)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret = 0;
+
PMD_INIT_FUNC_TRACE();
if (rte_atomic16_read(&hw->reset.resetting))
return -EBUSY;
+
rte_spinlock_lock(&hw->lock);
hw->adapter_state = HNS3_NIC_STARTING;
ret = hns3vf_do_start(hns, true);
@@ -1348,11 +1482,14 @@ hns3vf_dev_start(struct rte_eth_dev *eth_dev)
}
hw->adapter_state = HNS3_NIC_STARTED;
rte_spinlock_unlock(&hw->lock);
- hns3_set_rxtx_function(eth_dev);
- hns3_mp_req_start_rxtx(eth_dev);
- rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler,
- eth_dev);
- return 0;
+
+ ret = hns3vf_map_rx_interrupt(dev);
+ if (ret)
+ return ret;
+ hns3_set_rxtx_function(dev);
+ hns3_mp_req_start_rxtx(dev);
+ rte_eal_alarm_set(HNS3VF_SERVICE_INTERVAL, hns3vf_service_handler, dev);
+ return ret;
}
static bool
@@ -1685,6 +1822,9 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = {
.tx_queue_setup = hns3_tx_queue_setup,
.rx_queue_release = hns3_dev_rx_queue_release,
.tx_queue_release = hns3_dev_tx_queue_release,
+ .rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
+ .rx_queue_count = hns3_dev_rx_queue_count,
.dev_configure = hns3vf_dev_configure,
.mac_addr_add = hns3vf_add_mac_addr,
.mac_addr_remove = hns3vf_remove_mac_addr,
diff --git a/drivers/net/hns3/hns3_mbx.h b/drivers/net/hns3/hns3_mbx.h
index 01eddb845..d1a6bfead 100644
--- a/drivers/net/hns3/hns3_mbx.h
+++ b/drivers/net/hns3/hns3_mbx.h
@@ -104,6 +104,19 @@ struct hns3_mbx_pf_to_vf_cmd {
uint16_t msg[8];
};
+struct hns3_ring_chain_param {
+ uint8_t ring_type;
+ uint8_t tqp_index;
+ uint8_t int_gl_index;
+};
+
+#define HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM 4
+struct hns3_vf_bind_vector_msg {
+ uint8_t vector_id;
+ uint8_t ring_num;
+ struct hns3_ring_chain_param param[HNS3_MBX_MAX_RING_CHAIN_PARAM_NUM];
+};
+
struct hns3_vf_rst_cmd {
uint8_t dest_vfid;
uint8_t vf_rst;
diff --git a/drivers/net/hns3/hns3_regs.h b/drivers/net/hns3/hns3_regs.h
index 2f5faafe1..42581df67 100644
--- a/drivers/net/hns3/hns3_regs.h
+++ b/drivers/net/hns3/hns3_regs.h
@@ -83,6 +83,9 @@
#define HNS3_RING_EN_B 0
+#define HNS3_VECTOR_REG_OFFSET 0x4
+#define HNS3_VECTOR_VF_OFFSET 0x100000
+
#define HNS3_TQP_REG_OFFSET 0x80000
#define HNS3_TQP_REG_SIZE 0x200
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index 816644713..e7f0c8fc9 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -395,6 +395,57 @@ hns3_reset_all_queues(struct hns3_adapter *hns)
return 0;
}
+uint32_t
+hns3_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct hns3_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ return rxq->nb_rx_desc - rxq->nb_rx_hold;
+}
+
+void
+hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tqp_int_num, bool en)
+{
+ uint32_t addr, value;
+
+ addr = HNS3_TQP_INTR_CTRL_REG + tqp_int_num * HNS3_VECTOR_REG_OFFSET;
+ value = en ? 1 : 0;
+
+ hns3_write_dev(hw, addr, value);
+}
+
+int
+hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return -ENOTSUP;
+
+ /* enable the vectors */
+ hns3_tqp_intr_enable(hw, queue_id, true);
+
+ return rte_intr_ack(intr_handle);
+}
+
+int
+hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (dev->data->dev_conf.intr_conf.rxq == 0)
+ return -ENOTSUP;
+
+ /* disable the vectors */
+ hns3_tqp_intr_enable(hw, queue_id, false);
+
+ return 0;
+}
+
static int
hns3_dev_rx_queue_start(struct hns3_adapter *hns, uint16_t idx)
{
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index daf51f409..60c3f414b 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -295,6 +295,9 @@ void hns3_dev_rx_queue_release(void *queue);
void hns3_dev_tx_queue_release(void *queue);
void hns3_free_all_queues(struct rte_eth_dev *dev);
int hns3_reset_all_queues(struct hns3_adapter *hns);
+uint32_t hns3_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int hns3_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+int hns3_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
int hns3_start_queues(struct hns3_adapter *hns, bool reset_queue);
int hns3_stop_queues(struct hns3_adapter *hns, bool reset_queue);
void hns3_dev_release_mbufs(struct hns3_adapter *hns);
@@ -311,4 +314,5 @@ uint16_t hns3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
const uint32_t *hns3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev);
+void hns3_tqp_intr_enable(struct hns3_hw *hw, uint16_t tqp_int_num, bool en);
#endif /* _HNS3_RXTX_H_ */
--
2.23.0
* [dpdk-dev] [PATCH v2 2/9] net/hns3: get link state change through mailbox
From: Wei Hu (Xavier) @ 2019-12-14 10:29 UTC
To: dev
From: Hongbo Zheng <zhenghongbo3@huawei.com>
Currently, the firmware sends a message to the PF driver through the
mailbox when the link status changes, so the hns3 PMD driver can usually
detect a link state change faster through this message than by polling.
In some extreme cases, however, this way is no faster than the existing
method of regularly updating the link status by issuing a command every
second in the PF driver, because the firmware processes mailbox and
command messages in parallel. So we keep the timer-based link status
update in the PF driver, and additionally query the link status by
issuing a command to the firmware in the '.link_update' ops
implementation function named hns3_dev_link_update, to avoid reporting
an out-of-date link status.
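For context, a short sketch (not part of this patch) of how the refreshed
state surfaces to an application: rte_eth_link_get_nowait() invokes the
PMD's '.link_update' ops, which after this change also queries the
firmware, so the reported status is current. The helper name below is
illustrative only:

    #include <stdio.h>
    #include <string.h>
    #include <rte_ethdev.h>

    static void
    print_link_status(uint16_t port_id)
    {
            struct rte_eth_link link;

            memset(&link, 0, sizeof(link));
            /* Invokes hns3_dev_link_update() for an hns3 port. */
            rte_eth_link_get_nowait(port_id, &link);
            if (link.link_status == ETH_LINK_UP)
                    printf("Port %u: link up, %u Mbps, %s-duplex\n",
                           port_id, link.link_speed,
                           link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                           "full" : "half");
            else
                    printf("Port %u: link down\n", port_id);
    }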
Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Huisong Li <lihuisong@huawei.com>
---
v1 -> v2:
1. Add querying the link status by issuing a command to the firmware
   in the '.link_update' ops implementation function named
   hns3_dev_link_update, to avoid reporting an out-of-date link status.
---
drivers/net/hns3/hns3_ethdev.c | 14 +++++++++++--
drivers/net/hns3/hns3_ethdev.h | 1 +
drivers/net/hns3/hns3_mbx.c | 37 ++++++++++++++++++++++++++++++++++
drivers/net/hns3/hns3_mbx.h | 8 ++++++++
4 files changed, 58 insertions(+), 2 deletions(-)
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index bf0ab458f..5795b3b34 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -77,6 +77,7 @@ static enum hns3_reset_level hns3_get_reset_level(struct hns3_adapter *hns,
static int hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int hns3_vlan_pvid_configure(struct hns3_adapter *hns, uint16_t pvid,
int on);
+static int hns3_update_speed_duplex(struct rte_eth_dev *eth_dev);
static void
hns3_pf_disable_irq0(struct hns3_hw *hw)
@@ -218,6 +219,8 @@ hns3_interrupt_handler(void *param)
hns3_schedule_reset(hns);
} else if (event_cause == HNS3_VECTOR0_EVENT_RST)
hns3_schedule_reset(hns);
+ else if (event_cause == HNS3_VECTOR0_EVENT_MBX)
+ hns3_dev_handle_mbx_msg(hw);
else
hns3_err(hw, "Received unknown event");
@@ -2302,6 +2305,11 @@ hns3_dev_link_update(struct rte_eth_dev *eth_dev,
struct hns3_mac *mac = &hw->mac;
struct rte_eth_link new_link;
+ if (!hns3_is_reset_pending(hns)) {
+ hns3_update_speed_duplex(eth_dev);
+ hns3_update_link_status(hw);
+ }
+
memset(&new_link, 0, sizeof(new_link));
switch (mac->link_speed) {
case ETH_SPEED_NUM_10M:
@@ -3806,14 +3814,16 @@ hns3_get_mac_link_status(struct hns3_hw *hw)
return !!link_status;
}
-static void
+void
hns3_update_link_status(struct hns3_hw *hw)
{
int state;
state = hns3_get_mac_link_status(hw);
- if (state != hw->mac.link_status)
+ if (state != hw->mac.link_status) {
hw->mac.link_status = state;
+ hns3_warn(hw, "Link status changed to %s!", state ? "up" : "down");
+ }
}
static void
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index e9a3fe410..004cd75a9 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -631,6 +631,7 @@ int hns3_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op, void *arg);
bool hns3_is_reset_pending(struct hns3_adapter *hns);
bool hns3vf_is_reset_pending(struct hns3_adapter *hns);
+void hns3_update_link_status(struct hns3_hw *hw);
static inline bool
is_reset_pending(struct hns3_adapter *hns)
diff --git a/drivers/net/hns3/hns3_mbx.c b/drivers/net/hns3/hns3_mbx.c
index c1647af4b..26807bc4b 100644
--- a/drivers/net/hns3/hns3_mbx.c
+++ b/drivers/net/hns3/hns3_mbx.c
@@ -282,6 +282,40 @@ hns3_update_resp_position(struct hns3_hw *hw, uint32_t resp_msg)
resp->tail = tail;
}
+static void
+hns3_link_fail_parse(struct hns3_hw *hw, uint8_t link_fail_code)
+{
+ switch (link_fail_code) {
+ case HNS3_MBX_LF_NORMAL:
+ break;
+ case HNS3_MBX_LF_REF_CLOCK_LOST:
+ hns3_warn(hw, "Reference clock lost!");
+ break;
+ case HNS3_MBX_LF_XSFP_TX_DISABLE:
+ hns3_warn(hw, "SFP tx is disabled!");
+ break;
+ case HNS3_MBX_LF_XSFP_ABSENT:
+ hns3_warn(hw, "SFP is absent!");
+ break;
+ default:
+ hns3_warn(hw, "Unknown fail code:%u!", link_fail_code);
+ break;
+ }
+}
+
+static void
+hns3_handle_link_change_event(struct hns3_hw *hw,
+ struct hns3_mbx_pf_to_vf_cmd *req)
+{
+#define LINK_STATUS_OFFSET 1
+#define LINK_FAIL_CODE_OFFSET 2
+
+ if (!req->msg[LINK_STATUS_OFFSET])
+ hns3_link_fail_parse(hw, req->msg[LINK_FAIL_CODE_OFFSET]);
+
+ hns3_update_link_status(hw);
+}
+
void
hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
{
@@ -335,6 +369,9 @@ hns3_dev_handle_mbx_msg(struct hns3_hw *hw)
hns3_mbx_handler(hw);
break;
+ case HNS3_MBX_PUSH_LINK_STATUS:
+ hns3_handle_link_change_event(hw, req);
+ break;
default:
hns3_err(hw,
"VF received unsupported(%d) mbx msg from PF",
diff --git a/drivers/net/hns3/hns3_mbx.h b/drivers/net/hns3/hns3_mbx.h
index d1a6bfead..3722c8760 100644
--- a/drivers/net/hns3/hns3_mbx.h
+++ b/drivers/net/hns3/hns3_mbx.h
@@ -41,6 +41,7 @@ enum HNS3_MBX_OPCODE {
HNS3_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
HNS3_MBX_HANDLE_VF_TBL = 38, /* (VF -> PF) store/clear hw cfg tbl */
+ HNS3_MBX_PUSH_LINK_STATUS = 201, /* (IMP -> PF) get port link status */
};
/* below are per-VF mac-vlan subcodes */
@@ -64,6 +65,13 @@ enum hns3_mbx_tbl_cfg_subcode {
HNS3_MBX_VPORT_LIST_CLEAR = 0,
};
+enum hns3_mbx_link_fail_subcode {
+ HNS3_MBX_LF_NORMAL = 0,
+ HNS3_MBX_LF_REF_CLOCK_LOST,
+ HNS3_MBX_LF_XSFP_TX_DISABLE,
+ HNS3_MBX_LF_XSFP_ABSENT,
+};
+
#define HNS3_MBX_MAX_MSG_SIZE 16
#define HNS3_MBX_MAX_RESP_DATA_SIZE 8
#define HNS3_MBX_RING_MAP_BASIC_MSG_NUM 3
--
2.23.0