DPDK patches and discussions
From: Junlong Wang <wang.junlong1@zte.com.cn>
To: ferruh.yigit@amd.com
Cc: dev@dpdk.org, wang.yong19@zte.com.cn,
	Junlong Wang <wang.junlong1@zte.com.cn>
Subject: [PATCH v10 07/10] net/zxdh: add configure zxdh intr implementation
Date: Mon,  4 Nov 2024 19:58:50 +0800	[thread overview]
Message-ID: <20241104115856.2795213-8-wang.junlong1@zte.com.cn> (raw)
In-Reply-To: <20241104115856.2795213-1-wang.junlong1@zte.com.cn>


Configure zxdh interrupts, including the risc and dtb interrupts, and release them on teardown.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/zxdh_ethdev.c | 315 +++++++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_ethdev.h |   6 +
 drivers/net/zxdh/zxdh_msg.c    | 210 ++++++++++++++++++++++
 drivers/net/zxdh/zxdh_msg.h    |  16 +-
 drivers/net/zxdh/zxdh_pci.c    |  31 ++++
 drivers/net/zxdh/zxdh_pci.h    |  11 ++
 drivers/net/zxdh/zxdh_queue.h  | 110 ++++++++++++
 drivers/net/zxdh/zxdh_rxtx.h   |  55 ++++++
 8 files changed, 752 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/zxdh/zxdh_queue.h
 create mode 100644 drivers/net/zxdh/zxdh_rxtx.h

diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index da5ac3ccd1..1a3658e74b 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -11,6 +11,7 @@
 #include "zxdh_pci.h"
 #include "zxdh_msg.h"
 #include "zxdh_common.h"
+#include "zxdh_queue.h"
 
 struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
 
@@ -26,6 +27,315 @@ zxdh_vport_to_vfid(union zxdh_virport_num v)
 		return (v.epid * 8 + v.pfid) + 1152;
 }
 
+static void
+zxdh_queues_unbind_intr(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int32_t i;
+
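+	/* vqs[] holds queue pairs: even index is the RX vq, odd index the TX vq; unbind both. */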
+	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+		ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);
+		ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2 + 1], ZXDH_MSI_NO_VECTOR);
+	}
+}
+
+static int32_t
+zxdh_intr_unmask(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (rte_intr_ack(dev->intr_handle) < 0)
+		return -1;
+
+	hw->use_msix = zxdh_pci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
+
+	return 0;
+}
+
+static void
+zxdh_devconf_intr_handler(void *param)
+{
+	struct rte_eth_dev *dev = param;
+
+	if (zxdh_intr_unmask(dev) < 0)
+		PMD_DRV_LOG(ERR, "interrupt enable failed");
+}
+
+/* Interrupt handler triggered by NIC for handling specific interrupt. */
+static void
+zxdh_fromriscv_intr_handler(void *param)
+{
+	struct rte_eth_dev *dev = param;
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint64_t virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET);
+
+	if (hw->is_pf) {
+		PMD_DRV_LOG(DEBUG, "zxdh_risc2pf_intr_handler");
+		zxdh_bar_irq_recv(ZXDH_MSG_CHAN_END_RISC, ZXDH_MSG_CHAN_END_PF, virt_addr, dev);
+	} else {
+		PMD_DRV_LOG(DEBUG, "zxdh_risc2vf_intr_handler");
+		zxdh_bar_irq_recv(ZXDH_MSG_CHAN_END_RISC, ZXDH_MSG_CHAN_END_VF, virt_addr, dev);
+	}
+}
+
+/* Interrupt handler triggered by NIC for handling specific interrupt. */
+static void
+zxdh_frompfvf_intr_handler(void *param)
+{
+	struct rte_eth_dev *dev = param;
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint64_t virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] +
+				ZXDH_MSG_CHAN_PFVFSHARE_OFFSET);
+
+	if (hw->is_pf) {
+		PMD_DRV_LOG(DEBUG, "zxdh_vf2pf_intr_handler");
+		zxdh_bar_irq_recv(ZXDH_MSG_CHAN_END_VF, ZXDH_MSG_CHAN_END_PF, virt_addr, dev);
+	} else {
+		PMD_DRV_LOG(DEBUG, "zxdh_pf2vf_intr_handler");
+		zxdh_bar_irq_recv(ZXDH_MSG_CHAN_END_PF, ZXDH_MSG_CHAN_END_VF, virt_addr, dev);
+	}
+}
+
+static void
+zxdh_intr_cb_reg(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+		rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);
+
+	/* register callback to update dev config intr */
+	rte_intr_callback_register(dev->intr_handle, zxdh_devconf_intr_handler, dev);
+	/* Register risc_v to pf interrupt callback */
+	struct rte_intr_handle *tmp = hw->risc_intr +
+			(ZXDH_MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE);
+
+	rte_intr_callback_register(tmp, zxdh_frompfvf_intr_handler, dev);
+
+	tmp = hw->risc_intr + (ZXDH_MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE);
+	rte_intr_callback_register(tmp, zxdh_fromriscv_intr_handler, dev);
+}
+
+static void
+zxdh_intr_cb_unreg(struct rte_eth_dev *dev)
+{
+	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+		rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);
+
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	/* unregister callback to update dev config intr */
+	rte_intr_callback_unregister(dev->intr_handle, zxdh_devconf_intr_handler, dev);
+	/* Unregister risc_v to pf interrupt callback */
+	struct rte_intr_handle *tmp = hw->risc_intr +
+			(ZXDH_MSIX_FROM_PFVF - ZXDH_MSIX_INTR_MSG_VEC_BASE);
+
+	rte_intr_callback_unregister(tmp, zxdh_frompfvf_intr_handler, dev);
+	tmp = hw->risc_intr + (ZXDH_MSIX_FROM_RISCV - ZXDH_MSIX_INTR_MSG_VEC_BASE);
+	rte_intr_callback_unregister(tmp, zxdh_fromriscv_intr_handler, dev);
+}
+
+static int32_t
+zxdh_intr_disable(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (!hw->intr_enabled)
+		return 0;
+
+	zxdh_intr_cb_unreg(dev);
+	if (rte_intr_disable(dev->intr_handle) < 0)
+		return -1;
+
+	hw->intr_enabled = 0;
+	return 0;
+}
+
+static int32_t
+zxdh_intr_enable(struct rte_eth_dev *dev)
+{
+	int ret = 0;
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (!hw->intr_enabled) {
+		zxdh_intr_cb_reg(dev);
+		ret = rte_intr_enable(dev->intr_handle);
+		if (unlikely(ret))
+			PMD_DRV_LOG(ERR, "Failed to enable %s intr", dev->data->name);
+
+		hw->intr_enabled = 1;
+	}
+	return ret;
+}
+
+static int32_t
+zxdh_intr_release(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+		ZXDH_VTPCI_OPS(hw)->set_config_irq(hw, ZXDH_MSI_NO_VECTOR);
+
+	zxdh_queues_unbind_intr(dev);
+	zxdh_intr_disable(dev);
+
+	rte_intr_efd_disable(dev->intr_handle);
+	rte_intr_vec_list_free(dev->intr_handle);
+	rte_free(hw->risc_intr);
+	hw->risc_intr = NULL;
+	rte_free(hw->dtb_intr);
+	hw->dtb_intr = NULL;
+	return 0;
+}
+
+static int32_t
+zxdh_setup_risc_interrupts(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint8_t i;
+
+	if (!hw->risc_intr) {
+		PMD_DRV_LOG(DEBUG, "allocating risc_intr");
+		hw->risc_intr = rte_zmalloc("risc_intr",
+			ZXDH_MSIX_INTR_MSG_VEC_NUM * sizeof(struct rte_intr_handle), 0);
+		if (hw->risc_intr == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to allocate risc_intr");
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < ZXDH_MSIX_INTR_MSG_VEC_NUM; i++) {
+		if (dev->intr_handle->efds[i] < 0) {
+			PMD_DRV_LOG(ERR, "[%u]risc interrupt fd is invalid", i);
+			rte_free(hw->risc_intr);
+			hw->risc_intr = NULL;
+			return -1;
+		}
+
+		struct rte_intr_handle *intr_handle = hw->risc_intr + i;
+
+		intr_handle->fd = dev->intr_handle->efds[i];
+		intr_handle->type = dev->intr_handle->type;
+	}
+
+	return 0;
+}
+
+static int32_t
+zxdh_setup_dtb_interrupts(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (!hw->dtb_intr) {
+		hw->dtb_intr = rte_zmalloc("dtb_intr", sizeof(struct rte_intr_handle), 0);
+		if (hw->dtb_intr == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to allocate dtb_intr");
+			return -ENOMEM;
+		}
+	}
+
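+	/* Vector 0 (dev config) has no eventfd, so vector N maps to efds[N - 1]. */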
+	if (dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1] < 0) {
+		PMD_DRV_LOG(ERR, "[%d]dtb interrupt fd is invalid", ZXDH_MSIX_INTR_DTB_VEC - 1);
+		rte_free(hw->dtb_intr);
+		hw->dtb_intr = NULL;
+		return -1;
+	}
+	hw->dtb_intr->fd = dev->intr_handle->efds[ZXDH_MSIX_INTR_DTB_VEC - 1];
+	hw->dtb_intr->type = dev->intr_handle->type;
+	return 0;
+}
+
+static int32_t
+zxdh_queues_bind_intr(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int32_t i;
+	uint16_t vec;
+
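+	/* RX vqs sit at even ring indexes; they get dedicated vectors only if rxq interrupts are enabled. */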
+	if (!dev->data->dev_conf.intr_conf.rxq) {
+		for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+			vec = ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw,
+					hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);
+			PMD_DRV_LOG(DEBUG, "vq%d irq set 0x%x, get 0x%x",
+					i * 2, ZXDH_MSI_NO_VECTOR, vec);
+		}
+	} else {
+		for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+			vec = ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw,
+					hw->vqs[i * 2], i + ZXDH_QUEUE_INTR_VEC_BASE);
+			PMD_DRV_LOG(DEBUG, "vq%d irq set %d, get %d",
+					i * 2, i + ZXDH_QUEUE_INTR_VEC_BASE, vec);
+		}
+	}
+	/* mask all txq intr */
+	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
+		vec = ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw,
+				hw->vqs[(i * 2) + 1], ZXDH_MSI_NO_VECTOR);
+		PMD_DRV_LOG(DEBUG, "vq%d irq set 0x%x, get 0x%x",
+				(i * 2) + 1, ZXDH_MSI_NO_VECTOR, vec);
+	}
+	return 0;
+}
+
+static int32_t
+zxdh_configure_intr(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int32_t ret = 0;
+
+	if (!rte_intr_cap_multiple(dev->intr_handle)) {
+		PMD_DRV_LOG(ERR, "Multiple intr vectors not supported");
+		return -ENOTSUP;
+	}
+	zxdh_intr_release(dev);
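+	/* eventfds for the non-queue vectors: the risc message vectors plus the DTB vector. */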
+	uint8_t nb_efd = ZXDH_MSIX_INTR_DTB_VEC_NUM + ZXDH_MSIX_INTR_MSG_VEC_NUM;
+
+	if (dev->data->dev_conf.intr_conf.rxq)
+		nb_efd += dev->data->nb_rx_queues;
+
+	if (rte_intr_efd_enable(dev->intr_handle, nb_efd)) {
+		PMD_DRV_LOG(ERR, "Failed to create eventfd");
+		return -1;
+	}
+
+	if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",
+					hw->max_queue_pairs + ZXDH_INTR_NONQUE_NUM)) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %u rxq vectors",
+					hw->max_queue_pairs + ZXDH_INTR_NONQUE_NUM);
+		return -ENOMEM;
+	}
+	PMD_DRV_LOG(DEBUG, "allocate %u rxq vectors", dev->intr_handle->vec_list_size);
+	if (zxdh_setup_risc_interrupts(dev) != 0) {
+		PMD_DRV_LOG(ERR, "Error setting up risc_v interrupts!");
+		ret = -1;
+		goto free_intr_vec;
+	}
+	if (zxdh_setup_dtb_interrupts(dev) != 0) {
+		PMD_DRV_LOG(ERR, "Error setting up dtb interrupts!");
+		ret = -1;
+		goto free_intr_vec;
+	}
+
+	if (zxdh_queues_bind_intr(dev) < 0) {
+		PMD_DRV_LOG(ERR, "Failed to bind queue/interrupt");
+		ret = -1;
+		goto free_intr_vec;
+	}
+
+	if (zxdh_intr_enable(dev) < 0) {
+		PMD_DRV_LOG(ERR, "interrupt enable failed");
+		ret = -1;
+		goto free_intr_vec;
+	}
+	return 0;
+
+free_intr_vec:
+	zxdh_intr_release(dev);
+	return ret;
+}
+
 static int32_t
 zxdh_init_device(struct rte_eth_dev *eth_dev)
 {
@@ -142,9 +452,14 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
 	if (ret != 0)
 		goto err_zxdh_init;
 
+	ret = zxdh_configure_intr(eth_dev);
+	if (ret != 0)
+		goto err_zxdh_init;
+
 	return ret;
 
 err_zxdh_init:
+	zxdh_intr_release(eth_dev);
 	zxdh_bar_msg_chan_exit();
 	rte_free(eth_dev->data->mac_addrs);
 	eth_dev->data->mac_addrs = NULL;
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 7b7bb16be8..65726f3a20 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -7,6 +7,8 @@
 
 #include <rte_ether.h>
 #include "ethdev_driver.h"
+#include <rte_interrupts.h>
+#include <eal_interrupts.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -43,6 +45,9 @@ struct zxdh_hw {
 	struct rte_eth_dev *eth_dev;
 	struct zxdh_pci_common_cfg *common_cfg;
 	struct zxdh_net_config *dev_cfg;
+	struct rte_intr_handle *risc_intr;
+	struct rte_intr_handle *dtb_intr;
+	struct zxdh_virtqueue **vqs;
 	union zxdh_virport_num vport;
 
 	uint64_t bar_addr[ZXDH_NUM_BARS];
@@ -59,6 +64,7 @@ struct zxdh_hw {
 
 	uint8_t *isr;
 	uint8_t weak_barriers;
+	uint8_t intr_enabled;
 	uint8_t use_msix;
 	uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
 
diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c
index 336ba217d3..53cf972f86 100644
--- a/drivers/net/zxdh/zxdh_msg.c
+++ b/drivers/net/zxdh/zxdh_msg.c
@@ -95,6 +95,12 @@
 #define ZXDH_BAR_CHAN_INDEX_SEND  0
 #define ZXDH_BAR_CHAN_INDEX_RECV  1
 
+#define ZXDH_BAR_CHAN_MSG_SYNC     0
+#define ZXDH_BAR_CHAN_MSG_NO_EMEC  0
+#define ZXDH_BAR_CHAN_MSG_EMEC     1
+#define ZXDH_BAR_CHAN_MSG_NO_ACK   0
+#define ZXDH_BAR_CHAN_MSG_ACK      1
+
 uint8_t subchan_id_tbl[ZXDH_BAR_MSG_SRC_NUM][ZXDH_BAR_MSG_DST_NUM] = {
 	{ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_SEND},
 	{ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_SEND, ZXDH_BAR_CHAN_INDEX_RECV},
@@ -137,6 +143,39 @@ static struct zxdh_seqid_ring g_seqid_ring;
 static uint8_t tmp_msg_header[ZXDH_BAR_MSG_ADDR_CHAN_INTERVAL];
 static rte_spinlock_t chan_lock;
 
+zxdh_bar_chan_msg_recv_callback msg_recv_func_tbl[ZXDH_BAR_MSG_MODULE_NUM];
+
+static inline const char *
+zxdh_module_id_name(int val)
+{
+	switch (val) {
+	case ZXDH_BAR_MODULE_DBG:        return "ZXDH_BAR_MODULE_DBG";
+	case ZXDH_BAR_MODULE_TBL:        return "ZXDH_BAR_MODULE_TBL";
+	case ZXDH_BAR_MODULE_MISX:       return "ZXDH_BAR_MODULE_MISX";
+	case ZXDH_BAR_MODULE_SDA:        return "ZXDH_BAR_MODULE_SDA";
+	case ZXDH_BAR_MODULE_RDMA:       return "ZXDH_BAR_MODULE_RDMA";
+	case ZXDH_BAR_MODULE_DEMO:       return "ZXDH_BAR_MODULE_DEMO";
+	case ZXDH_BAR_MODULE_SMMU:       return "ZXDH_BAR_MODULE_SMMU";
+	case ZXDH_BAR_MODULE_MAC:        return "ZXDH_BAR_MODULE_MAC";
+	case ZXDH_BAR_MODULE_VDPA:       return "ZXDH_BAR_MODULE_VDPA";
+	case ZXDH_BAR_MODULE_VQM:        return "ZXDH_BAR_MODULE_VQM";
+	case ZXDH_BAR_MODULE_NP:         return "ZXDH_BAR_MODULE_NP";
+	case ZXDH_BAR_MODULE_VPORT:      return "ZXDH_BAR_MODULE_VPORT";
+	case ZXDH_BAR_MODULE_BDF:        return "ZXDH_BAR_MODULE_BDF";
+	case ZXDH_BAR_MODULE_RISC_READY: return "ZXDH_BAR_MODULE_RISC_READY";
+	case ZXDH_BAR_MODULE_REVERSE:    return "ZXDH_BAR_MODULE_REVERSE";
+	case ZXDH_BAR_MDOULE_NVME:       return "ZXDH_BAR_MDOULE_NVME";
+	case ZXDH_BAR_MDOULE_NPSDK:      return "ZXDH_BAR_MDOULE_NPSDK";
+	case ZXDH_BAR_MODULE_NP_TODO:    return "ZXDH_BAR_MODULE_NP_TODO";
+	case ZXDH_MODULE_BAR_MSG_TO_PF:  return "ZXDH_MODULE_BAR_MSG_TO_PF";
+	case ZXDH_MODULE_BAR_MSG_TO_VF:  return "ZXDH_MODULE_BAR_MSG_TO_VF";
+	case ZXDH_MODULE_FLASH:          return "ZXDH_MODULE_FLASH";
+	case ZXDH_BAR_MODULE_OFFSET_GET: return "ZXDH_BAR_MODULE_OFFSET_GET";
+	case ZXDH_BAR_EVENT_OVS_WITH_VCB: return "ZXDH_BAR_EVENT_OVS_WITH_VCB";
+	default: return "NA";
+	}
+}
+
 static uint16_t
 zxdh_pcie_id_to_hard_lock(uint16_t src_pcieid, uint8_t dst)
 {
@@ -825,3 +864,174 @@ zxdh_msg_chan_enable(struct rte_eth_dev *dev)
 
 	return zxdh_bar_chan_enable(&misx_info, &hw->vport.vport);
 }
+
+static uint64_t
+zxdh_recv_addr_get(uint8_t src_type, uint8_t dst_type, uint64_t virt_addr)
+{
+	uint8_t chan_id = 0;
+	uint8_t subchan_id = 0;
+	uint8_t src = 0;
+	uint8_t dst = 0;
+
+	src = zxdh_bar_msg_dst_index_trans(src_type);
+	dst = zxdh_bar_msg_src_index_trans(dst_type);
+	if (src == ZXDH_BAR_MSG_SRC_ERR || dst == ZXDH_BAR_MSG_DST_ERR)
+		return 0;
+
+	chan_id = chan_id_tbl[dst][src];
+	subchan_id = 1 - subchan_id_tbl[dst][src];
+
+	return zxdh_subchan_addr_cal(virt_addr, chan_id, subchan_id);
+}
+
+static void
+zxdh_bar_msg_ack_async_msg_proc(struct zxdh_bar_msg_header *msg_header,
+		uint8_t *receiver_buff)
+{
+	struct zxdh_seqid_item *reps_info = &g_seqid_ring.reps_info_tbl[msg_header->msg_id];
+
+	if (reps_info->flag != ZXDH_REPS_INFO_FLAG_USED) {
+		PMD_MSG_LOG(ERR, "msg_id: %u is released", msg_header->msg_id);
+		return;
+	}
+	if (msg_header->len > reps_info->buffer_len - 4) {
+		PMD_MSG_LOG(ERR, "reps_buf_len is %u, but reps_msg_len is %u",
+				reps_info->buffer_len, msg_header->len + 4);
+		goto free_id;
+	}
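+	/* Reply buffer layout: byte 0 reply flag, bytes 1-2 payload length, payload from byte 4. */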
+	uint8_t *reps_buffer = (uint8_t *)reps_info->reps_addr;
+
+	rte_memcpy(reps_buffer + 4, receiver_buff, msg_header->len);
+	*(uint16_t *)(reps_buffer + 1) = msg_header->len;
+	*(uint8_t *)(reps_info->reps_addr) = ZXDH_REPS_HEADER_REPLYED;
+
+free_id:
+	zxdh_bar_chan_msgid_free(msg_header->msg_id);
+}
+
+static void
+zxdh_bar_msg_sync_msg_proc(uint64_t reply_addr,
+		struct zxdh_bar_msg_header *msg_header,
+		uint8_t *receiver_buff, void *dev)
+{
+	uint16_t reps_len = 0;
+	uint8_t *reps_buffer = NULL;
+
+	reps_buffer = rte_malloc(NULL, ZXDH_BAR_MSG_PAYLOAD_MAX_LEN, 0);
+	if (reps_buffer == NULL)
+		return;
+
+	zxdh_bar_chan_msg_recv_callback recv_func = msg_recv_func_tbl[msg_header->module_id];
+
+	recv_func(receiver_buff, msg_header->len, reps_buffer, &reps_len, dev);
+	msg_header->ack = ZXDH_BAR_CHAN_MSG_ACK;
+	msg_header->len = reps_len;
+	zxdh_bar_chan_msg_header_set(reply_addr, msg_header);
+	zxdh_bar_chan_msg_payload_set(reply_addr, reps_buffer, reps_len);
+	zxdh_bar_chan_msg_valid_set(reply_addr, ZXDH_BAR_MSG_CHAN_USABLE);
+	rte_free(reps_buffer);
+}
+
+static uint64_t
+zxdh_reply_addr_get(uint8_t sync, uint8_t src_type,
+		uint8_t dst_type, uint64_t virt_addr)
+{
+	uint64_t recv_rep_addr = 0;
+	uint8_t chan_id = 0;
+	uint8_t subchan_id = 0;
+	uint8_t src = 0;
+	uint8_t dst = 0;
+
+	src = zxdh_bar_msg_dst_index_trans(src_type);
+	dst = zxdh_bar_msg_src_index_trans(dst_type);
+	if (src == ZXDH_BAR_MSG_SRC_ERR || dst == ZXDH_BAR_MSG_DST_ERR)
+		return 0;
+
+	chan_id = chan_id_tbl[dst][src];
+	subchan_id = 1 - subchan_id_tbl[dst][src];
+
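+	/* Sync replies appear to use the mirrored subchannel; async replies the original one. */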
+	if (sync == ZXDH_BAR_CHAN_MSG_SYNC)
+		recv_rep_addr = zxdh_subchan_addr_cal(virt_addr, chan_id, subchan_id);
+	else
+		recv_rep_addr = zxdh_subchan_addr_cal(virt_addr, chan_id, 1 - subchan_id);
+
+	return recv_rep_addr;
+}
+
+static uint16_t
+zxdh_bar_chan_msg_header_check(struct zxdh_bar_msg_header *msg_header)
+{
+	uint16_t len = 0;
+	uint8_t module_id = 0;
+
+	if (msg_header->valid != ZXDH_BAR_MSG_CHAN_USED) {
+		PMD_MSG_LOG(ERR, "recv header ERR: valid field is not marked used.");
+		return ZXDH_BAR_MSG_ERR_MODULE;
+	}
+	module_id = msg_header->module_id;
+
+	if (module_id >= (uint8_t)ZXDH_BAR_MSG_MODULE_NUM) {
+		PMD_MSG_LOG(ERR, "recv header ERR: invalid module_id: %u.", module_id);
+		return ZXDH_BAR_MSG_ERR_MODULE;
+	}
+	len = msg_header->len;
+
+	if (len > ZXDH_BAR_MSG_PAYLOAD_MAX_LEN) {
+		PMD_MSG_LOG(ERR, "recv header ERR: invalid msg len: %u.", len);
+		return ZXDH_BAR_MSG_ERR_LEN;
+	}
+	if (msg_recv_func_tbl[msg_header->module_id] == NULL) {
+		PMD_MSG_LOG(ERR, "recv header ERR: module %s(%u) is not registered",
+				zxdh_module_id_name(module_id), module_id);
+		return ZXDH_BAR_MSG_ERR_MODULE_NOEXIST;
+	}
+	return ZXDH_BAR_MSG_OK;
+}
+
+int
+zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev)
+{
+	struct zxdh_bar_msg_header msg_header = {0};
+	uint64_t recv_addr = 0;
+	uint64_t reps_addr = 0;
+	uint16_t ret = 0;
+	uint8_t *recved_msg = NULL;
+
+	recv_addr = zxdh_recv_addr_get(src, dst, virt_addr);
+	if (recv_addr == 0) {
+		PMD_MSG_LOG(ERR, "invalid driver type (src: %u, dst: %u).", src, dst);
+		return -1;
+	}
+
+	zxdh_bar_chan_msg_header_get(recv_addr, &msg_header);
+	ret = zxdh_bar_chan_msg_header_check(&msg_header);
+
+	if (ret != ZXDH_BAR_MSG_OK) {
+		PMD_MSG_LOG(ERR, "recv msg_head err, ret: %u.", ret);
+		return -1;
+	}
+
+	recved_msg = rte_malloc(NULL, msg_header.len, 0);
+	if (recved_msg == NULL) {
+		PMD_MSG_LOG(ERR, "malloc temp buff failed.");
+		return -1;
+	}
+	zxdh_bar_chan_msg_payload_get(recv_addr, recved_msg, msg_header.len);
+
+	reps_addr = zxdh_reply_addr_get(msg_header.sync, src, dst, virt_addr);
+
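+	/* A sync message is answered in place on the reply channel; an async ack completes a pending request. */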
+	if (msg_header.sync == ZXDH_BAR_CHAN_MSG_SYNC) {
+		zxdh_bar_msg_sync_msg_proc(reps_addr, &msg_header, recved_msg, dev);
+		goto exit;
+	}
+	zxdh_bar_chan_msg_valid_set(recv_addr, ZXDH_BAR_MSG_CHAN_USABLE);
+	if (msg_header.ack == ZXDH_BAR_CHAN_MSG_ACK) {
+		zxdh_bar_msg_ack_async_msg_proc(&msg_header, recved_msg);
+		goto exit;
+	}
+	/* No-ack async message: nothing more to do, just release the temp buffer. */
+	rte_free(recved_msg);
+	return ZXDH_BAR_MSG_OK;
+
+exit:
+	rte_free(recved_msg);
+	return ZXDH_BAR_MSG_OK;
+}
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index b2beedec64..5742000a3b 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -13,10 +13,17 @@
 extern "C" {
 #endif
 
-#define ZXDH_BAR0_INDEX     0
-#define ZXDH_CTRLCH_OFFSET            (0x2000)
+#define ZXDH_BAR0_INDEX                 0
+#define ZXDH_CTRLCH_OFFSET              (0x2000)
+#define ZXDH_MSG_CHAN_PFVFSHARE_OFFSET  (ZXDH_CTRLCH_OFFSET + 0x1000)
 
 #define ZXDH_MSIX_INTR_MSG_VEC_BASE   1
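+/* MSI-X vector layout: 0 = dev config, 1..3 = risc messages, 4 = DTB, 5 onwards = RX queues. */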
+#define ZXDH_MSIX_INTR_MSG_VEC_NUM    3
+#define ZXDH_MSIX_INTR_DTB_VEC        (ZXDH_MSIX_INTR_MSG_VEC_BASE + ZXDH_MSIX_INTR_MSG_VEC_NUM)
+#define ZXDH_MSIX_INTR_DTB_VEC_NUM    1
+#define ZXDH_INTR_NONQUE_NUM          (ZXDH_MSIX_INTR_MSG_VEC_NUM + ZXDH_MSIX_INTR_DTB_VEC_NUM + 1)
+#define ZXDH_QUEUE_INTR_VEC_BASE      (ZXDH_MSIX_INTR_DTB_VEC + ZXDH_MSIX_INTR_DTB_VEC_NUM)
+#define ZXDH_QUEUE_INTR_VEC_NUM       256
 
 #define ZXDH_BAR_MSG_POLLING_SPAN     100
 #define ZXDH_BAR_MSG_POLL_CNT_PER_MS  (1 * 1000 / ZXDH_BAR_MSG_POLLING_SPAN)
@@ -201,6 +208,9 @@ struct zxdh_bar_msg_header {
 	uint16_t dst_pcieid; /* used in PF-->VF */
 };
 
+typedef int (*zxdh_bar_chan_msg_recv_callback)(void *pay_load, uint16_t len,
+		void *reps_buffer, uint16_t *reps_len, void *dev);
+
 int zxdh_msg_chan_init(void);
 int zxdh_bar_msg_chan_exit(void);
 int zxdh_msg_chan_hwlock_init(struct rte_eth_dev *dev);
@@ -209,6 +219,8 @@ int zxdh_msg_chan_enable(struct rte_eth_dev *dev);
 int zxdh_bar_chan_sync_msg_send(struct zxdh_pci_bar_msg *in,
 		struct zxdh_msg_recviver_mem *result);
 
+int zxdh_bar_irq_recv(uint8_t src, uint8_t dst, uint64_t virt_addr, void *dev);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c
index 68785aa03e..8e7a9c1213 100644
--- a/drivers/net/zxdh/zxdh_pci.c
+++ b/drivers/net/zxdh/zxdh_pci.c
@@ -14,6 +14,7 @@
 #include "zxdh_ethdev.h"
 #include "zxdh_pci.h"
 #include "zxdh_logs.h"
+#include "zxdh_queue.h"
 
 #define ZXDH_PMD_DEFAULT_GUEST_FEATURES   \
 		(1ULL << ZXDH_NET_F_MRG_RXBUF | \
@@ -93,6 +94,27 @@ zxdh_set_features(struct zxdh_hw *hw, uint64_t features)
 	rte_write32(features >> 32, &hw->common_cfg->guest_feature);
 }
 
+static uint16_t
+zxdh_set_config_irq(struct zxdh_hw *hw, uint16_t vec)
+{
+	rte_write16(vec, &hw->common_cfg->msix_config);
+	return rte_read16(&hw->common_cfg->msix_config);
+}
+
+static uint16_t
+zxdh_set_queue_irq(struct zxdh_hw *hw, struct zxdh_virtqueue *vq, uint16_t vec)
+{
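+	/* Select the queue, program its MSI-X vector, then read it back so the caller can verify. */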
+	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
+	return rte_read16(&hw->common_cfg->queue_msix_vector);
+}
+
+static uint8_t
+zxdh_get_isr(struct zxdh_hw *hw)
+{
+	return rte_read8(hw->isr);
+}
+
 const struct zxdh_pci_ops zxdh_dev_pci_ops = {
 	.read_dev_cfg   = zxdh_read_dev_config,
 	.write_dev_cfg  = zxdh_write_dev_config,
@@ -100,8 +122,17 @@ const struct zxdh_pci_ops zxdh_dev_pci_ops = {
 	.set_status     = zxdh_set_status,
 	.get_features   = zxdh_get_features,
 	.set_features   = zxdh_set_features,
+	.set_queue_irq  = zxdh_set_queue_irq,
+	.set_config_irq = zxdh_set_config_irq,
+	.get_isr        = zxdh_get_isr,
 };
 
+uint8_t
+zxdh_pci_isr(struct zxdh_hw *hw)
+{
+	return ZXDH_VTPCI_OPS(hw)->get_isr(hw);
+}
+
 uint16_t
 zxdh_pci_get_features(struct zxdh_hw *hw)
 {
diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h
index 7905911a34..41e47d5d3b 100644
--- a/drivers/net/zxdh/zxdh_pci.h
+++ b/drivers/net/zxdh/zxdh_pci.h
@@ -22,6 +22,13 @@ enum zxdh_msix_status {
 	ZXDH_MSIX_ENABLED  = 2
 };
 
+/* The bit of the ISR which indicates a device has an interrupt. */
+#define ZXDH_PCI_ISR_INTR    0x1
+/* The bit of the ISR which indicates a device configuration change. */
+#define ZXDH_PCI_ISR_CONFIG  0x2
+/* Vector value used to disable MSI for queue. */
+#define ZXDH_MSI_NO_VECTOR   0x7F
+
 #define ZXDH_NET_F_MAC               5   /* Host has given MAC address. */
 #define ZXDH_NET_F_MRG_RXBUF         15  /* Host can merge receive buffers. */
 #define ZXDH_NET_F_STATUS            16  /* zxdh_net_config.status available */
@@ -110,6 +117,9 @@ struct zxdh_pci_ops {
 
 	uint64_t (*get_features)(struct zxdh_hw *hw);
 	void     (*set_features)(struct zxdh_hw *hw, uint64_t features);
+	uint16_t (*set_queue_irq)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq, uint16_t vec);
+	uint16_t (*set_config_irq)(struct zxdh_hw *hw, uint16_t vec);
+	uint8_t  (*get_isr)(struct zxdh_hw *hw);
 };
 
 struct zxdh_hw_internal {
@@ -130,6 +140,7 @@ void zxdh_get_pci_dev_config(struct zxdh_hw *hw);
 
 uint16_t zxdh_pci_get_features(struct zxdh_hw *hw);
 enum zxdh_msix_status zxdh_pci_msix_detect(struct rte_pci_device *dev);
+uint8_t zxdh_pci_isr(struct zxdh_hw *hw);
 
 #ifdef __cplusplus
 }
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
new file mode 100644
index 0000000000..9c790cd9d3
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef ZXDH_QUEUE_H
+#define ZXDH_QUEUE_H
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+#include "zxdh_ethdev.h"
+#include "zxdh_rxtx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * ring descriptors: 16 bytes.
+ * These can chain together via "next".
+ */
+struct zxdh_vring_desc {
+	uint64_t addr;  /*  Address (guest-physical). */
+	uint32_t len;   /* Length. */
+	uint16_t flags; /* The flags as indicated above. */
+	uint16_t next;  /* We chain unused descriptors via this. */
+} __rte_packed;
+
+struct zxdh_vring_avail {
+	uint16_t flags;
+	uint16_t idx;
+	uint16_t ring[];
+} __rte_packed;
+
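+/* Packed-ring descriptor; the layout mirrors the virtio 1.1 packed virtqueue format. */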
+struct zxdh_vring_packed_desc {
+	uint64_t addr;
+	uint32_t len;
+	uint16_t id;
+	uint16_t flags;
+} __rte_packed;
+
+struct zxdh_vring_packed_desc_event {
+	uint16_t desc_event_off_wrap;
+	uint16_t desc_event_flags;
+} __rte_packed;
+
+struct zxdh_vring_packed {
+	uint32_t num;
+	struct zxdh_vring_packed_desc *desc;
+	struct zxdh_vring_packed_desc_event *driver;
+	struct zxdh_vring_packed_desc_event *device;
+} __rte_packed;
+
+struct zxdh_vq_desc_extra {
+	void *cookie;
+	uint16_t ndescs;
+	uint16_t next;
+} __rte_packed;
+
+struct zxdh_virtqueue {
+	struct zxdh_hw  *hw; /* < zxdh_hw structure pointer. */
+	struct {
+		/* vring keeping descs and events */
+		struct zxdh_vring_packed ring;
+		uint8_t used_wrap_counter;
+		uint8_t rsv;
+		uint16_t cached_flags; /* < cached flags for descs */
+		uint16_t event_flags_shadow;
+		uint16_t rsv1;
+	} __rte_packed vq_packed;
+	uint16_t vq_used_cons_idx; /* < last consumed descriptor */
+	uint16_t vq_nentries;  /* < vring desc numbers */
+	uint16_t vq_free_cnt;  /* < num of desc available */
+	uint16_t vq_avail_idx; /* < sync until needed */
+	uint16_t vq_free_thresh; /* < free threshold */
+	uint16_t rsv2;
+
+	void *vq_ring_virt_mem;  /* < linear address of vring */
+	uint32_t vq_ring_size;
+
+	union {
+		struct zxdh_virtnet_rx rxq;
+		struct zxdh_virtnet_tx txq;
+	};
+
+	/*
+	 * physical address of vring, or virtual address
+	 */
+	rte_iova_t vq_ring_mem;
+
+	/*
+	 * Head of the free chain in the descriptor table. If
+	 * there are no free descriptors, this will be set to
+	 * VQ_RING_DESC_CHAIN_END.
+	 */
+	uint16_t  vq_desc_head_idx;
+	uint16_t  vq_desc_tail_idx;
+	uint16_t  vq_queue_index;   /* < PCI queue index */
+	uint16_t  offset; /* < relative offset to obtain addr in mbuf */
+	uint16_t *notify_addr;
+	struct rte_mbuf **sw_ring;  /* < RX software ring. */
+	struct zxdh_vq_desc_extra vq_descx[];
+} __rte_packed;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ZXDH_QUEUE_H */
diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h
new file mode 100644
index 0000000000..7d4b5481ec
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_rxtx.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef ZXDH_RXTX_H
+#define ZXDH_RXTX_H
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_mbuf_core.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct zxdh_virtnet_stats {
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t errors;
+	uint64_t multicast;
+	uint64_t broadcast;
+	uint64_t truncated_err;
+	uint64_t size_bins[8];
+};
+
+struct zxdh_virtnet_rx {
+	struct zxdh_virtqueue         *vq;
+
+	/* dummy mbuf, for wraparound when processing RX ring. */
+	struct rte_mbuf           fake_mbuf;
+
+	uint64_t                  mbuf_initializer; /* value to init mbufs. */
+	struct rte_mempool       *mpool;            /* mempool for mbuf allocation */
+	uint16_t                  queue_id;         /* DPDK queue index. */
+	uint16_t                  port_id;          /* Device port identifier. */
+	struct zxdh_virtnet_stats      stats;
+	const struct rte_memzone *mz;               /* mem zone to populate RX ring. */
+} __rte_packed;
+
+struct zxdh_virtnet_tx {
+	struct zxdh_virtqueue         *vq;
+	const struct rte_memzone *zxdh_net_hdr_mz;  /* memzone to populate hdr. */
+	rte_iova_t                zxdh_net_hdr_mem; /* hdr for each xmit packet */
+	uint16_t                  queue_id;           /* DPDK queue index. */
+	uint16_t                  port_id;            /* Device port identifier. */
+	struct zxdh_virtnet_stats      stats;
+	const struct rte_memzone *mz;                 /* mem zone to populate TX ring. */
+} __rte_packed;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* ZXDH_RXTX_H */
-- 
2.27.0

Thread overview: 108+ messages
2024-09-10 12:00 [PATCH v4] net/zxdh: Provided zxdh basic init Junlong Wang
2024-09-24  1:35 ` [v4] " Junlong Wang
2024-09-25 22:39 ` [PATCH v4] " Ferruh Yigit
2024-09-26  6:49 ` [v4] " Junlong Wang
2024-10-07 21:43 ` [PATCH v4] " Stephen Hemminger
2024-10-15  5:43 ` [PATCH v5 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-15  5:43   ` [PATCH v5 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-15  5:44     ` [PATCH v5 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-15  5:44       ` [PATCH v5 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-15  5:44       ` [PATCH v5 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-15  5:44       ` [PATCH v5 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-15  5:44       ` [PATCH v5 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-15  5:44       ` [PATCH v5 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-15  5:44       ` [PATCH v5 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-15  5:44       ` [PATCH v5 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-15 15:37         ` Stephen Hemminger
2024-10-15 15:57         ` Stephen Hemminger
2024-10-16  8:16     ` [PATCH v6 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-16  8:16       ` [PATCH v6 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-16  8:18         ` [PATCH v6 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-16  8:18           ` [PATCH v6 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-16  8:18           ` [PATCH v6 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-16  8:18           ` [PATCH v6 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-21  8:50             ` Thomas Monjalon
2024-10-21 10:56             ` Junlong Wang
2024-10-16  8:18           ` [PATCH v6 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-21  8:52             ` Thomas Monjalon
2024-10-16  8:18           ` [PATCH v6 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-16  8:18           ` [PATCH v6 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-21  8:54             ` Thomas Monjalon
2024-10-16  8:18           ` [PATCH v6 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-18  5:18             ` [v6,9/9] " Junlong Wang
2024-10-18  6:48               ` David Marchand
2024-10-19 11:17             ` Junlong Wang
2024-10-21  9:03         ` [PATCH v6 1/9] net/zxdh: add zxdh ethdev pmd driver Thomas Monjalon
2024-10-22 12:20         ` [PATCH v7 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-22 12:20           ` [PATCH v7 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-30  9:01             ` [PATCH v8 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-30  9:01               ` [PATCH v8 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-11-01  6:21                 ` [PATCH v9 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-11-02  0:57                     ` Ferruh Yigit
2024-11-04 11:58                     ` [PATCH v10 00/10] net/zxdh: introduce net zxdh driver Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 01/10] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-11-07 10:32                         ` [PATCH v10 00/10] net/zxdh: introduce net zxdh driver Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 02/10] net/zxdh: add logging implementation Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 03/10] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 04/10] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 05/10] net/zxdh: add msg chan enable implementation Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 06/10] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-11-04 11:58                       ` Junlong Wang [this message]
2024-11-04 11:58                       ` [PATCH v10 08/10] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 09/10] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-11-04 11:58                       ` [PATCH v10 10/10] net/zxdh: add zxdh dev close ops Junlong Wang
2024-11-06  0:40                       ` [PATCH v10 00/10] net/zxdh: introduce net zxdh driver Ferruh Yigit
2024-11-07  9:28                         ` Ferruh Yigit
2024-11-07  9:58                           ` Ferruh Yigit
2024-11-01  6:21                   ` [PATCH v9 2/9] net/zxdh: add logging implementation Junlong Wang
2024-11-02  1:02                     ` Ferruh Yigit
2024-11-04  2:44                     ` [v9,2/9] " Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-11-02  1:01                     ` Ferruh Yigit
2024-11-01  6:21                   ` [PATCH v9 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-11-02  1:00                     ` Ferruh Yigit
2024-11-04  2:47                     ` Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-11-02  1:06                     ` Ferruh Yigit
2024-11-04  3:30                     ` [v9,6/9] " Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-11-02  1:07                     ` Ferruh Yigit
2024-11-01  6:21                   ` [PATCH v9 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-11-01  6:21                   ` [PATCH v9 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-11-02  0:56                   ` [PATCH v9 0/9] net/zxdh: introduce net zxdh driver Ferruh Yigit
2024-11-04  2:42                   ` Junlong Wang
2024-11-04  8:46                     ` Ferruh Yigit
2024-11-04  9:52                       ` David Marchand
2024-11-04 11:46                   ` Junlong Wang
2024-11-04 22:47                     ` Thomas Monjalon
2024-11-05  9:39                   ` Junlong Wang
2024-11-06  0:38                     ` Ferruh Yigit
2024-10-30  9:01               ` [PATCH v8 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-30  9:01               ` [PATCH v8 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-30 14:55                 ` David Marchand
2024-10-30  9:01               ` [PATCH v8 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-30  9:01               ` [PATCH v8 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-30  9:01               ` [PATCH v8 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-30  9:01               ` [PATCH v8 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-30  9:01               ` [PATCH v8 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-30  9:01               ` [PATCH v8 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-22 12:20           ` [PATCH v7 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-22 12:20           ` [PATCH v7 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-27 16:47             ` Stephen Hemminger
2024-10-27 16:47             ` Stephen Hemminger
2024-10-22 12:20           ` [PATCH v7 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-22 12:20           ` [PATCH v7 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-26 17:05             ` Thomas Monjalon
2024-10-22 12:20           ` [PATCH v7 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-22 12:20           ` [PATCH v7 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-27 17:07             ` Stephen Hemminger
2024-10-22 12:20           ` [PATCH v7 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-22 12:20           ` [PATCH v7 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-24 11:31             ` [v7,9/9] " Junlong Wang
2024-10-25  9:48             ` Junlong Wang
2024-10-26  2:32             ` Junlong Wang
2024-10-27 16:40             ` [PATCH v7 9/9] " Stephen Hemminger
2024-10-27 17:03               ` Stephen Hemminger
2024-10-27 16:58             ` Stephen Hemminger

Reply instructions:

You may reply publicly to this message using the --to, --cc,
and --in-reply-to switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241104115856.2795213-8-wang.junlong1@zte.com.cn \
    --to=wang.junlong1@zte.com.cn \
    --cc=dev@dpdk.org \
    --cc=ferruh.yigit@amd.com \
    --cc=wang.yong19@zte.com.cn \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style