DPDK patches and discussions
 help / color / mirror / Atom feed
From: Junlong Wang <wang.junlong1@zte.com.cn>
To: dev@dpdk.org
Cc: wang.yong19@zte.com.cn, Junlong Wang <wang.junlong1@zte.com.cn>
Subject: [PATCH v8 6/9] net/zxdh: add zxdh get device backend infos
Date: Wed, 30 Oct 2024 17:01:13 +0800	[thread overview]
Message-ID: <20241030090124.2540776-7-wang.junlong1@zte.com.cn> (raw)
In-Reply-To: <20241030090124.2540776-1-wang.junlong1@zte.com.cn>


[-- Attachment #1.1.1: Type: text/plain, Size: 16207 bytes --]

Add support for reading zxdh device backend information,
using the BAR message channel to send the get requests.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/meson.build   |   1 +
 drivers/net/zxdh/zxdh_common.c | 250 +++++++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_common.h |  30 ++++
 drivers/net/zxdh/zxdh_ethdev.c |  35 +++++
 drivers/net/zxdh/zxdh_ethdev.h |   5 +
 drivers/net/zxdh/zxdh_msg.c    |  17 +--
 drivers/net/zxdh/zxdh_msg.h    |  21 +++
 drivers/net/zxdh/zxdh_queue.h  |   4 +
 drivers/net/zxdh/zxdh_rxtx.h   |   4 +
 9 files changed, 359 insertions(+), 8 deletions(-)
 create mode 100644 drivers/net/zxdh/zxdh_common.c
 create mode 100644 drivers/net/zxdh/zxdh_common.h

diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
index 2e0c8fddae..a16db47f89 100644
--- a/drivers/net/zxdh/meson.build
+++ b/drivers/net/zxdh/meson.build
@@ -17,4 +17,5 @@ sources = files(
         'zxdh_ethdev.c',
         'zxdh_pci.c',
         'zxdh_msg.c',
+        'zxdh_common.c',
 )
diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c
new file mode 100644
index 0000000000..0cb5380c5e
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_common.c
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include <ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include "zxdh_ethdev.h"
+#include "zxdh_logs.h"
+#include "zxdh_msg.h"
+#include "zxdh_common.h"
+
+#define ZXDH_MSG_RSP_SIZE_MAX         512
+
+#define ZXDH_COMMON_TABLE_READ        0
+#define ZXDH_COMMON_TABLE_WRITE       1
+
+#define ZXDH_COMMON_FIELD_PHYPORT     6
+
+#define ZXDH_RSC_TBL_CONTENT_LEN_MAX  (257 * 2)
+
+#define ZXDH_REPS_HEADER_OFFSET       4
+#define ZXDH_TBL_MSG_PRO_SUCCESS      0xaa
+
+/* On-wire request header for common-table access (read/write). */
+struct zxdh_common_msg {
+	uint8_t  type;    /* 0:read table 1:write table */
+	uint8_t  field;   /* table field id, e.g. ZXDH_COMMON_FIELD_PHYPORT */
+	uint16_t pcie_id; /* requesting function's PCIe id */
+	uint16_t slen;    /* Data length for write table */
+	uint16_t reserved;
+} __rte_packed;
+
+/* On-wire response header returned for common-table requests. */
+struct zxdh_common_rsp_hdr {
+	uint8_t  rsp_status;
+	uint16_t rsp_len;        /* presumably total response length -- confirm */
+	uint8_t  reserved;
+	uint8_t  payload_status; /* 0xaa (ZXDH_TBL_MSG_PRO_SUCCESS) on success */
+	uint8_t  rsv;
+	uint16_t payload_len;    /* bytes of data following this header */
+} __rte_packed;
+
+/* Resource-table request header sent to the RISC backend. */
+struct zxdh_tbl_msg_header {
+	uint8_t  type;  /* r/w */
+	uint8_t  field;
+	uint16_t pcieid;
+	uint16_t slen;
+	uint16_t rsv;
+};
+
+/* Resource-table reply header, located at ZXDH_REPS_HEADER_OFFSET. */
+struct zxdh_tbl_msg_reps_header {
+	uint8_t  check; /* ZXDH_TBL_MSG_PRO_SUCCESS when the query succeeded */
+	uint8_t  rsv;
+	uint16_t len;   /* payload bytes following this header */
+};
+
+/**
+ * Build a common-table request message in a freshly allocated buffer.
+ *
+ * @param hw        device private data (supplies pcie_id)
+ * @param desc      BAR message descriptor; payload_addr/payload_len are set
+ *                  here and the caller owns (must rte_free) payload_addr
+ * @param type      ZXDH_COMMON_TABLE_READ or ZXDH_COMMON_TABLE_WRITE
+ * @param field     table field to access
+ * @param buff      payload appended for writes (may be NULL when size is 0)
+ * @param buff_size length of @buff in bytes
+ * @return 0 on success, -ENOMEM on allocation failure
+ */
+static int32_t zxdh_fill_common_msg(struct zxdh_hw *hw,
+				struct zxdh_pci_bar_msg *desc,
+				uint8_t        type,
+				uint8_t        field,
+				void          *buff,
+				uint16_t       buff_size)
+{
+	uint64_t msg_len = sizeof(struct zxdh_common_msg) + buff_size;
+	struct zxdh_common_msg *msg_data;
+
+	/* rte_zmalloc() returns zeroed memory, so no extra memset is needed */
+	desc->payload_addr = rte_zmalloc(NULL, msg_len, 0);
+	if (unlikely(desc->payload_addr == NULL)) {
+		PMD_DRV_LOG(ERR, "Failed to allocate msg_data");
+		return -ENOMEM;
+	}
+	desc->payload_len = msg_len;
+	msg_data = (struct zxdh_common_msg *)desc->payload_addr;
+	msg_data->type = type;
+	msg_data->field = field;
+	msg_data->pcie_id = hw->pcie_id;
+	msg_data->slen = buff_size;
+	/* write requests carry the table data right after the header */
+	if (buff_size != 0)
+		rte_memcpy(msg_data + 1, buff, buff_size);
+
+	return 0;
+}
+
+/**
+ * Fill the routing fields of a BAR message descriptor and send it
+ * synchronously to the RISC backend.
+ *
+ * On success the caller owns msg_rsp->recv_buffer and must rte_free() it
+ * after consuming the response.
+ *
+ * @return 0 on success, -ENOMEM on allocation failure, -1 on channel error
+ */
+static int32_t zxdh_send_command(struct zxdh_hw *hw,
+				struct zxdh_pci_bar_msg      *desc,
+				enum zxdh_bar_module_id       module_id,
+				struct zxdh_msg_recviver_mem *msg_rsp)
+{
+	desc->virt_addr = (uint64_t)(hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET);
+	desc->src = hw->is_pf ? ZXDH_MSG_CHAN_END_PF : ZXDH_MSG_CHAN_END_VF;
+	desc->dst = ZXDH_MSG_CHAN_END_RISC;
+	desc->module_id = module_id;
+	desc->src_pcieid = hw->pcie_id;
+
+	msg_rsp->buffer_len  = ZXDH_MSG_RSP_SIZE_MAX;
+	msg_rsp->recv_buffer = rte_zmalloc(NULL, msg_rsp->buffer_len, 0);
+	if (unlikely(msg_rsp->recv_buffer == NULL)) {
+		PMD_DRV_LOG(ERR, "Failed to allocate messages response");
+		return -ENOMEM;
+	}
+
+	if (zxdh_bar_chan_sync_msg_send(desc, msg_rsp) != ZXDH_BAR_MSG_OK) {
+		PMD_DRV_LOG(ERR, "Failed to send sync messages or receive response");
+		rte_free(msg_rsp->recv_buffer);
+		msg_rsp->recv_buffer = NULL; /* avoid a dangling pointer in the caller */
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * Validate a common-table response and copy its payload out.
+ *
+ * @param msg_rsp response buffer returned by zxdh_send_command()
+ * @param buff    destination for the payload (may be NULL when len is 0)
+ * @param len     exact payload length expected by the caller
+ * @return 0 on success, -1 when the status byte or length do not match
+ */
+static int32_t zxdh_common_rsp_check(struct zxdh_msg_recviver_mem *msg_rsp,
+		void *buff, uint16_t len)
+{
+	struct zxdh_common_rsp_hdr *rsp_hdr = (struct zxdh_common_rsp_hdr *)msg_rsp->recv_buffer;
+
+	/* use the named success marker instead of the bare 0xaa literal */
+	if (rsp_hdr->payload_status != ZXDH_TBL_MSG_PRO_SUCCESS || rsp_hdr->payload_len != len) {
+		PMD_DRV_LOG(ERR, "Common response is invalid, status:0x%x rsp_len:%d",
+					rsp_hdr->payload_status, rsp_hdr->payload_len);
+		return -1;
+	}
+	if (len != 0)
+		rte_memcpy(buff, rsp_hdr + 1, len);
+
+	return 0;
+}
+
+/**
+ * Read one field of the device common table through the BAR message channel.
+ *
+ * @param hw        device private data
+ * @param field     field id to read (e.g. ZXDH_COMMON_FIELD_PHYPORT)
+ * @param buff      output buffer for the field value
+ * @param buff_size expected payload length; the response must match exactly
+ * @return 0 on success, negative value on failure
+ */
+static int32_t zxdh_common_table_read(struct zxdh_hw *hw, uint8_t field,
+			void *buff, uint16_t buff_size)
+{
+	struct zxdh_msg_recviver_mem msg_rsp;
+	struct zxdh_pci_bar_msg desc;
+	int32_t ret = 0;
+
+	if (!hw->msg_chan_init) {
+		PMD_DRV_LOG(ERR, "Bar messages channel not initialized");
+		return -1;
+	}
+
+	ret = zxdh_fill_common_msg(hw, &desc, ZXDH_COMMON_TABLE_READ, field, NULL, 0);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to fill common msg");
+		return ret;
+	}
+
+	ret = zxdh_send_command(hw, &desc, ZXDH_BAR_MODULE_TBL, &msg_rsp);
+	if (ret != 0)
+		goto free_msg_data;
+
+	/* cleanup runs whether the response check passed or not, so the
+	 * redundant "goto" right before the label has been dropped
+	 */
+	ret = zxdh_common_rsp_check(&msg_rsp, buff, buff_size);
+
+	rte_free(msg_rsp.recv_buffer);
+free_msg_data:
+	rte_free(desc.payload_addr);
+	return ret;
+}
+
+/* Public helper: fetch the physical port id from the backend common table. */
+int32_t zxdh_phyport_get(struct rte_eth_dev *dev, uint8_t *phyport)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	return zxdh_common_table_read(hw, ZXDH_COMMON_FIELD_PHYPORT,
+			(void *)phyport, sizeof(*phyport));
+}
+
+/* Fill the resource-table query parameters from the device's BAR0 info. */
+static inline void zxdh_fill_res_para(struct rte_eth_dev *dev, struct zxdh_res_para *param)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	param->pcie_id   = hw->pcie_id;
+	/* use the named BAR index for consistency with zxdh_send_command() */
+	param->virt_addr = hw->bar_addr[ZXDH_BAR0_INDEX] + ZXDH_CTRLCH_OFFSET;
+	param->src_type  = ZXDH_BAR_MODULE_TBL;
+}
+
+/**
+ * Query one field of the backend resource table.
+ *
+ * @param dev   filled-in query parameters (pcie id, BAR address, source type)
+ * @param field ZXDH_RES_TBL_FILED member to read
+ * @param res   output buffer for the field content
+ *              NOTE(review): callers must size @res for the largest possible
+ *              reply, since the reply length is only known after the exchange
+ * @param len   out parameter: number of bytes written to @res
+ * @return ZXDH_BAR_MSG_OK on success, non-zero error code otherwise
+ */
+static int zxdh_get_res_info(struct zxdh_res_para *dev, uint8_t field, uint8_t *res, uint16_t *len)
+{
+	struct zxdh_pci_bar_msg in = {0};
+	uint8_t recv_buf[ZXDH_RSC_TBL_CONTENT_LEN_MAX + 8] = {0};
+	int ret = 0;
+
+	if (!res || !dev)
+		return ZXDH_BAR_MSG_ERR_NULL;
+
+	struct zxdh_tbl_msg_header tbl_msg = {
+		.type = ZXDH_TBL_TYPE_READ,
+		.field = field,
+		.pcieid = dev->pcie_id,
+		.slen = 0,
+		.rsv = 0,
+	};
+
+	in.virt_addr = dev->virt_addr;
+	in.payload_addr = &tbl_msg;
+	in.payload_len = sizeof(tbl_msg);
+	in.src = dev->src_type;
+	in.dst = ZXDH_MSG_CHAN_END_RISC;
+	in.module_id = ZXDH_BAR_MODULE_TBL;
+	in.src_pcieid = dev->pcie_id;
+
+	struct zxdh_msg_recviver_mem result = {
+		.recv_buffer = recv_buf,
+		.buffer_len = sizeof(recv_buf),
+	};
+
+	ret = zxdh_bar_chan_sync_msg_send(&in, &result);
+	if (ret != ZXDH_BAR_MSG_OK) {
+		PMD_DRV_LOG(ERR,
+			"send sync_msg failed. pcieid: 0x%x, ret: %d.", dev->pcie_id, ret);
+		return ret;
+	}
+
+	struct zxdh_tbl_msg_reps_header *tbl_reps =
+		(struct zxdh_tbl_msg_reps_header *)(recv_buf + ZXDH_REPS_HEADER_OFFSET);
+
+	if (tbl_reps->check != ZXDH_TBL_MSG_PRO_SUCCESS) {
+		PMD_DRV_LOG(ERR,
+			"get resource_field failed. pcieid: 0x%x, ret: %d.", dev->pcie_id, ret);
+		/* bug fix: 'ret' is ZXDH_BAR_MSG_OK here, so this failure was
+		 * silently reported as success; return a real error instead
+		 */
+		return -1;
+	}
+
+	/* guard against a corrupted length overrunning the receive buffer */
+	if (tbl_reps->len > sizeof(recv_buf) -
+			ZXDH_REPS_HEADER_OFFSET - sizeof(*tbl_reps)) {
+		PMD_DRV_LOG(ERR,
+			"reply length %u exceeds buffer. pcieid: 0x%x.",
+			tbl_reps->len, dev->pcie_id);
+		return -1;
+	}
+
+	*len = tbl_reps->len;
+	rte_memcpy(res, (recv_buf + ZXDH_REPS_HEADER_OFFSET +
+		sizeof(struct zxdh_tbl_msg_reps_header)), *len);
+	return ret;
+}
+
+/* Read the panel-id field of the resource table into @panel_id. */
+static int zxdh_get_res_panel_id(struct zxdh_res_para *in, uint8_t *panel_id)
+{
+	uint16_t value_len = 0;
+	uint8_t value = 0;
+
+	if (zxdh_get_res_info(in, ZXDH_TBL_FIELD_PNLID, &value, &value_len) != ZXDH_BAR_MSG_OK)
+		return -1;
+
+	*panel_id = value;
+	return ZXDH_BAR_MSG_OK;
+}
+
+/* Public helper: fetch the device panel id via the resource table. */
+int32_t zxdh_pannelid_get(struct rte_eth_dev *dev, uint8_t *pannelid)
+{
+	struct zxdh_res_para param;
+
+	zxdh_fill_res_para(dev, &param);
+	return zxdh_get_res_panel_id(&param, pannelid);
+}
diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h
new file mode 100644
index 0000000000..f098ae4cf9
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_common.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef ZXDH_COMMON_H
+#define ZXDH_COMMON_H
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+
+#include "zxdh_ethdev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct zxdh_res_para {
+	uint64_t virt_addr;
+	uint16_t pcie_id;
+	uint16_t src_type; /* refer to BAR_DRIVER_TYPE */
+};
+
+int32_t zxdh_phyport_get(struct rte_eth_dev *dev, uint8_t *phyport);
+int32_t zxdh_pannelid_get(struct rte_eth_dev *dev, uint8_t *pannelid);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ZXDH_COMMON_H */
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index a729344288..8d9df218ce 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -10,9 +10,21 @@
 #include "zxdh_logs.h"
 #include "zxdh_pci.h"
 #include "zxdh_msg.h"
+#include "zxdh_common.h"
 
 struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
 
+/*
+ * Map a virtual port number to its global vfid.
+ * NOTE(review): the constants below look like table layout parameters
+ * (256 VFs per EP, 8 PFs per EP, PF region starting at vfid 1152) --
+ * confirm against the hardware specification.
+ */
+uint16_t zxdh_vport_to_vfid(union zxdh_virport_num v)
+{
+	/* epid > 4 is local soft queue. return 1192 */
+	if (v.epid > 4)
+		return 1192;
+	if (v.vf_flag)
+		return v.epid * 256 + v.vfid;
+	else
+		return (v.epid * 8 + v.pfid) + 1152;
+}
+
 static int32_t zxdh_init_device(struct rte_eth_dev *eth_dev)
 {
 	struct zxdh_hw *hw = eth_dev->data->dev_private;
@@ -44,6 +56,25 @@ static int32_t zxdh_init_device(struct rte_eth_dev *eth_dev)
 	return ret;
 }
 
+/*
+ * Exchange initial information with the backend agent: physical port id,
+ * global vfid and panel id. Returns 0 on success, -1 on any failure.
+ */
+static int zxdh_agent_comm(struct rte_eth_dev *eth_dev, struct zxdh_hw *hw)
+{
+	int32_t ret = zxdh_phyport_get(eth_dev, &hw->phyport);
+
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to get phyport");
+		return -1;
+	}
+	PMD_INIT_LOG(INFO, "Get phyport success: 0x%x", hw->phyport);
+
+	hw->vfid = zxdh_vport_to_vfid(hw->vport);
+
+	ret = zxdh_pannelid_get(eth_dev, &hw->panel_id);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to get panel_id");
+		return -1;
+	}
+	PMD_INIT_LOG(INFO, "Get panel id success: 0x%x", hw->panel_id);
+
+	return 0;
+}
+
 static int zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
@@ -103,6 +134,10 @@ static int zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
 		goto err_zxdh_init;
 	}
 
+	ret = zxdh_agent_comm(eth_dev, hw);
+	if (ret != 0)
+		goto err_zxdh_init;
+
 	return ret;
 
 err_zxdh_init:
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index bed1334690..1ee8dd744c 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -56,6 +56,7 @@ struct zxdh_hw {
 	uint16_t pcie_id;
 	uint16_t device_id;
 	uint16_t port_id;
+	uint16_t vfid;
 
 	uint8_t *isr;
 	uint8_t weak_barriers;
@@ -65,8 +66,12 @@ struct zxdh_hw {
 	uint8_t duplex;
 	uint8_t is_pf;
 	uint8_t msg_chan_init;
+	uint8_t phyport;
+	uint8_t panel_id;
 };
 
+uint16_t zxdh_vport_to_vfid(union zxdh_virport_num v);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/zxdh/zxdh_msg.c b/drivers/net/zxdh/zxdh_msg.c
index 1bf72a9b7c..4105daf5c6 100644
--- a/drivers/net/zxdh/zxdh_msg.c
+++ b/drivers/net/zxdh/zxdh_msg.c
@@ -133,7 +133,9 @@ struct zxdh_seqid_ring {
 };
 struct zxdh_seqid_ring g_seqid_ring = {0};
 
-static uint16_t pcie_id_to_hard_lock(uint16_t src_pcieid, uint8_t dst)
+static uint8_t tmp_msg_header[ZXDH_BAR_MSG_ADDR_CHAN_INTERVAL];
+
+static uint16_t zxdh_pcie_id_to_hard_lock(uint16_t src_pcieid, uint8_t dst)
 {
 	uint16_t lock_id = 0;
 	uint16_t pf_idx = (src_pcieid & ZXDH_PCIEID_PF_IDX_MASK) >> ZXDH_PCIEID_PF_IDX_OFFSET;
@@ -209,11 +211,11 @@ static int32_t zxdh_spinlock_unlock(uint32_t virt_lock_id, uint64_t virt_addr, u
  */
 static int bar_chan_pf_init_spinlock(uint16_t pcie_id, uint64_t bar_base_addr)
 {
-	int lock_id = pcie_id_to_hard_lock(pcie_id, ZXDH_MSG_CHAN_END_RISC);
+	int lock_id = zxdh_pcie_id_to_hard_lock(pcie_id, ZXDH_MSG_CHAN_END_RISC);
 
 	zxdh_spinlock_unlock(lock_id, bar_base_addr + ZXDH_BAR0_SPINLOCK_OFFSET,
 			bar_base_addr + ZXDH_HW_LABEL_OFFSET);
-	lock_id = pcie_id_to_hard_lock(pcie_id, ZXDH_MSG_CHAN_END_VF);
+	lock_id = zxdh_pcie_id_to_hard_lock(pcie_id, ZXDH_MSG_CHAN_END_VF);
 	zxdh_spinlock_unlock(lock_id, bar_base_addr + ZXDH_BAR0_SPINLOCK_OFFSET,
 			bar_base_addr + ZXDH_HW_LABEL_OFFSET);
 	return 0;
@@ -409,7 +411,7 @@ static uint16_t zxdh_bar_chan_subchan_addr_get(struct zxdh_pci_bar_msg *in, uint
 static int zxdh_bar_hard_lock(uint16_t src_pcieid, uint8_t dst, uint64_t virt_addr)
 {
 	int ret = 0;
-	uint16_t lockid = pcie_id_to_hard_lock(src_pcieid, dst);
+	uint16_t lockid = zxdh_pcie_id_to_hard_lock(src_pcieid, dst);
 
 	PMD_MSG_LOG(DEBUG, "dev pcieid: 0x%x lock, get hardlockid: %u", src_pcieid, lockid);
 	if (dst == ZXDH_MSG_CHAN_END_RISC)
@@ -426,7 +428,7 @@ static int zxdh_bar_hard_lock(uint16_t src_pcieid, uint8_t dst, uint64_t virt_ad
 
 static void zxdh_bar_hard_unlock(uint16_t src_pcieid, uint8_t dst, uint64_t virt_addr)
 {
-	uint16_t lockid = pcie_id_to_hard_lock(src_pcieid, dst);
+	uint16_t lockid = zxdh_pcie_id_to_hard_lock(src_pcieid, dst);
 
 	PMD_MSG_LOG(DEBUG, "dev pcieid: 0x%x unlock, get hardlockid: %u", src_pcieid, lockid);
 	if (dst == ZXDH_MSG_CHAN_END_RISC)
@@ -586,7 +588,6 @@ static uint16_t zxdh_bar_chan_msg_valid_set(uint64_t subchan_addr, uint8_t valid
 	return ZXDH_BAR_MSG_OK;
 }
 
-static uint8_t temp_msg[ZXDH_BAR_MSG_ADDR_CHAN_INTERVAL];
 static uint16_t zxdh_bar_chan_msg_send(uint64_t subchan_addr,
 					void *payload_addr,
 					uint16_t payload_len,
@@ -596,13 +597,13 @@ static uint16_t zxdh_bar_chan_msg_send(uint64_t subchan_addr,
 	ret = zxdh_bar_chan_msg_header_set(subchan_addr, msg_header);
 
 	ret = zxdh_bar_chan_msg_header_get(subchan_addr,
-				(struct zxdh_bar_msg_header *)temp_msg);
+				(struct zxdh_bar_msg_header *)tmp_msg_header);
 
 	ret = zxdh_bar_chan_msg_payload_set(subchan_addr,
 				(uint8_t *)(payload_addr), payload_len);
 
 	ret = zxdh_bar_chan_msg_payload_get(subchan_addr,
-				temp_msg, payload_len);
+				tmp_msg_header, payload_len);
 
 	ret = zxdh_bar_chan_msg_valid_set(subchan_addr, ZXDH_BAR_MSG_CHAN_USED);
 	return ret;
diff --git a/drivers/net/zxdh/zxdh_msg.h b/drivers/net/zxdh/zxdh_msg.h
index 7fbab4b214..7da60ee189 100644
--- a/drivers/net/zxdh/zxdh_msg.h
+++ b/drivers/net/zxdh/zxdh_msg.h
@@ -107,6 +107,27 @@ enum zxdh_bar_module_id {
 	ZXDH_BAR_MSG_MODULE_NUM = 100,
 };
 
+enum ZXDH_RES_TBL_FILED {
+	ZXDH_TBL_FIELD_PCIEID     = 0,
+	ZXDH_TBL_FIELD_BDF        = 1,
+	ZXDH_TBL_FIELD_MSGCH      = 2,
+	ZXDH_TBL_FIELD_DATACH     = 3,
+	ZXDH_TBL_FIELD_VPORT      = 4,
+	ZXDH_TBL_FIELD_PNLID      = 5,
+	ZXDH_TBL_FIELD_PHYPORT    = 6,
+	ZXDH_TBL_FIELD_SERDES_NUM = 7,
+	ZXDH_TBL_FIELD_NP_PORT    = 8,
+	ZXDH_TBL_FIELD_SPEED      = 9,
+	ZXDH_TBL_FIELD_HASHID     = 10,
+	ZXDH_TBL_FIELD_NON,
+};
+
+enum ZXDH_TBL_MSG_TYPE {
+	ZXDH_TBL_TYPE_READ,
+	ZXDH_TBL_TYPE_WRITE,
+	ZXDH_TBL_TYPE_NON,
+};
+
 struct zxdh_msix_para {
 	uint16_t pcie_id;
 	uint16_t vector_risc;
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index fd73f14e2d..66f37ec612 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -102,4 +102,8 @@ struct zxdh_virtqueue {
 	struct zxdh_vq_desc_extra vq_descx[];
 } __rte_packed;
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* ZXDH_QUEUE_H */
diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h
index ccac7e7834..31b1c8f0a5 100644
--- a/drivers/net/zxdh/zxdh_rxtx.h
+++ b/drivers/net/zxdh/zxdh_rxtx.h
@@ -48,4 +48,8 @@ struct zxdh_virtnet_tx {
 	const struct rte_memzone *mz;                 /* mem zone to populate TX ring. */
 } __rte_packed;
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif  /* ZXDH_RXTX_H */
-- 
2.27.0

[-- Attachment #1.1.2: Type: text/html , Size: 33972 bytes --]

  parent reply	other threads:[~2024-10-30  9:06 UTC|newest]

Thread overview: 65+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-09-10 12:00 [PATCH v4] net/zxdh: Provided zxdh basic init Junlong Wang
2024-09-24  1:35 ` [v4] " Junlong Wang
2024-09-25 22:39 ` [PATCH v4] " Ferruh Yigit
2024-09-26  6:49 ` [v4] " Junlong Wang
2024-10-07 21:43 ` [PATCH v4] " Stephen Hemminger
2024-10-15  5:43 ` [PATCH v5 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-15  5:43   ` [PATCH v5 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-15  5:44     ` [PATCH v5 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-15  5:44       ` [PATCH v5 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-15  5:44       ` [PATCH v5 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-15  5:44       ` [PATCH v5 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-15  5:44       ` [PATCH v5 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-15  5:44       ` [PATCH v5 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-15  5:44       ` [PATCH v5 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-15  5:44       ` [PATCH v5 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-15 15:37         ` Stephen Hemminger
2024-10-15 15:57         ` Stephen Hemminger
2024-10-16  8:16     ` [PATCH v6 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-16  8:16       ` [PATCH v6 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-16  8:18         ` [PATCH v6 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-16  8:18           ` [PATCH v6 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-16  8:18           ` [PATCH v6 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-16  8:18           ` [PATCH v6 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-21  8:50             ` Thomas Monjalon
2024-10-21 10:56             ` Junlong Wang
2024-10-16  8:18           ` [PATCH v6 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-21  8:52             ` Thomas Monjalon
2024-10-16  8:18           ` [PATCH v6 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-16  8:18           ` [PATCH v6 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-21  8:54             ` Thomas Monjalon
2024-10-16  8:18           ` [PATCH v6 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-18  5:18             ` [v6,9/9] " Junlong Wang
2024-10-18  6:48               ` David Marchand
2024-10-19 11:17             ` Junlong Wang
2024-10-21  9:03         ` [PATCH v6 1/9] net/zxdh: add zxdh ethdev pmd driver Thomas Monjalon
2024-10-22 12:20         ` [PATCH v7 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-22 12:20           ` [PATCH v7 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-30  9:01             ` [PATCH v8 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-30  9:01               ` [PATCH v8 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-30  9:01               ` [PATCH v8 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-30  9:01               ` [PATCH v8 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-30  9:01               ` [PATCH v8 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-30  9:01               ` [PATCH v8 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-30  9:01               ` Junlong Wang [this message]
2024-10-30  9:01               ` [PATCH v8 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-30  9:01               ` [PATCH v8 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-30  9:01               ` [PATCH v8 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-22 12:20           ` [PATCH v7 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-22 12:20           ` [PATCH v7 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-27 16:47             ` Stephen Hemminger
2024-10-27 16:47             ` Stephen Hemminger
2024-10-22 12:20           ` [PATCH v7 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-22 12:20           ` [PATCH v7 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-26 17:05             ` Thomas Monjalon
2024-10-22 12:20           ` [PATCH v7 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-22 12:20           ` [PATCH v7 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-27 17:07             ` Stephen Hemminger
2024-10-22 12:20           ` [PATCH v7 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-22 12:20           ` [PATCH v7 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-24 11:31             ` [v7,9/9] " Junlong Wang
2024-10-25  9:48             ` Junlong Wang
2024-10-26  2:32             ` Junlong Wang
2024-10-27 16:40             ` [PATCH v7 9/9] " Stephen Hemminger
2024-10-27 17:03               ` Stephen Hemminger
2024-10-27 16:58             ` Stephen Hemminger

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241030090124.2540776-7-wang.junlong1@zte.com.cn \
    --to=wang.junlong1@zte.com.cn \
    --cc=dev@dpdk.org \
    --cc=wang.yong19@zte.com.cn \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).