DPDK patches and discussions
* [[PATCH v1] 4/8] net/rnp: add mbx basic api feature
@ 2023-08-01  7:22 Wenbo Cao
  2023-08-01 15:25 ` Stephen Hemminger
  0 siblings, 1 reply; 5+ messages in thread
From: Wenbo Cao @ 2023-08-01  7:22 UTC (permalink / raw)
  To: caowenbo; +Cc: dev

mbx base code is used to communicate with the firmware

Signed-off-by: Wenbo Cao <caowenbo@mucse.com>
---
 drivers/net/rnp/base/rnp_hw.h | 107 ++++++-
 drivers/net/rnp/meson.build   |   1 +
 drivers/net/rnp/rnp.h         |  35 +++
 drivers/net/rnp/rnp_ethdev.c  |  68 +++++
 drivers/net/rnp/rnp_logs.h    |   9 +
 drivers/net/rnp/rnp_mbx.c     | 522 ++++++++++++++++++++++++++++++++++
 drivers/net/rnp/rnp_mbx.h     | 138 +++++++++
 drivers/net/rnp/rnp_mbx_fw.h  |  22 ++
 8 files changed, 901 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/rnp/rnp_mbx.c
 create mode 100644 drivers/net/rnp/rnp_mbx.h
 create mode 100644 drivers/net/rnp/rnp_mbx_fw.h

diff --git a/drivers/net/rnp/base/rnp_hw.h b/drivers/net/rnp/base/rnp_hw.h
index 543011a64c..da3659bd67 100644
--- a/drivers/net/rnp/base/rnp_hw.h
+++ b/drivers/net/rnp/base/rnp_hw.h
@@ -4,15 +4,120 @@
 #ifndef __RNP_HW_H__
 #define __RNP_HW_H__
 
+#include <rte_io.h>
+#include <ethdev_driver.h>
+
+#include "rnp_osdep.h"
+
+static inline unsigned int rnp_rd_reg(volatile void *addr)
+{
+	unsigned int v = rte_read32(addr);
+
+	return v;
+}
+
+static inline void rnp_wr_reg(volatile void *reg, int val)
+{
+	rte_write32_relaxed((val), (reg));
+}
+
+#define mbx_rd32(hw, reg)	rnp_rd_reg((hw)->iobar4 + (reg))
+#define mbx_wr32(hw, reg, val)	rnp_wr_reg((hw)->iobar4 + (reg), (val))
+
+struct rnp_hw;
+/* Mbx Operate info */
+enum MBX_ID {
+	MBX_PF = 0,
+	MBX_VF,
+	MBX_CM3CPU,
+	MBX_FW = MBX_CM3CPU,
+	MBX_VFCNT
+};
+struct rnp_mbx_api {
+	void (*init_mbx)(struct rnp_hw *hw);
+	int32_t (*read)(struct rnp_hw *hw,
+			uint32_t *msg,
+			uint16_t size,
+			enum MBX_ID);
+	int32_t (*write)(struct rnp_hw *hw,
+			uint32_t *msg,
+			uint16_t size,
+			enum MBX_ID);
+	int32_t (*read_posted)(struct rte_eth_dev *dev,
+			uint32_t *msg,
+			uint16_t size,
+			enum MBX_ID);
+	int32_t (*write_posted)(struct rte_eth_dev *dev,
+			uint32_t *msg,
+			uint16_t size,
+			enum MBX_ID);
+	int32_t (*check_for_msg)(struct rnp_hw *hw, enum MBX_ID);
+	int32_t (*check_for_ack)(struct rnp_hw *hw, enum MBX_ID);
+	int32_t (*check_for_rst)(struct rnp_hw *hw, enum MBX_ID);
+	int32_t (*configure)(struct rnp_hw *hw, int nr_vec, bool enable);
+};
+
+struct rnp_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct rnp_mbx_info {
+	struct rnp_mbx_api ops;
+	uint32_t usec_delay;    /* retry interval delay time */
+	uint32_t timeout;       /* retry ops timeout limit */
+	uint16_t size;          /* data buffer size*/
+	uint16_t vf_num;        /* Virtual Function num */
+	uint16_t pf_num;        /* Physical Function num */
+	uint16_t sriov_st;      /* Sriov state */
+	bool irq_enabled;
+	union {
+		struct {
+			unsigned short pf_req;
+			unsigned short pf_ack;
+		};
+		struct {
+			unsigned short cpu_req;
+			unsigned short cpu_ack;
+		};
+	};
+	unsigned short vf_req[64];
+	unsigned short vf_ack[64];
+
+	struct rnp_mbx_stats stats;
+
+	rte_atomic16_t state;
+} __rte_cache_aligned;
+
+#define RNP_MAX_HW_PORT_PERR_PF (4)
 struct rnp_hw {
 	void *back;
 	char *iobar0;
 	uint32_t iobar0_len;
 	char *iobar4;
 	uint32_t iobar4_len;
+	char *link_sync;
+	char *dma_base;
+	char *eth_base;
+	char *veb_base;
+	char *mac_base[RNP_MAX_HW_PORT_PERR_PF];
+	char *msix_base;
+	/* === dma == */
+	char *dma_axi_en;
+	char *dma_axi_st;
 
 	uint16_t device_id;
 	uint16_t vendor_id;
-} __rte_cache_aligned;
+	uint16_t function;
+	uint16_t pf_vf_num;
+	uint16_t max_vfs;
+	void *cookie_pool;
+	char cookie_p_name[RTE_MEMZONE_NAMESIZE];
 
+	struct rnp_mbx_info mbx;
+} __rte_cache_aligned;
 #endif /* __RNP_H__*/
diff --git a/drivers/net/rnp/meson.build b/drivers/net/rnp/meson.build
index 36a1f7148d..38dbee5ca4 100644
--- a/drivers/net/rnp/meson.build
+++ b/drivers/net/rnp/meson.build
@@ -9,5 +9,6 @@ endif
 
 sources = files(
 		'rnp_ethdev.c',
+		'rnp_mbx.c',
 )
 includes += include_directories('base')
diff --git a/drivers/net/rnp/rnp.h b/drivers/net/rnp/rnp.h
index ea9d138673..a8999ddc47 100644
--- a/drivers/net/rnp/rnp.h
+++ b/drivers/net/rnp/rnp.h
@@ -3,6 +3,7 @@
  */
 #ifndef __RNP_H__
 #define __RNP_H__
+#include <rte_log.h>
 
 #include "base/rnp_hw.h"
 
@@ -14,14 +15,17 @@
 
 struct rnp_eth_port {
 	void *adapt;
+	struct rnp_hw *hw;
 	struct rte_eth_dev *eth_dev;
 } __rte_cache_aligned;
 
 struct rnp_share_ops {
+	struct rnp_mbx_api mbx_api;
 } __rte_cache_aligned;
 
 struct rnp_eth_adapter {
 	struct rnp_hw hw;
+	uint16_t max_vfs;
 	struct rte_pci_device *pdev;
 	struct rte_eth_dev *eth_dev; /* master eth_dev */
 	struct rnp_eth_port *ports[RNP_MAX_PORT_OF_PF];
@@ -34,5 +38,36 @@ struct rnp_eth_adapter {
 	(((struct rnp_eth_port *)((eth_dev)->data->dev_private)))
 #define RNP_DEV_TO_ADAPTER(eth_dev) \
 	((struct rnp_eth_adapter *)(RNP_DEV_TO_PORT(eth_dev)->adapt))
+#define RNP_DEV_TO_HW(eth_dev) \
+	(&((struct rnp_eth_adapter *)(RNP_DEV_TO_PORT((eth_dev))->adapt))->hw)
+#define RNP_DEV_PP_PRIV_TO_MBX_OPS(dev) \
+	(&((struct rnp_share_ops *)(dev)->process_private)->mbx_api)
+#define RNP_DEV_TO_MBX_OPS(dev)	RNP_DEV_PP_PRIV_TO_MBX_OPS(dev)
 
+static inline void rnp_reg_offset_init(struct rnp_hw *hw)
+{
+	uint16_t i;
+
+	if (hw->device_id == RNP_DEV_ID_N10G && hw->mbx.pf_num) {
+		hw->iobar4 += 0x100000;
+		hw->msix_base = hw->iobar4 + 0xa0000;
+		hw->msix_base += 0x200;
+	} else {
+		hw->msix_base = hw->iobar4 + 0xa0000;
+	}
+	/* === dma status/config====== */
+	hw->link_sync    = hw->iobar4 + 0x000c;
+	hw->dma_axi_en   = hw->iobar4 + 0x0010;
+	hw->dma_axi_st   = hw->iobar4 + 0x0014;
+
+	if (hw->mbx.pf_num)
+		hw->msix_base += 0x200;
+	/* === queue registers === */
+	hw->dma_base     = hw->iobar4 + 0x08000;
+	hw->veb_base     = hw->iobar4 + 0x0;
+	hw->eth_base     = hw->iobar4 + 0x10000;
+	/* mac */
+	for (i = 0; i < RNP_MAX_HW_PORT_PERR_PF; i++)
+		hw->mac_base[i] = hw->iobar4 + 0x60000 + 0x10000 * i;
+}
 #endif /* __RNP_H__ */
diff --git a/drivers/net/rnp/rnp_ethdev.c b/drivers/net/rnp/rnp_ethdev.c
index 18fd169166..4747a41423 100644
--- a/drivers/net/rnp/rnp_ethdev.c
+++ b/drivers/net/rnp/rnp_ethdev.c
@@ -8,8 +8,10 @@
 #include <ethdev_driver.h>
 
 #include "rnp.h"
+#include "rnp_mbx.h"
 #include "rnp_logs.h"
 
+extern struct rnp_mbx_api rnp_mbx_pf_ops;
 static int
 rnp_mac_rx_disable(struct rte_eth_dev *dev)
 {
@@ -89,6 +91,59 @@ rnp_alloc_eth_port(struct rte_pci_device *master_pci, char *name)
 	return NULL;
 }
 
+static void rnp_get_nic_attr(struct rnp_eth_adapter *adapter)
+{
+	RTE_SET_USED(adapter);
+}
+
+static int
+rnp_process_resource_init(struct rte_eth_dev *eth_dev,
+			  struct rnp_eth_adapter *adapter)
+{
+	struct rnp_share_ops *share_priv;
+
+	/* The process_private memory must not come from the DPDK
+	 * memory resource manager (e.g. rte_malloc or rte_memzone).
+	 */
+	/* process_private holds the per-process function pointer
+	 * table used to support secondary processes. If a secondary
+	 * process used function pointers stored in DPDK shared
+	 * memory, it would call primary-process virtual addresses
+	 * and core-dump. A secondary process must also not use the
+	 * PMD to allocate or free DPDK memory resources, which can
+	 * cause a hugepage memory exception. Each process therefore
+	 * keeps its own copy allocated with plain calloc(); be
+	 * careful when sharing pointer data through shared memory.
+	 */
+	share_priv = calloc(1, sizeof(*share_priv));
+	if (!share_priv) {
+		PMD_DRV_LOG(ERR, "calloc share_priv failed");
+		return -ENOMEM;
+	}
+
+	eth_dev->process_private = share_priv;
+	adapter->share_priv = share_priv;
+
+	return 0;
+}
+
+static void
+rnp_common_ops_init(struct rnp_eth_adapter *adapter)
+{
+	struct rnp_share_ops *share_priv;
+
+	share_priv = adapter->share_priv;
+	share_priv->mbx_api = rnp_mbx_pf_ops;
+}
+
+static int
+rnp_special_ops_init(struct rte_eth_dev *eth_dev)
+{
+	RTE_SET_USED(eth_dev);
+
+	return 0;
+}
+
 static int
 rnp_eth_dev_init(struct rte_eth_dev *dev)
 {
@@ -124,6 +179,19 @@ rnp_eth_dev_init(struct rte_eth_dev *dev)
 	hw->device_id = pci_dev->id.device_id;
 	hw->vendor_id = pci_dev->id.vendor_id;
 	hw->device_id = pci_dev->id.device_id;
+	adapter->max_vfs = pci_dev->max_vfs;
+	ret = rnp_process_resource_init(dev, adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "share private resource init failed");
+		return ret;
+	}
+	rnp_common_ops_init(adapter);
+	rnp_get_nic_attr(adapter);
+	/* use the device id to select the resource mode */
+	rnp_special_ops_init(dev);
+	port->adapt = adapter;
+	port->hw = hw;
+	rnp_init_mbx_ops_pf(hw);
 	for (p_id = 0; p_id < adapter->num_ports; p_id++) {
 		/* port 0 resource has been alloced When Probe */
 		if (!p_id) {
diff --git a/drivers/net/rnp/rnp_logs.h b/drivers/net/rnp/rnp_logs.h
index 1b3ee33745..f1648aabb5 100644
--- a/drivers/net/rnp/rnp_logs.h
+++ b/drivers/net/rnp/rnp_logs.h
@@ -13,6 +13,15 @@ extern int rnp_drv_logtype;
 #define RNP_PMD_DRV_LOG(level, fmt, args...) \
 	rte_log(RTE_LOG_##level, rnp_drv_logtype, \
 		"%s() " fmt, __func__, ##args)
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, rnp_drv_logtype, "%s(): " fmt, \
+			__func__, ## args)
+#define PMD_DRV_LOG(level, fmt, args...) \
+	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#define RNP_PMD_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_##level, rnp_drv_logtype, \
+			"rnp_net: (%d) " fmt, __LINE__, ##args)
 #ifdef RTE_LIBRTE_RNP_DEBUG_RX
 extern int rnp_rx_logtype;
 #define RNP_PMD_RX_LOG(level, fmt, args...) \
diff --git a/drivers/net/rnp/rnp_mbx.c b/drivers/net/rnp/rnp_mbx.c
new file mode 100644
index 0000000000..0f1c0a5b74
--- /dev/null
+++ b/drivers/net/rnp/rnp_mbx.c
@@ -0,0 +1,522 @@
+#include <rte_cycles.h>
+#include <rte_log.h>
+
+#include "rnp.h"
+#include "rnp_hw.h"
+#include "rnp_mbx.h"
+#include "rnp_mbx_fw.h"
+#include "rnp_logs.h"
+
+#define RNP_MAX_VF_FUNCTIONS	(64)
+/* == VEC == */
+#define VF2PF_MBOX_VEC(VF)	(0xa5100 + 4 * (VF))
+#define CPU2PF_MBOX_VEC		(0xa5300)
+
+/* == PF <--> VF mailbox ==== */
+#define SHARE_MEM_BYTES		(64) /* 64bytes */
+/* for PF1 rtl will remap 6000 to 0xb000 */
+#define PF_VF_SHM(vf)		((0xa6000) + (64 * (vf)))
+#define PF2VF_COUNTER(vf)	(PF_VF_SHM(vf) + 0)
+#define VF2PF_COUNTER(vf)	(PF_VF_SHM(vf) + 4)
+#define PF_VF_SHM_DATA(vf)	(PF_VF_SHM(vf) + 8)
+#define PF2VF_MBOX_CTRL(vf)	((0xa7100) + (4 * (vf)))
+#define PF_VF_MBOX_MASK_LO	((0xa7200))
+#define PF_VF_MBOX_MASK_HI	((0xa7300))
+
+/* === CPU <--> PF === */
+#define CPU_PF_SHM		(0xaa000)
+#define CPU2PF_COUNTER		(CPU_PF_SHM + 0)
+#define PF2CPU_COUNTER		(CPU_PF_SHM + 4)
+#define CPU_PF_SHM_DATA		(CPU_PF_SHM + 8)
+#define PF2CPU_MBOX_CTRL	(0xaa100)
+#define CPU_PF_MBOX_MASK	(0xaa300)
+
+/* === CPU <--> VF === */
+#define CPU_VF_SHM(vf)		(0xa8000 + (64 * (vf)))
+#define CPU2VF_COUNTER(vf)	(CPU_VF_SHM(vf) + 0)
+#define VF2CPU_COUNTER(vf)	(CPU_VF_SHM(vf) + 4)
+#define CPU_VF_SHM_DATA(vf)	(CPU_VF_SHM(vf) + 8)
+#define VF2CPU_MBOX_CTRL(vf)	(0xa9000 + 64 * (vf))
+#define CPU_VF_MBOX_MASK_LO(vf) (0xa9200 + 64 * (vf))
+#define CPU_VF_MBOX_MASK_HI(vf) (0xa9300 + 64 * (vf))
+
+#define MBOX_CTRL_REQ		(1 << 0)  /* WO */
+/* VF:WR, PF:RO */
+#define MBOX_CTRL_PF_HOLD_SHM	(1 << 3)  /* VF:RO, PF:WR */
+
+#define MBOX_IRQ_EN		(0)
+#define MBOX_IRQ_DISABLE	(1)
+
+/****************************PF MBX OPS************************************/
+static inline u16 rnp_mbx_get_req(struct rnp_hw *hw, int reg)
+{
+	rte_mb();
+	return mbx_rd32(hw, reg) & 0xffff;
+}
+
+static inline u16 rnp_mbx_get_ack(struct rnp_hw *hw, int reg)
+{
+	rte_mb();
+	return (mbx_rd32(hw, reg) >> 16) & 0xffff;
+}
+
+static inline void rnp_mbx_inc_pf_req(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int reg = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_COUNTER : PF2VF_COUNTER(mbx_id);
+	u32 v = mbx_rd32(hw, reg);
+	u16 req;
+
+	req = (v & 0xffff);
+	req++;
+	v &= ~(0x0000ffff);
+	v |= req;
+
+	rte_mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	/* hw->mbx.stats.msgs_tx++; */
+}
+
+static inline void rnp_mbx_inc_pf_ack(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int reg = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_COUNTER : PF2VF_COUNTER(mbx_id);
+	u32 v = mbx_rd32(hw, reg);
+	u16 ack;
+
+	ack = (v >> 16) & 0xffff;
+	ack++;
+	v &= ~(0xffff0000);
+	v |= (ack << 16);
+
+	rte_mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	/* hw->mbx.stats.msgs_rx++; */
+}
+
+/**
+ *  rnp_poll_for_msg - Wait for message notification
+ *  @dev: pointer to the rte_eth_dev structure
+ *  @mbx_id: id of the mailbox to poll
+ *
+ *  returns 0 if a message notification was received, -ETIME on timeout
+ **/
+static int32_t rnp_poll_for_msg(struct rte_eth_dev *dev, enum MBX_ID mbx_id)
+{
+	struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+	struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !ops->check_for_msg)
+		goto out;
+
+	while (countdown && ops->check_for_msg(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		rte_delay_us_block(mbx->usec_delay);
+	}
+
+out:
+	return countdown ? 0 : -ETIME;
+}
+
+/**
+ *  rnp_poll_for_ack - Wait for message acknowledgment
+ *  @dev: pointer to the rte_eth_dev structure
+ *  @mbx_id: id of the mailbox to poll
+ *
+ *  returns 0 if a message acknowledgment was received, -ETIME on timeout
+ **/
+static int32_t rnp_poll_for_ack(struct rte_eth_dev *dev, enum MBX_ID mbx_id)
+{
+	struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+	struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !ops->check_for_ack)
+		goto out;
+
+	while (countdown && ops->check_for_ack(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		rte_delay_us_block(mbx->usec_delay);
+	}
+
+out:
+	return countdown ? 0 : -ETIME;
+}
+
+/**
+ *  rnp_read_posted_mbx_pf - Wait for message notification and receive message
+ *  @dev: pointer to the rte_eth_dev structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+static int32_t
+rnp_read_posted_mbx_pf(struct rte_eth_dev *dev, u32 *msg, u16 size,
+		       enum MBX_ID mbx_id)
+{
+	struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+	struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+	int32_t ret_val = -ETIME;
+
+	if (!ops->read || !countdown)
+		return -EOPNOTSUPP;
+
+	ret_val = rnp_poll_for_msg(dev, mbx_id);
+
+	/* if ack received read message, otherwise we timed out */
+	if (!ret_val)
+		return ops->read(hw, msg, size, mbx_id);
+	return ret_val;
+}
+
+/**
+ *  rnp_write_posted_mbx_pf - Write a message to the mailbox, wait for ack
+ *  @dev: pointer to the rte_eth_dev structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+static int32_t
+rnp_write_posted_mbx_pf(struct rte_eth_dev *dev, u32 *msg, u16 size,
+			enum MBX_ID mbx_id)
+{
+	struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+	struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int32_t ret_val = -ETIME;
+
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!ops->write || !mbx->timeout)
+		goto out;
+
+	/* send msg and hold buffer lock */
+	if (ops->write)
+		ret_val = ops->write(hw, msg, size, mbx_id);
+
+	/* if msg sent wait until we receive an ack */
+	if (!ret_val)
+		ret_val = rnp_poll_for_ack(dev, mbx_id);
+out:
+	return ret_val;
+}
+
+/**
+ *  rnp_check_for_msg_pf - checks to see if the VF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: the VF index, or MBX_CM3CPU for the firmware
+ *
+ *  returns 0 if a new request is pending, -ETIME otherwise
+ **/
+static int32_t rnp_check_for_msg_pf(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int32_t ret_val = -ETIME;
+
+	if (mbx_id == MBX_CM3CPU) {
+		if (rnp_mbx_get_req(hw, CPU2PF_COUNTER) != hw->mbx.cpu_req) {
+			ret_val = 0;
+			/* hw->mbx.stats.reqs++; */
+		}
+	} else {
+		if (rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx_id)) !=
+				hw->mbx.vf_req[mbx_id]) {
+			ret_val = 0;
+			/* hw->mbx.stats.reqs++; */
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnp_check_for_ack_pf - checks to see if the VF has ACKed
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: the VF index, or MBX_CM3CPU for the firmware
+ *
+ *  returns 0 if a new ack has been received, -ETIME otherwise
+ **/
+static int32_t rnp_check_for_ack_pf(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int32_t ret_val = -ETIME;
+
+	if (mbx_id == MBX_CM3CPU) {
+		if (rnp_mbx_get_ack(hw, CPU2PF_COUNTER) != hw->mbx.cpu_ack) {
+			ret_val = 0;
+			/* hw->mbx.stats.acks++; */
+		}
+	} else {
+		if (rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx_id)) != hw->mbx.vf_ack[mbx_id]) {
+			ret_val = 0;
+			/* hw->mbx.stats.acks++; */
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnp_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: the VF index or CPU
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static int32_t rnp_obtain_mbx_lock_pf(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int32_t ret_val = -ETIME;
+	int try_cnt = 5000;  /* 500ms */
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_MBOX_CTRL : PF2VF_MBOX_CTRL(mbx_id);
+
+	while (try_cnt-- > 0) {
+		/* Take ownership of the buffer */
+		mbx_wr32(hw, CTRL_REG, MBOX_CTRL_PF_HOLD_SHM);
+
+		/* reserve mailbox for cm3 use */
+		if (mbx_rd32(hw, CTRL_REG) & MBOX_CTRL_PF_HOLD_SHM)
+			return 0;
+		rte_delay_us_block(100);
+	}
+
+	RNP_PMD_LOG(WARNING, "%s: failed to get:%d lock\n",
+			__func__, mbx_id);
+	return ret_val;
+}
+
+/**
+ *  rnp_write_mbx_pf - Places a message in the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: the VF index
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static int32_t rnp_write_mbx_pf(struct rnp_hw *hw, u32 *msg,
+				u16 size, enum MBX_ID mbx_id)
+{
+	u32 DATA_REG = (mbx_id == MBX_CM3CPU) ?
+		CPU_PF_SHM_DATA : PF_VF_SHM_DATA(mbx_id);
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_MBOX_CTRL : PF2VF_MBOX_CTRL(mbx_id);
+	int32_t ret_val = 0;
+	u32 stat __rte_unused;
+	u16 i;
+
+	if (size > RNP_VFMAILBOX_SIZE) {
+		RNP_PMD_LOG(ERR, "%s: size:%d should <%d\n", __func__,
+				size, RNP_VFMAILBOX_SIZE);
+		return -EINVAL;
+	}
+
+	/* lock the mailbox to prevent pf/vf/cpu race condition */
+	ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id);
+	if (ret_val) {
+		RNP_PMD_LOG(WARNING, "PF[%d] Can't Get Mbx-Lock Try Again\n",
+				hw->function);
+		return ret_val;
+	}
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++) {
+#ifdef MBX_WR_DEBUG
+		mbx_pwr32(hw, DATA_REG + i * 4, msg[i]);
+#else
+		mbx_wr32(hw, DATA_REG + i * 4, msg[i]);
+#endif
+	}
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	if (mbx_id == MBX_CM3CPU)
+		hw->mbx.cpu_ack = rnp_mbx_get_ack(hw, CPU2PF_COUNTER);
+	else
+		hw->mbx.vf_ack[mbx_id] = rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx_id));
+
+	rnp_mbx_inc_pf_req(hw, mbx_id);
+	rte_mb();
+
+	rte_delay_us(300);
+
+	/* Interrupt VF/CM3 to tell it a message
+	 * has been sent and release buffer
+	 */
+	mbx_wr32(hw, CTRL_REG, MBOX_CTRL_REQ);
+
+	return 0;
+}
+
+/**
+ *  rnp_read_mbx_pf - Read a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: the VF index, or MBX_CM3CPU for the firmware
+ *
+ *  This function copies a message from the mailbox buffer to the caller's
+ *  memory buffer.  The presumption is that the caller knows that there was
+ *  a message due to a VF/CPU request so no polling for message is needed.
+ **/
+static int32_t rnp_read_mbx_pf(struct rnp_hw *hw, u32 *msg,
+			       u16 size, enum MBX_ID mbx_id)
+{
+	u32 BUF_REG  = (mbx_id == MBX_CM3CPU) ?
+		CPU_PF_SHM_DATA : PF_VF_SHM_DATA(mbx_id);
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_MBOX_CTRL : PF2VF_MBOX_CTRL(mbx_id);
+	int32_t ret_val = -EIO;
+	u32 stat __rte_unused, i;
+	if (size > RNP_VFMAILBOX_SIZE) {
+		RNP_PMD_LOG(ERR, "%s: size:%d should <%d\n", __func__,
+				size, RNP_VFMAILBOX_SIZE);
+		return -EINVAL;
+	}
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id);
+	if (ret_val)
+		goto out_no_read;
+
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++) {
+#ifdef MBX_RD_DEBUG
+		msg[i] = mbx_prd32(hw, BUF_REG + 4 * i);
+#else
+		msg[i] = mbx_rd32(hw, BUF_REG + 4 * i);
+#endif
+	}
+	mbx_wr32(hw, BUF_REG, 0);
+
+	/* update req. used by rnpvf_check_for_msg_vf  */
+	if (mbx_id == MBX_CM3CPU)
+		hw->mbx.cpu_req = rnp_mbx_get_req(hw, CPU2PF_COUNTER);
+	else
+		hw->mbx.vf_req[mbx_id] = rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx_id));
+
+	/* this ack may be too early? */
+	/* Acknowledge receipt and release mailbox, then we're done */
+	rnp_mbx_inc_pf_ack(hw, mbx_id);
+
+	rte_mb();
+
+	/* free ownership of the buffer */
+	mbx_wr32(hw, CTRL_REG, 0);
+
+out_no_read:
+
+	return ret_val;
+}
+
+static void rnp_mbx_reset_pf(struct rnp_hw *hw)
+{
+	int v;
+
+	/* reset pf->cm3 status */
+	v = mbx_rd32(hw, CPU2PF_COUNTER);
+	hw->mbx.cpu_req = v & 0xffff;
+	hw->mbx.cpu_ack = (v >> 16) & 0xffff;
+	/* release   pf->cm3 buffer lock */
+	mbx_wr32(hw, PF2CPU_MBOX_CTRL, 0);
+
+	rte_mb();
+	/* enable irq to fw */
+	mbx_wr32(hw, CPU_PF_MBOX_MASK, 0);
+}
+
+static int get_pfvfnum(struct rnp_hw *hw)
+{
+	uint32_t addr_mask;
+	uint32_t offset;
+	uint32_t val;
+#define RNP_PF_NUM_REG       (0x75f000)
+#define RNP_PFVF_SHIFT       (4)
+#define RNP_PF_SHIFT         (6)
+#define RNP_PF_BIT_MASK      BIT(6)
+	addr_mask = hw->iobar0_len - 1;
+	offset = RNP_PF_NUM_REG & addr_mask;
+	val = rnp_rd_reg(hw->iobar0 + offset);
+
+	return val >> RNP_PFVF_SHIFT;
+}
+
+struct rnp_mbx_api rnp_mbx_pf_ops = {
+	.read           = rnp_read_mbx_pf,
+	.write          = rnp_write_mbx_pf,
+	.read_posted    = rnp_read_posted_mbx_pf,
+	.write_posted   = rnp_write_posted_mbx_pf,
+	.check_for_msg  = rnp_check_for_msg_pf,
+	.check_for_ack  = rnp_check_for_ack_pf,
+};
+
+void *rnp_memzone_reserve(const char *name, unsigned int size)
+{
+#define NO_FLAGS 0
+	const struct rte_memzone *mz = NULL;
+
+	if (name) {
+		if (size) {
+			mz = rte_memzone_reserve(name, size,
+					rte_socket_id(), NO_FLAGS);
+			if (mz)
+				memset(mz->addr, 0, size);
+		} else {
+			mz = rte_memzone_lookup(name);
+		}
+		return mz ? mz->addr : NULL;
+	}
+	return NULL;
+}
+
+void rnp_init_mbx_ops_pf(struct rnp_hw *hw)
+{
+	struct rnp_eth_adapter *adapter = hw->back;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	struct mbx_req_cookie *cookie;
+	uint32_t vf_isolat_off;
+
+	mbx->size       = RNP_VFMAILBOX_SIZE;
+	mbx->usec_delay = RNP_MBX_DELAY_US;
+	mbx->timeout    = (RNP_MBX_TIMEOUT_SECONDS * 1000 * 1000) /
+		mbx->usec_delay;
+	if (hw->device_id == RNP_DEV_ID_N10G) {
+		vf_isolat_off = RNP_VF_ISOLATE_CTRL &
+			(hw->iobar0_len - 1);
+		rnp_wr_reg(hw->iobar0 + vf_isolat_off, 0);
+	}
+	mbx->sriov_st = 0;
+	hw->pf_vf_num = get_pfvfnum(hw);
+	mbx->vf_num = UINT16_MAX;
+	mbx->pf_num = (hw->pf_vf_num & RNP_PF_BIT_MASK) >> RNP_PF_SHIFT;
+	hw->function = mbx->pf_num;
+	/* Retrieving and storing the HW base address of device */
+	rnp_reg_offset_init(hw);
+	snprintf(hw->cookie_p_name, RTE_MEMZONE_NAMESIZE, "mbx_req_cookie%d_%d",
+			hw->function, adapter->eth_dev->data->port_id);
+	hw->cookie_pool = rnp_memzone_reserve(hw->cookie_p_name,
+			sizeof(struct mbx_req_cookie));
+
+	cookie = (struct mbx_req_cookie *)hw->cookie_pool;
+	if (cookie) {
+		cookie->timeout_ms = 1000;
+		cookie->magic = COOKIE_MAGIC;
+		cookie->priv_len = RNP_MAX_SHARE_MEM;
+	}
+
+	rnp_mbx_reset_pf(hw);
+}
diff --git a/drivers/net/rnp/rnp_mbx.h b/drivers/net/rnp/rnp_mbx.h
new file mode 100644
index 0000000000..ee020b5fbc
--- /dev/null
+++ b/drivers/net/rnp/rnp_mbx.h
@@ -0,0 +1,138 @@
+#ifndef __TSRN10_MBX_H__
+#define __TSRN10_MBX_H__
+
+#define VF_NUM_MASK_TEMP	(0xff0)
+#define VF_NUM_OFF		(4)
+#define RNP_VF_NUM		(0x75f000)
+#define RNP_VF_NB_MASK		(0x3f)
+#define RNP_PF_NB_MASK		(0x40)
+#define RNP_VF_ISOLATE_CTRL	(0x7982fc)
+#define RNP_IS_SRIOV		BIT(7)
+#define RNP_SRIOV_ST_SHIFT	(24)
+#define RNP_VF_DEFAULT_PORT	(0)
+
+/* Mbx Ctrl state */
+#define RNP_VFMAILBOX_SIZE	(14) /* 14 32-bit data words */
+#define TSRN10_VFMBX_SIZE	(RNP_VFMAILBOX_SIZE)
+#define RNP_VT_MSGTYPE_ACK	(0x80000000)
+
+#define RNP_VT_MSGTYPE_NACK	(0x40000000)
+/* Messages below or'd with this are the NACK */
+#define RNP_VT_MSGTYPE_CTS	(0x20000000)
+/* Indicates that VF is still clear to send requests */
+#define RNP_VT_MSGINFO_SHIFT	(16)
+
+#define RNP_VT_MSGINFO_MASK	(0xFF << RNP_VT_MSGINFO_SHIFT)
+/* The mailbox memory size is 64 bytes accessed by 32-bit registers */
+#define RNP_VLVF_VIEN		(0x80000000) /* filter is valid */
+#define RNP_VLVF_ENTRIES	(64)
+#define RNP_VLVF_VLANID_MASK	(0x00000FFF)
+/* Every VF owns 64 bytes of mailbox memory, accessed as 32-bit words */
+
+#define RNP_VF_RESET		(0x01) /* VF requests reset */
+#define RNP_VF_SET_MAC_ADDR	(0x02) /* VF requests PF to set MAC addr */
+#define RNP_VF_SET_MULTICAST	(0x03) /* VF requests PF to set MC addr */
+#define RNP_VF_SET_VLAN		(0x04) /* VF requests PF to set VLAN */
+
+#define RNP_VF_SET_LPE		(0x05) /* VF requests PF to set VMOLR.LPE */
+#define RNP_VF_SET_MACVLAN	(0x06) /* VF requests PF for unicast filter */
+#define RNP_VF_GET_MACVLAN	(0x07) /* VF requests mac */
+#define RNP_VF_API_NEGOTIATE	(0x08) /* negotiate API version */
+#define RNP_VF_GET_QUEUES	(0x09) /* get queue configuration */
+#define RNP_VF_GET_LINK		(0x10) /* get link status */
+
+#define RNP_VF_SET_VLAN_STRIP	(0x0a) /* VF Requests PF to set VLAN STRIP */
+#define RNP_VF_REG_RD		(0x0b) /* VF Read Reg */
+#define RNP_VF_GET_MAX_MTU	(0x0c) /* VF Get Max Mtu */
+#define RNP_VF_SET_MTU		(0x0d) /* VF Set Mtu */
+#define RNP_VF_GET_FW		(0x0e) /* VF Get Firmware Version */
+
+#define RNP_PF_VFNUM_MASK	GENMASK(26, 21)
+
+#define RNP_PF_SET_FCS		(0x10) /* PF set fcs status */
+#define RNP_PF_SET_PAUSE	(0x11) /* PF set pause status */
+#define RNP_PF_SET_FT_PADDING	(0x12) /* PF set ft padding status */
+#define RNP_PF_SET_VLAN_FILTER	(0x13) /* PF set ntuple status */
+#define RNP_PF_SET_VLAN		(0x14)
+#define RNP_PF_SET_LINK		(0x15)
+#define RNP_PF_SET_SPEED_40G	BIT(8)
+#define RNP_PF_SET_SPEED_10G	BIT(7)
+#define RNP_PF_SET_SPEED_1G	BIT(5)
+#define RNP_PF_SET_SPEED_100M	BIT(3)
+
+#define RNP_PF_SET_MTU		(0x16)
+#define RNP_PF_SET_RESET	(0x17)
+#define RNP_PF_LINK_UP		BIT(31)
+#define RNP_PF_SPEED_MASK	GENMASK(15, 0)
+
+/* Define mailbox register bits */
+#define RNP_PF_REMOVE		(0x0f)
+
+/* Mailbox API ID VF Request */
+/* length of permanent address message returned from PF */
+#define RNP_VF_PERMADDR_MSG_LEN (11)
+#define RNP_VF_TX_QUEUES	(1) /* number of Tx queues supported */
+#define RNP_VF_RX_QUEUES	(2) /* number of Rx queues supported */
+#define RNP_VF_TRANS_VLAN	(3) /* Indication of port vlan */
+#define RNP_VF_DEF_QUEUE	(4) /* Default queue offset */
+/* word in permanent address message with the current multicast type */
+#define RNP_VF_VLAN_WORD	(5)
+#define RNP_VF_PHY_TYPE_WORD	(6)
+#define RNP_VF_FW_VERSION_WORD	(7)
+#define RNP_VF_LINK_STATUS_WORD	(8)
+#define RNP_VF_AXI_MHZ		(9)
+#define RNP_VF_RNP_VF_FEATURE	(10)
+#define RNP_VF_RNP_VF_FILTER_EN	BIT(0)
+
+#define RNP_LINK_SPEED_UNKNOWN 0
+#define RNP_LINK_SPEED_10_FULL    BIT(2)
+#define RNP_LINK_SPEED_100_FULL   BIT(3)
+#define RNP_LINK_SPEED_1GB_FULL   BIT(4)
+#define RNP_LINK_SPEED_10GB_FULL  BIT(5)
+#define RNP_LINK_SPEED_40GB_FULL  BIT(6)
+#define RNP_LINK_SPEED_25GB_FULL  BIT(7)
+#define RNP_LINK_SPEED_50GB_FULL  BIT(8)
+#define RNP_LINK_SPEED_100GB_FULL BIT(9)
+#define RNP_LINK_SPEED_10_HALF    BIT(10)
+#define RNP_LINK_SPEED_100_HALF   BIT(11)
+#define RNP_LINK_SPEED_1GB_HALF   BIT(12)
+
+/* Mailbox API ID PF Request */
+#define RNP_VF_MC_TYPE_WORD		(3)
+#define RNP_VF_DMA_VERSION_WORD		(4)
+/* Get Queue write-back reference value */
+#define RNP_PF_CONTROL_PRING_MSG	(0x0100) /* PF control message */
+
+#define TSRN10_MBX_VECTOR_ID            (0)
+#define TSRN10_PF2VF_MBX_VEC_CTR(n)     (0xa5000 + 0x4 * (n))
+
+#define RNP_VF_INIT_TIMEOUT		(200) /* Number of retries to clear RSTI */
+#define RNP_VF_MBX_INIT_TIMEOUT		(2000) /* number of retries on mailbox */
+
+#define MBOX_CTRL_REQ			(1 << 0) /* WO */
+#define MBOX_CTRL_VF_HOLD_SHM		(1 << 2) /* VF:WR, PF:RO */
+#define VF_NUM_MASK 0x3f
+#define VFNUM(num)			((num) & VF_NUM_MASK)
+
+#define PF_VF_SHM(vf)	\
+	((0xa6000) + (64 * (vf))) /* for PF1 rtl will remap 6000 to 0xb000 */
+#define PF2VF_COUNTER(vf)		(PF_VF_SHM(vf) + 0)
+#define VF2PF_COUNTER(vf)		(PF_VF_SHM(vf) + 4)
+#define PF_VF_SHM_DATA(vf)		(PF_VF_SHM(vf) + 8)
+#define VF2PF_MBOX_CTRL(vf)		((0xa7000) + (4 * (vf)))
+
+/* Error Codes */
+#define RNP_ERR_INVALID_MAC_ADDR	(-1)
+#define RNP_ERR_MBX			(-100)
+
+#define RNP_MBX_DELAY_US		(100) /* Delay us for Retry */
+/* Max Retry Time */
+#define RNP_MBX_TIMEOUT_SECONDS	(2) /* Max Retry Time 2s */
+#define RNP_ARRAY_OPCODE_OFFSET	(0)
+#define RNP_ARRAY_CTRL_OFFSET	(1)
+
+void rnp_init_mbx_ops_pf(struct rnp_hw *hw);
+void *rnp_memzone_reserve(const char *name, unsigned int size);
+#endif
diff --git a/drivers/net/rnp/rnp_mbx_fw.h b/drivers/net/rnp/rnp_mbx_fw.h
new file mode 100644
index 0000000000..439090b5a3
--- /dev/null
+++ b/drivers/net/rnp/rnp_mbx_fw.h
@@ -0,0 +1,22 @@
+#ifndef __RNP_MBX_FW_H__
+#define __RNP_MBX_FW_H__
+
+struct mbx_fw_cmd_reply;
+typedef void (*cookie_cb)(struct mbx_fw_cmd_reply *reply, void *priv);
+#define RNP_MAX_SHARE_MEM (8 * 8)
+struct mbx_req_cookie {
+	int magic;
+#define COOKIE_MAGIC 0xCE
+	cookie_cb cb;
+	int timeout_ms;
+	int errcode;
+
+	/* wait_queue_head_t wait; */
+	volatile int done;
+	int priv_len;
+	char priv[RNP_MAX_SHARE_MEM];
+};
+struct mbx_fw_cmd_reply {
+} __rte_cache_aligned;
+
+#endif /* __RNP_MBX_FW_H__*/
-- 
2.27.0
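
For reference, a minimal sketch (not part of the patch) of how the posted
mailbox ops above could be driven from the PF side. Only the ops signatures,
the RNP_DEV_TO_MBX_OPS() macro and MBX_FW come from the patch; the helper
itself is hypothetical:

	/* hypothetical helper: post a request to the firmware mailbox and
	 * wait for the reply, using the ops table kept in process_private
	 */
	static int rnp_fw_send_cmd(struct rte_eth_dev *dev, uint32_t *req,
				   uint32_t *rep, uint16_t words)
	{
		struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
		int err;

		/* write the request and wait for the firmware to ack it */
		err = ops->write_posted(dev, req, words, MBX_FW);
		if (err)
			return err;

		/* wait for the firmware notification and read the reply back */
		return ops->read_posted(dev, rep, words, MBX_FW);
	}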


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [[PATCH v1] 4/8] net/rnp: add mbx basic api feature
  2023-08-01  7:22 [[PATCH v1] 4/8] net/rnp: add mbx basic api feature Wenbo Cao
@ 2023-08-01 15:25 ` Stephen Hemminger
  2023-08-02  1:41   ` 11
  0 siblings, 1 reply; 5+ messages in thread
From: Stephen Hemminger @ 2023-08-01 15:25 UTC (permalink / raw)
  To: Wenbo Cao; +Cc: dev

On Tue,  1 Aug 2023 07:22:07 +0000
Wenbo Cao <caowenbo@mucse.com> wrote:

> +struct rnp_mbx_api {
> +	void (*init_mbx)(struct rnp_hw *hw);
> +	int32_t (*read)(struct rnp_hw *hw,
> +			uint32_t *msg,
> +			uint16_t size,
> +			enum MBX_ID);
> +	int32_t (*write)(struct rnp_hw *hw,
> +			uint32_t *msg,
> +			uint16_t size,
> +			enum MBX_ID);
> +	int32_t (*read_posted)(struct rte_eth_dev *dev,
> +			uint32_t *msg,
> +			uint16_t size,
> +			enum MBX_ID);
> +	int32_t (*write_posted)(struct rte_eth_dev *dev,
> +			uint32_t *msg,
> +			uint16_t size,
> +			enum MBX_ID);
> +	int32_t (*check_for_msg)(struct rnp_hw *hw, enum MBX_ID);
> +	int32_t (*check_for_ack)(struct rnp_hw *hw, enum MBX_ID);
> +	int32_t (*check_for_rst)(struct rnp_hw *hw, enum MBX_ID);
> +	int32_t (*configure)(struct rnp_hw *hw, int nr_vec, bool enable);
> +};

Since this data structure only has virtual function pointers, many types of
bugs can be avoided if it can be made const.
If it is const, then the compiler can put the initialized table in a
read-only section.


^ permalink raw reply	[flat|nested] 5+ messages in thread

* RE: [[PATCH v1] 4/8] net/rnp: add mbx basic api feature
  2023-08-01 15:25 ` Stephen Hemminger
@ 2023-08-02  1:41   ` 11
  2023-08-02  2:27     ` Stephen Hemminger
  0 siblings, 1 reply; 5+ messages in thread
From: 11 @ 2023-08-02  1:41 UTC (permalink / raw)
  To: 'Stephen Hemminger'; +Cc: dev

Hi Stephen,

Thanks for your comment. Following your advice, should I define the
virtual function pointers as the type below?
const struct rnp_mbx_api {
..
}
Will that protect the virtual function addresses so they can't be
overwritten by mistake?

Regards Wenbo
-----Original Message-----
From: Stephen Hemminger <stephen@networkplumber.org> 
Sent: 1 August 2023 23:26
To: Wenbo Cao <caowenbo@mucse.com>
Cc: dev@dpdk.org
Subject: Re: [[PATCH v1] 4/8] net/rnp: add mbx basic api feature

On Tue,  1 Aug 2023 07:22:07 +0000
Wenbo Cao <caowenbo@mucse.com> wrote:

> +struct rnp_mbx_api {
> +	void (*init_mbx)(struct rnp_hw *hw);
> +	int32_t (*read)(struct rnp_hw *hw,
> +			uint32_t *msg,
> +			uint16_t size,
> +			enum MBX_ID);
> +	int32_t (*write)(struct rnp_hw *hw,
> +			uint32_t *msg,
> +			uint16_t size,
> +			enum MBX_ID);
> +	int32_t (*read_posted)(struct rte_eth_dev *dev,
> +			uint32_t *msg,
> +			uint16_t size,
> +			enum MBX_ID);
> +	int32_t (*write_posted)(struct rte_eth_dev *dev,
> +			uint32_t *msg,
> +			uint16_t size,
> +			enum MBX_ID);
> +	int32_t (*check_for_msg)(struct rnp_hw *hw, enum MBX_ID);
> +	int32_t (*check_for_ack)(struct rnp_hw *hw, enum MBX_ID);
> +	int32_t (*check_for_rst)(struct rnp_hw *hw, enum MBX_ID);
> +	int32_t (*configure)(struct rnp_hw *hw, int nr_vec, bool enable); };

Since this data structure only has virtual function pointers, many types of
bugs can be avoided if it can be made const.
If it is const, then the compiler can put the initialized table in a
read-only section.




^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [[PATCH v1] 4/8] net/rnp: add mbx basic api feature
  2023-08-02  1:41   ` 11
@ 2023-08-02  2:27     ` Stephen Hemminger
  0 siblings, 0 replies; 5+ messages in thread
From: Stephen Hemminger @ 2023-08-02  2:27 UTC (permalink / raw)
  To: 11; +Cc: dev

On Wed, 2 Aug 2023 09:41:29 +0800
"11" <caowenbo@mucse.com> wrote:

> Hi Stephen,
> 
> Thanks for your comment. Following your advice, should I define the
> virtual function pointers as the type below?

No, keep the struct the same, but make it const where used.
You also do a copy of the ops struct so the one you initialize
ends up in the shared ops. It would be cleaner to have one
initialized object and have objects point to that.

Tried something like this, but it won't work.


diff --git a/drivers/net/rnp/rnp.h b/drivers/net/rnp/rnp.h
index a8999ddc471c..437a2cc2093d 100644
--- a/drivers/net/rnp/rnp.h
+++ b/drivers/net/rnp/rnp.h
@@ -20,7 +20,7 @@ struct rnp_eth_port {
 } __rte_cache_aligned;
 
 struct rnp_share_ops {
-	struct rnp_mbx_api mbx_api;
+	const struct rnp_mbx_api *mbx_api;
 } __rte_cache_aligned;
 
 struct rnp_eth_adapter {
@@ -41,7 +41,7 @@ struct rnp_eth_adapter {
 #define RNP_DEV_TO_HW(eth_dev) \
 	(&((struct rnp_eth_adapter *)(RNP_DEV_TO_PORT((eth_dev))->adapt))->hw)
 #define RNP_DEV_PP_PRIV_TO_MBX_OPS(dev) \
-	(&((struct rnp_share_ops *)(dev)->process_private)->mbx_api)
+	(((struct rnp_share_ops *)(dev)->process_private)->mbx_api)
 #define RNP_DEV_TO_MBX_OPS(dev)	RNP_DEV_PP_PRIV_TO_MBX_OPS(dev)
 
 static inline void rnp_reg_offset_init(struct rnp_hw *hw)
diff --git a/drivers/net/rnp/rnp_ethdev.c b/drivers/net/rnp/rnp_ethdev.c
index 4747a41423dd..4518eaa3efa9 100644
--- a/drivers/net/rnp/rnp_ethdev.c
+++ b/drivers/net/rnp/rnp_ethdev.c
@@ -11,7 +11,6 @@
 #include "rnp_mbx.h"
 #include "rnp_logs.h"
 
-extern struct rnp_mbx_api rnp_mbx_pf_ops;
 static int
 rnp_mac_rx_disable(struct rte_eth_dev *dev)
 {
@@ -133,7 +132,7 @@ rnp_common_ops_init(struct rnp_eth_adapter *adapter)
 	struct rnp_share_ops *share_priv;
 
 	share_priv = adapter->share_priv;
-	share_priv->mbx_api = rnp_mbx_pf_ops;
+	share_priv->mbx_api = &rnp_mbx_pf_ops;
 }
 
 static int
diff --git a/drivers/net/rnp/rnp_mbx.c b/drivers/net/rnp/rnp_mbx.c
index 0f1c0a5b7471..b39e05bb715c 100644
--- a/drivers/net/rnp/rnp_mbx.c
+++ b/drivers/net/rnp/rnp_mbx.c
@@ -455,7 +455,7 @@ static int get_pfvfnum(struct rnp_hw *hw)
 	return val >> RNP_PFVF_SHIFT;
 }
 
-struct rnp_mbx_api rnp_mbx_pf_ops = {
+const struct rnp_mbx_api rnp_mbx_pf_ops = {
 	.read           = rnp_read_mbx_pf,
 	.write          = rnp_write_mbx_pf,
 	.read_posted    = rnp_read_posted_mbx_pf,
diff --git a/drivers/net/rnp/rnp_mbx.h b/drivers/net/rnp/rnp_mbx.h
index ee020b5fbce7..0fae78167336 100644
--- a/drivers/net/rnp/rnp_mbx.h
+++ b/drivers/net/rnp/rnp_mbx.h
@@ -135,4 +135,6 @@
 
 void rnp_init_mbx_ops_pf(struct rnp_hw *hw);
 void *rnp_memzone_reserve(const char *name, unsigned int size);
+
+extern const struct rnp_mbx_api rnp_mbx_pf_ops;
 #endif


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [[PATCH v1] 4/8] net/rnp: add mbx basic api feature
  2023-08-01  6:43 [[PATCH v1] 0/8] *** SUBJECT HERE *** Wenbo Cao
@ 2023-08-01  6:43 ` Wenbo Cao
  0 siblings, 0 replies; 5+ messages in thread
From: Wenbo Cao @ 2023-08-01  6:43 UTC (permalink / raw)
  To: caowenbo; +Cc: dev

mbx base code is used to communicate with the firmware

Signed-off-by: Wenbo Cao <caowenbo@mucse.com>
---
 drivers/net/rnp/base/rnp_hw.h | 107 ++++++-
 drivers/net/rnp/meson.build   |   1 +
 drivers/net/rnp/rnp.h         |  35 +++
 drivers/net/rnp/rnp_ethdev.c  |  68 +++++
 drivers/net/rnp/rnp_logs.h    |   9 +
 drivers/net/rnp/rnp_mbx.c     | 522 ++++++++++++++++++++++++++++++++++
 drivers/net/rnp/rnp_mbx.h     | 138 +++++++++
 drivers/net/rnp/rnp_mbx_fw.h  |  22 ++
 8 files changed, 901 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/rnp/rnp_mbx.c
 create mode 100644 drivers/net/rnp/rnp_mbx.h
 create mode 100644 drivers/net/rnp/rnp_mbx_fw.h

diff --git a/drivers/net/rnp/base/rnp_hw.h b/drivers/net/rnp/base/rnp_hw.h
index 543011a64c..da3659bd67 100644
--- a/drivers/net/rnp/base/rnp_hw.h
+++ b/drivers/net/rnp/base/rnp_hw.h
@@ -4,15 +4,120 @@
 #ifndef __RNP_HW_H__
 #define __RNP_HW_H__
 
+#include <rte_io.h>
+#include <ethdev_driver.h>
+
+#include "rnp_osdep.h"
+
+static inline unsigned int rnp_rd_reg(volatile void *addr)
+{
+	unsigned int v = rte_read32(addr);
+
+	return v;
+}
+
+static inline void rnp_wr_reg(volatile void *reg, int val)
+{
+	rte_write32_relaxed((val), (reg));
+}
+
+#define mbx_rd32(hw, reg)	rnp_rd_reg((hw)->iobar4 + (reg))
+#define mbx_wr32(hw, reg, val)	rnp_wr_reg((hw)->iobar4 + (reg), (val))
+
+struct rnp_hw;
+/* Mbx Operate info */
+enum MBX_ID {
+	MBX_PF = 0,
+	MBX_VF,
+	MBX_CM3CPU,
+	MBX_FW = MBX_CM3CPU,
+	MBX_VFCNT
+};
+struct rnp_mbx_api {
+	void (*init_mbx)(struct rnp_hw *hw);
+	int32_t (*read)(struct rnp_hw *hw,
+			uint32_t *msg,
+			uint16_t size,
+			enum MBX_ID);
+	int32_t (*write)(struct rnp_hw *hw,
+			uint32_t *msg,
+			uint16_t size,
+			enum MBX_ID);
+	int32_t (*read_posted)(struct rte_eth_dev *dev,
+			uint32_t *msg,
+			uint16_t size,
+			enum MBX_ID);
+	int32_t (*write_posted)(struct rte_eth_dev *dev,
+			uint32_t *msg,
+			uint16_t size,
+			enum MBX_ID);
+	int32_t (*check_for_msg)(struct rnp_hw *hw, enum MBX_ID);
+	int32_t (*check_for_ack)(struct rnp_hw *hw, enum MBX_ID);
+	int32_t (*check_for_rst)(struct rnp_hw *hw, enum MBX_ID);
+	int32_t (*configure)(struct rnp_hw *hw, int nr_vec, bool enable);
+};
+
+struct rnp_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct rnp_mbx_info {
+	struct rnp_mbx_api ops;
+	uint32_t usec_delay;    /* retry interval delay time */
+	uint32_t timeout;       /* retry ops timeout limit */
+	uint16_t size;          /* data buffer size*/
+	uint16_t vf_num;        /* Virtual Function num */
+	uint16_t pf_num;        /* Physical Function num */
+	uint16_t sriov_st;      /* Sriov state */
+	bool irq_enabled;
+	union {
+		struct {
+			unsigned short pf_req;
+			unsigned short pf_ack;
+		};
+		struct {
+			unsigned short cpu_req;
+			unsigned short cpu_ack;
+		};
+	};
+	unsigned short vf_req[64];
+	unsigned short vf_ack[64];
+
+	struct rnp_mbx_stats stats;
+
+	rte_atomic16_t state;
+} __rte_cache_aligned;
+
+#define RNP_MAX_HW_PORT_PERR_PF (4)
 struct rnp_hw {
 	void *back;
 	char *iobar0;
 	uint32_t iobar0_len;
 	char *iobar4;
 	uint32_t iobar4_len;
+	char *link_sync;
+	char *dma_base;
+	char *eth_base;
+	char *veb_base;
+	char *mac_base[RNP_MAX_HW_PORT_PERR_PF];
+	char *msix_base;
+	/* === dma == */
+	char *dma_axi_en;
+	char *dma_axi_st;
 
 	uint16_t device_id;
 	uint16_t vendor_id;
-} __rte_cache_aligned;
+	uint16_t function;
+	uint16_t pf_vf_num;
+	uint16_t max_vfs;
+	void *cookie_pool;
+	char cookie_p_name[RTE_MEMZONE_NAMESIZE];
 
+	struct rnp_mbx_info mbx;
+} __rte_cache_aligned;
 #endif /* __RNP_H__*/
diff --git a/drivers/net/rnp/meson.build b/drivers/net/rnp/meson.build
index 36a1f7148d..38dbee5ca4 100644
--- a/drivers/net/rnp/meson.build
+++ b/drivers/net/rnp/meson.build
@@ -9,5 +9,6 @@ endif
 
 sources = files(
 		'rnp_ethdev.c',
+		'rnp_mbx.c',
 )
 includes += include_directories('base')
diff --git a/drivers/net/rnp/rnp.h b/drivers/net/rnp/rnp.h
index ea9d138673..a8999ddc47 100644
--- a/drivers/net/rnp/rnp.h
+++ b/drivers/net/rnp/rnp.h
@@ -3,6 +3,7 @@
  */
 #ifndef __RNP_H__
 #define __RNP_H__
+#include <rte_log.h>
 
 #include "base/rnp_hw.h"
 
@@ -14,14 +15,17 @@
 
 struct rnp_eth_port {
 	void *adapt;
+	struct rnp_hw *hw;
 	struct rte_eth_dev *eth_dev;
 } __rte_cache_aligned;
 
 struct rnp_share_ops {
+	struct rnp_mbx_api mbx_api;
 } __rte_cache_aligned;
 
 struct rnp_eth_adapter {
 	struct rnp_hw hw;
+	uint16_t max_vfs;
 	struct rte_pci_device *pdev;
 	struct rte_eth_dev *eth_dev; /* master eth_dev */
 	struct rnp_eth_port *ports[RNP_MAX_PORT_OF_PF];
@@ -34,5 +38,36 @@ struct rnp_eth_adapter {
 	(((struct rnp_eth_port *)((eth_dev)->data->dev_private)))
 #define RNP_DEV_TO_ADAPTER(eth_dev) \
 	((struct rnp_eth_adapter *)(RNP_DEV_TO_PORT(eth_dev)->adapt))
+#define RNP_DEV_TO_HW(eth_dev) \
+	(&((struct rnp_eth_adapter *)(RNP_DEV_TO_PORT((eth_dev))->adapt))->hw)
+#define RNP_DEV_PP_PRIV_TO_MBX_OPS(dev) \
+	(&((struct rnp_share_ops *)(dev)->process_private)->mbx_api)
+#define RNP_DEV_TO_MBX_OPS(dev)	RNP_DEV_PP_PRIV_TO_MBX_OPS(dev)
 
+static inline void rnp_reg_offset_init(struct rnp_hw *hw)
+{
+	uint16_t i;
+
+	if (hw->device_id == RNP_DEV_ID_N10G && hw->mbx.pf_num) {
+		hw->iobar4 += 0x100000;
+		hw->msix_base = hw->iobar4 + 0xa0000;
+		hw->msix_base += 0x200;
+	} else {
+		hw->msix_base = hw->iobar4 + 0xa0000;
+	}
+	/* === dma status/config====== */
+	hw->link_sync    = hw->iobar4 + 0x000c;
+	hw->dma_axi_en   = hw->iobar4 + 0x0010;
+	hw->dma_axi_st   = hw->iobar4 + 0x0014;
+
+	if (hw->mbx.pf_num)
+		hw->msix_base += 0x200;
+	/* === queue registers === */
+	hw->dma_base     = hw->iobar4 + 0x08000;
+	hw->veb_base     = hw->iobar4 + 0x0;
+	hw->eth_base     = hw->iobar4 + 0x10000;
+	/* mac */
+	for (i = 0; i < RNP_MAX_HW_PORT_PERR_PF; i++)
+		hw->mac_base[i] = hw->iobar4 + 0x60000 + 0x10000 * i;
+}
 #endif /* __RNP_H__ */
diff --git a/drivers/net/rnp/rnp_ethdev.c b/drivers/net/rnp/rnp_ethdev.c
index 18fd169166..4747a41423 100644
--- a/drivers/net/rnp/rnp_ethdev.c
+++ b/drivers/net/rnp/rnp_ethdev.c
@@ -8,8 +8,10 @@
 #include <ethdev_driver.h>
 
 #include "rnp.h"
+#include "rnp_mbx.h"
 #include "rnp_logs.h"
 
+extern struct rnp_mbx_api rnp_mbx_pf_ops;
 static int
 rnp_mac_rx_disable(struct rte_eth_dev *dev)
 {
@@ -89,6 +91,59 @@ rnp_alloc_eth_port(struct rte_pci_device *master_pci, char *name)
 	return NULL;
 }
 
+static void rnp_get_nic_attr(struct rnp_eth_adapter *adapter)
+{
+	RTE_SET_USED(adapter);
+}
+
+static int
+rnp_process_resource_init(struct rte_eth_dev *eth_dev)
+{
+	struct rnp_share_ops *share_priv;
+
+	/* allocate process_private memory this must can't
+	 * belone to the dpdk mem resource manager
+	 * such as from rte_malloc or rte_dma_zone..
+	 */
+	/* use the process_prive point to resolve secondary process
+	 * use point-func. This point is per process will be safe to cover.
+	 * This will cause secondary process core-dump because of IPC
+	 * Secondary will call primary process point func virt-address
+	 * secondary process don't alloc user/pmd to alloc or free
+	 * the memory of dpdk-mem resource it will cause hugepage
+	 * mem exception
+	 * be careful for secondary Process to use the share-mem of
+	 * point correlation
+	 */
+	share_priv = calloc(1, sizeof(*share_priv));
+	if (!share_priv) {
+		PMD_DRV_LOG(ERR, "calloc share_priv failed");
+		return -ENOMEM;
+	}
+
+	eth_dev->process_private = share_priv;
+	adapter->share_priv = share_priv;
+
+	return 0;
+}
+
+static void
+rnp_common_ops_init(struct rnp_eth_adapter *adapter)
+{
+	struct rnp_share_ops *share_priv;
+
+	share_priv = adapter->share_priv;
+	share_priv->mbx_api = rnp_mbx_pf_ops;
+}
+
+static int
+rnp_special_ops_init(struct rte_eth_dev *eth_dev)
+{
+	RTE_SET_USED(eth_dev);
+
+	return 0;
+}
+
 static int
 rnp_eth_dev_init(struct rte_eth_dev *dev)
 {
@@ -124,6 +179,19 @@ rnp_eth_dev_init(struct rte_eth_dev *dev)
 	hw->device_id = pci_dev->id.device_id;
 	hw->vendor_id = pci_dev->id.vendor_id;
 	hw->device_id = pci_dev->id.device_id;
+	adapter->max_vfs = pci_dev->max_vfs;
+	ret = rnp_process_resource_init(dev, adapter);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "share prive resource init failed");
+		return ret;
+	}
+	rnp_common_ops_init(adapter);
+	rnp_get_nic_attr(adapter);
+	/* We need Use Device Id To Change The Resource Mode */
+	rnp_special_ops_init(dev);
+	port->adapt = adapter;
+	port->hw = hw;
+	rnp_init_mbx_ops_pf(hw);
 	for (p_id = 0; p_id < adapter->num_ports; p_id++) {
 		/* port 0 resource has been alloced When Probe */
 		if (!p_id) {
diff --git a/drivers/net/rnp/rnp_logs.h b/drivers/net/rnp/rnp_logs.h
index 1b3ee33745..f1648aabb5 100644
--- a/drivers/net/rnp/rnp_logs.h
+++ b/drivers/net/rnp/rnp_logs.h
@@ -13,6 +13,15 @@ extern int rnp_drv_logtype;
 #define RNP_PMD_DRV_LOG(level, fmt, args...) \
 	rte_log(RTE_LOG_##level, rnp_drv_logtype, \
 		"%s() " fmt, __func__, ##args)
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, rnp_drv_logtype, "%s(): " fmt, \
+			__func__, ## args)
+#define PMD_DRV_LOG(level, fmt, args...) \
+	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#define RNP_PMD_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_##level, rnp_drv_logtype, \
+			"rnp_net: (%d) " fmt, __LINE__, ##args)
 #ifdef RTE_LIBRTE_RNP_DEBUG_RX
 extern int rnp_rx_logtype;
 #define RNP_PMD_RX_LOG(level, fmt, args...) \
diff --git a/drivers/net/rnp/rnp_mbx.c b/drivers/net/rnp/rnp_mbx.c
new file mode 100644
index 0000000000..0f1c0a5b74
--- /dev/null
+++ b/drivers/net/rnp/rnp_mbx.c
@@ -0,0 +1,522 @@
+#include <rte_cycles.h>
+#include <rte_log.h>
+
+#include "rnp.h"
+#include "rnp_hw.h"
+#include "rnp_mbx.h"
+#include "rnp_mbx_fw.h"
+#include "rnp_logs.h"
+
+#define RNP_MAX_VF_FUNCTIONS	(64)
+/* == VEC == */
+#define VF2PF_MBOX_VEC(VF)	(0xa5100 + 4 * (VF))
+#define CPU2PF_MBOX_VEC		(0xa5300)
+
+/* == PF <--> VF mailbox ==== */
+#define SHARE_MEM_BYTES		(64) /* 64bytes */
+/* for PF1 rtl will remap 6000 to 0xb000 */
+#define PF_VF_SHM(vf)		((0xa6000) + (64 * (vf)))
+#define PF2VF_COUNTER(vf)	(PF_VF_SHM(vf) + 0)
+#define VF2PF_COUNTER(vf)	(PF_VF_SHM(vf) + 4)
+#define PF_VF_SHM_DATA(vf)	(PF_VF_SHM(vf) + 8)
+#define PF2VF_MBOX_CTRL(vf)	((0xa7100) + (4 * (vf)))
+#define PF_VF_MBOX_MASK_LO	((0xa7200))
+#define PF_VF_MBOX_MASK_HI	((0xa7300))
+
+/* === CPU <--> PF === */
+#define CPU_PF_SHM		(0xaa000)
+#define CPU2PF_COUNTER		(CPU_PF_SHM + 0)
+#define PF2CPU_COUNTER		(CPU_PF_SHM + 4)
+#define CPU_PF_SHM_DATA		(CPU_PF_SHM + 8)
+#define PF2CPU_MBOX_CTRL	(0xaa100)
+#define CPU_PF_MBOX_MASK	(0xaa300)
+
+/* === CPU <--> VF === */
+#define CPU_VF_SHM(vf)		(0xa8000 + (64 * (vf)))
+#define CPU2VF_COUNTER(vf)	(CPU_VF_SHM(vf) + 0)
+#define VF2CPU_COUNTER(vf)	(CPU_VF_SHM(vf) + 4)
+#define CPU_VF_SHM_DATA(vf)	(CPU_VF_SHM(vf) + 8)
+#define VF2CPU_MBOX_CTRL(vf)	(0xa9000 + 64 * (vf))
+#define CPU_VF_MBOX_MASK_LO(vf) (0xa9200 + 64 * (vf))
+#define CPU_VF_MBOX_MASK_HI(vf) (0xa9300 + 64 * (vf))
+
+#define MBOX_CTRL_REQ		(1 << 0)  /* WO */
+/* VF:WR, PF:RO */
+#define MBOX_CTRL_PF_HOLD_SHM	(1 << 3)  /* VF:RO, PF:WR */
+
+#define MBOX_IRQ_EN		(0)
+#define MBOX_IRQ_DISABLE	(1)
+
+/****************************PF MBX OPS************************************/
+static inline u16 rnp_mbx_get_req(struct rnp_hw *hw, int reg)
+{
+	rte_mb();
+	return mbx_rd32(hw, reg) & 0xffff;
+}
+
+static inline u16 rnp_mbx_get_ack(struct rnp_hw *hw, int reg)
+{
+	rte_mb();
+	return (mbx_rd32(hw, reg) >> 16) & 0xffff;
+}
+
+static inline void rnp_mbx_inc_pf_req(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int reg = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_COUNTER : PF2VF_COUNTER(mbx_id);
+	u32 v = mbx_rd32(hw, reg);
+	u16 req;
+
+	req = (v & 0xffff);
+	req++;
+	v &= ~(0x0000ffff);
+	v |= req;
+
+	rte_mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	/* hw->mbx.stats.msgs_tx++; */
+}
+
+static inline void rnp_mbx_inc_pf_ack(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int reg = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_COUNTER : PF2VF_COUNTER(mbx_id);
+	u32 v = mbx_rd32(hw, reg);
+	u16 ack;
+
+	ack = (v >> 16) & 0xffff;
+	ack++;
+	v &= ~(0xffff0000);
+	v |= (ack << 16);
+
+	rte_mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	/* hw->mbx.stats.msgs_rx++; */
+}
+
+/**
+ *  rnp_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification
+ **/
+static int32_t rnp_poll_for_msg(struct rte_eth_dev *dev, enum MBX_ID mbx_id)
+{
+	struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+	struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !ops->check_for_msg)
+		goto out;
+
+	while (countdown && ops->check_for_msg(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		rte_delay_us_block(mbx->usec_delay);
+	}
+
+out:
+	return countdown ? 0 : -ETIME;
+}
+
+/**
+ *  rnp_poll_for_ack - Wait for message acknowledgment
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message acknowledgment
+ **/
+static int32_t rnp_poll_for_ack(struct rte_eth_dev *dev, enum MBX_ID mbx_id)
+{
+	struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+	struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !ops->check_for_ack)
+		goto out;
+
+	while (countdown && ops->check_for_ack(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		rte_delay_us_block(mbx->usec_delay);
+	}
+
+out:
+	return countdown ? 0 : -ETIME;
+}
+
+/**
+ *  rnp_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+static int32_t
+rnp_read_posted_mbx_pf(struct rte_eth_dev *dev, u32 *msg, u16 size,
+		       enum MBX_ID mbx_id)
+{
+	struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+	struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+	int32_t ret_val = -ETIME;
+
+	if (!ops->read || !countdown)
+		return -EOPNOTSUPP;
+
+	ret_val = rnp_poll_for_msg(dev, mbx_id);
+
+	/* if ack received read message, otherwise we timed out */
+	if (!ret_val)
+		return ops->read(hw, msg, size, mbx_id);
+	return ret_val;
+}
+
+/**
+ *  rnp_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+static int32_t
+rnp_write_posted_mbx_pf(struct rte_eth_dev *dev, u32 *msg, u16 size,
+			enum MBX_ID mbx_id)
+{
+	struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev);
+	struct rnp_hw *hw = RNP_DEV_TO_HW(dev);
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int32_t ret_val = -ETIME;
+
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!ops->write || !mbx->timeout)
+		goto out;
+
+	/* send msg and hold buffer lock */
+	if (ops->write)
+		ret_val = ops->write(hw, msg, size, mbx_id);
+
+	/* if msg sent wait until we receive an ack */
+	if (!ret_val)
+		ret_val = rnp_poll_for_ack(dev, mbx_id);
+out:
+	return ret_val;
+}
+
+/**
+ *  rnp_check_for_msg_pf - checks to see if the VF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static int32_t rnp_check_for_msg_pf(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int32_t ret_val = -ETIME;
+
+	if (mbx_id == MBX_CM3CPU) {
+		if (rnp_mbx_get_req(hw, CPU2PF_COUNTER) != hw->mbx.cpu_req) {
+			ret_val = 0;
+			/* hw->mbx.stats.reqs++; */
+		}
+	} else {
+		if (rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx_id)) !=
+				hw->mbx.vf_req[mbx_id]) {
+			ret_val = 0;
+			/* hw->mbx.stats.reqs++; */
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnp_check_for_ack_pf - checks to see if the VF has ACKed
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static int32_t rnp_check_for_ack_pf(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int32_t ret_val = -ETIME;
+
+	if (mbx_id == MBX_CM3CPU) {
+		if (rnp_mbx_get_ack(hw, CPU2PF_COUNTER) != hw->mbx.cpu_ack) {
+			ret_val = 0;
+			/* hw->mbx.stats.acks++; */
+		}
+	} else {
+		if (rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx_id)) != hw->mbx.vf_ack[mbx_id]) {
+			ret_val = 0;
+			/* hw->mbx.stats.acks++; */
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnp_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: the VF index or CPU
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static int32_t rnp_obtain_mbx_lock_pf(struct rnp_hw *hw, enum MBX_ID mbx_id)
+{
+	int32_t ret_val = -ETIME;
+	int try_cnt = 5000;  /* 500ms */
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_MBOX_CTRL : PF2VF_MBOX_CTRL(mbx_id);
+
+	while (try_cnt-- > 0) {
+		/* Take ownership of the buffer */
+		mbx_wr32(hw, CTRL_REG, MBOX_CTRL_PF_HOLD_SHM);
+
+		/* reserve mailbox for cm3 use */
+		if (mbx_rd32(hw, CTRL_REG) & MBOX_CTRL_PF_HOLD_SHM)
+			return 0;
+		rte_delay_us_block(100);
+	}
+
+	RNP_PMD_LOG(WARNING, "%s: failed to get:%d lock\n",
+			__func__, mbx_id);
+	return ret_val;
+}
+
+/**
+ *  rnp_write_mbx_pf - Places a message in the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: the VF index
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static int32_t rnp_write_mbx_pf(struct rnp_hw *hw, u32 *msg,
+				u16 size, enum MBX_ID mbx_id)
+{
+	u32 DATA_REG = (mbx_id == MBX_CM3CPU) ?
+		CPU_PF_SHM_DATA : PF_VF_SHM_DATA(mbx_id);
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_MBOX_CTRL : PF2VF_MBOX_CTRL(mbx_id);
+	int32_t ret_val = 0;
+	u32 stat __rte_unused;
+	u16 i;
+
+	if (size > RNP_VFMAILBOX_SIZE) {
+		RNP_PMD_LOG(ERR, "%s: size:%d should <%d\n", __func__,
+				size, RNP_VFMAILBOX_SIZE);
+		return -EINVAL;
+	}
+
+	/* lock the mailbox to prevent pf/vf/cpu race condition */
+	ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id);
+	if (ret_val) {
+		RNP_PMD_LOG(WARNING, "PF[%d] Can't Get Mbx-Lock Try Again\n",
+				hw->function);
+		return ret_val;
+	}
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++) {
+#ifdef MBX_WR_DEBUG
+		mbx_pwr32(hw, DATA_REG + i * 4, msg[i]);
+#else
+		mbx_wr32(hw, DATA_REG + i * 4, msg[i]);
+#endif
+	}
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	if (mbx_id == MBX_CM3CPU)
+		hw->mbx.cpu_ack = rnp_mbx_get_ack(hw, CPU2PF_COUNTER);
+	else
+		hw->mbx.vf_ack[mbx_id] = rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx_id));
+
+	rnp_mbx_inc_pf_req(hw, mbx_id);
+	rte_mb();
+
+	rte_delay_us(300);
+
+	/* Interrupt VF/CM3 to tell it a message
+	 * has been sent and release buffer
+	 */
+	mbx_wr32(hw, CTRL_REG, MBOX_CTRL_REQ);
+
+	return 0;
+}
+
+/**
+ *  rnp_read_mbx_pf - Read a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @vf_number: the VF index
+ *
+ *  This function copies a message from the mailbox buffer to the caller's
+ *  memory buffer.  The presumption is that the caller knows that there was
+ *  a message due to a VF/CPU request so no polling for message is needed.
+ **/
+static int32_t rnp_read_mbx_pf(struct rnp_hw *hw, u32 *msg,
+			       u16 size, enum MBX_ID mbx_id)
+{
+	u32 BUF_REG  = (mbx_id == MBX_CM3CPU) ?
+		CPU_PF_SHM_DATA : PF_VF_SHM_DATA(mbx_id);
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ?
+		PF2CPU_MBOX_CTRL : PF2VF_MBOX_CTRL(mbx_id);
+	int32_t ret_val = -EIO;
+	u32 stat __rte_unused, i;
+
+	if (size > RNP_VFMAILBOX_SIZE) {
+		RNP_PMD_LOG(ERR, "%s: size:%d should be <= %d\n", __func__,
+				size, RNP_VFMAILBOX_SIZE);
+		return -EINVAL;
+	}
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id);
+	if (ret_val)
+		goto out_no_read;
+
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++) {
+#ifdef MBX_RD_DEBUG
+		msg[i] = mbx_prd32(hw, BUF_REG + 4 * i);
+#else
+		msg[i] = mbx_rd32(hw, BUF_REG + 4 * i);
+#endif
+	}
+	mbx_wr32(hw, BUF_REG, 0);
+
+	/* update req counter, used by rnp_check_for_msg_pf */
+	if (mbx_id == MBX_CM3CPU)
+		hw->mbx.cpu_req = rnp_mbx_get_req(hw, CPU2PF_COUNTER);
+	else
+		hw->mbx.vf_req[mbx_id] = rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx_id));
+
+	/* this ack may be too early? */
+	/* Acknowledge receipt and release mailbox, then we're done */
+	rnp_mbx_inc_pf_ack(hw, mbx_id);
+
+	rte_mb();
+
+	/* free ownership of the buffer */
+	mbx_wr32(hw, CTRL_REG, 0);
+
+out_no_read:
+
+	return ret_val;
+}
+
+static void rnp_mbx_reset_pf(struct rnp_hw *hw)
+{
+	int v;
+
+	/* reset pf->cm3 status */
+	v = mbx_rd32(hw, CPU2PF_COUNTER);
+	hw->mbx.cpu_req = v & 0xffff;
+	hw->mbx.cpu_ack = (v >> 16) & 0xffff;
+	/* release pf->cm3 buffer lock */
+	mbx_wr32(hw, PF2CPU_MBOX_CTRL, 0);
+
+	rte_mb();
+	/* enable irq to fw */
+	mbx_wr32(hw, CPU_PF_MBOX_MASK, 0);
+}
+
+static int get_pfvfnum(struct rnp_hw *hw)
+{
+	uint32_t addr_mask;
+	uint32_t offset;
+	uint32_t val;
+#define RNP_PF_NUM_REG       (0x75f000)
+#define RNP_PFVF_SHIFT       (4)
+#define RNP_PF_SHIFT         (6)
+#define RNP_PF_BIT_MASK      BIT(6)
+	addr_mask = hw->iobar0_len - 1;
+	offset = RNP_PF_NUM_REG & addr_mask;
+	val = rnp_rd_reg(hw->iobar0 + offset);
+
+	return val >> RNP_PFVF_SHIFT;
+}
+
+struct rnp_mbx_api rnp_mbx_pf_ops = {
+	.read           = rnp_read_mbx_pf,
+	.write          = rnp_write_mbx_pf,
+	.read_posted    = rnp_read_posted_mbx_pf,
+	.write_posted   = rnp_write_posted_mbx_pf,
+	.check_for_msg  = rnp_check_for_msg_pf,
+	.check_for_ack  = rnp_check_for_ack_pf,
+};
+
+void *rnp_memzone_reserve(const char *name, unsigned int size)
+{
+#define NO_FLAGS 0
+	const struct rte_memzone *mz = NULL;
+
+	if (name) {
+		if (size) {
+			mz = rte_memzone_reserve(name, size,
+					rte_socket_id(), NO_FLAGS);
+			if (mz)
+				memset(mz->addr, 0, size);
+		} else {
+			mz = rte_memzone_lookup(name);
+		}
+		return mz ? mz->addr : NULL;
+	}
+	return NULL;
+}
+
+void rnp_init_mbx_ops_pf(struct rnp_hw *hw)
+{
+	struct rnp_eth_adapter *adapter = hw->back;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	struct mbx_req_cookie *cookie;
+	uint32_t vf_isolat_off;
+
+	mbx->size       = RNP_VFMAILBOX_SIZE;
+	mbx->usec_delay = RNP_MBX_DELAY_US;
+	mbx->timeout    = (RNP_MBX_TIMEOUT_SECONDS * 1000 * 1000) /
+		mbx->usec_delay;
+	if (hw->device_id == RNP_DEV_ID_N10G) {
+		vf_isolat_off = RNP_VF_ISOLATE_CTRL &
+			(hw->iobar0_len - 1);
+		rnp_wr_reg(hw->iobar0 + vf_isolat_off, 0);
+	}
+	mbx->sriov_st = 0;
+	hw->pf_vf_num = get_pfvfnum(hw);
+	mbx->vf_num = UINT16_MAX;
+	mbx->pf_num = (hw->pf_vf_num & RNP_PF_BIT_MASK) >> RNP_PF_SHIFT;
+	hw->function = mbx->pf_num;
+	/* Retrieve and store the HW base addresses of the device */
+	rnp_reg_offset_init(hw);
+	snprintf(hw->cookie_p_name, RTE_MEMZONE_NAMESIZE, "mbx_req_cookie%d_%d",
+			hw->function, adapter->eth_dev->data->port_id);
+	hw->cookie_pool = rnp_memzone_reserve(hw->cookie_p_name,
+			sizeof(struct mbx_req_cookie));
+
+	cookie = (struct mbx_req_cookie *)hw->cookie_pool;
+	if (cookie) {
+		cookie->timeout_ms = 1000;
+		cookie->magic = COOKIE_MAGIC;
+		cookie->priv_len = RNP_MAX_SHARE_MEM;
+	}
+
+	rnp_mbx_reset_pf(hw);
+}
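
As a reading aid (not part of the patch), here is a minimal sketch of how a
posted write presumably pairs rnp_write_mbx_pf() with the ack poll, using the
timeout/usec_delay values set up in rnp_init_mbx_ops_pf(); the real
rnp_write_posted_mbx_pf() earlier in this file may differ:

/* Illustrative only: send a message, then poll until the peer bumps its
 * ack counter.  rnp_check_for_ack_pf() returns 0 once the counter moves
 * past the value snapshotted by rnp_write_mbx_pf().
 */
static int32_t rnp_write_posted_sketch(struct rnp_hw *hw, u32 *msg,
				       u16 size, enum MBX_ID mbx_id)
{
	struct rnp_mbx_info *mbx = &hw->mbx;
	uint32_t countdown = mbx->timeout;
	int32_t ret;

	ret = rnp_write_mbx_pf(hw, msg, size, mbx_id);
	if (ret)
		return ret;

	while (countdown--) {
		if (!rnp_check_for_ack_pf(hw, mbx_id))
			return 0;
		rte_delay_us_block(mbx->usec_delay);
	}

	return -ETIME;
}
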
diff --git a/drivers/net/rnp/rnp_mbx.h b/drivers/net/rnp/rnp_mbx.h
new file mode 100644
index 0000000000..ee020b5fbc
--- /dev/null
+++ b/drivers/net/rnp/rnp_mbx.h
@@ -0,0 +1,138 @@
+#ifndef __RNP_MBX_H__
+#define __RNP_MBX_H__
+
+#define VF_NUM_MASK_TEMP	(0xff0)
+#define VF_NUM_OFF		(4)
+#define RNP_VF_NUM		(0x75f000)
+#define RNP_VF_NB_MASK		(0x3f)
+#define RNP_PF_NB_MASK		(0x40)
+#define RNP_VF_ISOLATE_CTRL	(0x7982fc)
+#define RNP_IS_SRIOV		BIT(7)
+#define RNP_SRIOV_ST_SHIFT	(24)
+#define RNP_VF_DEFAULT_PORT	(0)
+
+/* Mbx Ctrl state */
+#define RNP_VFMAILBOX_SIZE	(14) /* 14 data dwords of the 64-byte mailbox */
+#define TSRN10_VFMBX_SIZE	(RNP_VFMAILBOX_SIZE)
+#define RNP_VT_MSGTYPE_ACK	(0x80000000)
+
+#define RNP_VT_MSGTYPE_NACK	(0x40000000)
+/* Messages below or'd with this are the NACK */
+#define RNP_VT_MSGTYPE_CTS	(0x20000000)
+/* Indicates that VF is still clear to send requests */
+#define RNP_VT_MSGINFO_SHIFT	(16)
+
+#define RNP_VT_MSGINFO_MASK	(0xFF << RNP_VT_MSGINFO_SHIFT)
+/* The mailbox memory size is 64 bytes accessed by 32-bit registers */
+#define RNP_VLVF_VIEN		(0x80000000) /* filter is valid */
+#define RNP_VLVF_ENTRIES	(64)
+#define RNP_VLVF_VLANID_MASK	(0x00000FFF)
+/* Each VF owns 64 bytes of mailbox memory, accessed as 32-bit words */
+
+#define RNP_VF_RESET		(0x01) /* VF requests reset */
+#define RNP_VF_SET_MAC_ADDR	(0x02) /* VF requests PF to set MAC addr */
+#define RNP_VF_SET_MULTICAST	(0x03) /* VF requests PF to set MC addr */
+#define RNP_VF_SET_VLAN		(0x04) /* VF requests PF to set VLAN */
+
+#define RNP_VF_SET_LPE		(0x05) /* VF requests PF to set VMOLR.LPE */
+#define RNP_VF_SET_MACVLAN	(0x06) /* VF requests PF for unicast filter */
+#define RNP_VF_GET_MACVLAN	(0x07) /* VF requests mac */
+#define RNP_VF_API_NEGOTIATE	(0x08) /* negotiate API version */
+#define RNP_VF_GET_QUEUES	(0x09) /* get queue configuration */
+#define RNP_VF_GET_LINK		(0x10) /* get link status */
+
+#define RNP_VF_SET_VLAN_STRIP	(0x0a) /* VF Requests PF to set VLAN STRIP */
+#define RNP_VF_REG_RD		(0x0b) /* VF Read Reg */
+#define RNP_VF_GET_MAX_MTU	(0x0c) /* VF Get Max Mtu */
+#define RNP_VF_SET_MTU		(0x0d) /* VF Set Mtu */
+#define RNP_VF_GET_FW		(0x0e) /* VF Get Firmware Version */
+
+#define RNP_PF_VFNUM_MASK	GENMASK(26, 21)
+
+#define RNP_PF_SET_FCS		(0x10) /* PF set fcs status */
+#define RNP_PF_SET_PAUSE	(0x11) /* PF set pause status */
+#define RNP_PF_SET_FT_PADDING	(0x12) /* PF set ft padding status */
+#define RNP_PF_SET_VLAN_FILTER	(0x13) /* PF set ntuple status */
+#define RNP_PF_SET_VLAN		(0x14)
+#define RNP_PF_SET_LINK		(0x15)
+#define RNP_PF_SET_SPEED_40G	BIT(8)
+#define RNP_PF_SET_SPEED_10G	BIT(7)
+#define RNP_PF_SET_SPEED_1G	BIT(5)
+#define RNP_PF_SET_SPEED_100M	BIT(3)
+
+#define RNP_PF_SET_MTU		(0x16)
+#define RNP_PF_SET_RESET	(0x17)
+#define RNP_PF_LINK_UP		BIT(31)
+#define RNP_PF_SPEED_MASK	GENMASK(15, 0)
+
+/* Define mailbox register bits */
+#define RNP_PF_REMOVE		(0x0f)
+
+/* Mailbox API ID VF Request */
+/* length of permanent address message returned from PF */
+#define RNP_VF_PERMADDR_MSG_LEN (11)
+#define RNP_VF_TX_QUEUES	(1) /* number of Tx queues supported */
+#define RNP_VF_RX_QUEUES	(2) /* number of Rx queues supported */
+#define RNP_VF_TRANS_VLAN	(3) /* Indication of port vlan */
+#define RNP_VF_DEF_QUEUE	(4) /* Default queue offset */
+/* word in permanent address message with the current multicast type */
+#define RNP_VF_VLAN_WORD	(5)
+#define RNP_VF_PHY_TYPE_WORD	(6)
+#define RNP_VF_FW_VERSION_WORD	(7)
+#define RNP_VF_LINK_STATUS_WORD	(8)
+#define RNP_VF_AXI_MHZ		(9)
+#define RNP_VF_RNP_VF_FEATURE	(10)
+#define RNP_VF_RNP_VF_FILTER_EN	BIT(0)
+
+#define RNP_LINK_SPEED_UNKNOWN 0
+#define RNP_LINK_SPEED_10_FULL    BIT(2)
+#define RNP_LINK_SPEED_100_FULL   BIT(3)
+#define RNP_LINK_SPEED_1GB_FULL   BIT(4)
+#define RNP_LINK_SPEED_10GB_FULL  BIT(5)
+#define RNP_LINK_SPEED_40GB_FULL  BIT(6)
+#define RNP_LINK_SPEED_25GB_FULL  BIT(7)
+#define RNP_LINK_SPEED_50GB_FULL  BIT(8)
+#define RNP_LINK_SPEED_100GB_FULL BIT(9)
+#define RNP_LINK_SPEED_10_HALF    BIT(10)
+#define RNP_LINK_SPEED_100_HALF   BIT(11)
+#define RNP_LINK_SPEED_1GB_HALF   BIT(12)
+
+/* Mailbox API ID PF Request */
+#define RNP_VF_MC_TYPE_WORD		(3)
+#define RNP_VF_DMA_VERSION_WORD		(4)
+/* Get Queue write-back reference value */
+#define RNP_PF_CONTROL_PRING_MSG	(0x0100) /* PF control message */
+
+#define TSRN10_MBX_VECTOR_ID            (0)
+#define TSRN10_PF2VF_MBX_VEC_CTR(n)     (0xa5000 + 0x4 * (n))
+
+#define RNP_VF_INIT_TIMEOUT		(200) /* Number of retries to clear RSTI */
+#define RNP_VF_MBX_INIT_TIMEOUT		(2000) /* number of retries on mailbox */
+
+#define MBOX_CTRL_REQ			(1 << 0) /* WO */
+#define MBOX_CTRL_VF_HOLD_SHM		(1 << 2) /* VF:WR, PF:RO */
+#define VF_NUM_MASK 0x3f
+#define VFNUM(num)			((num) & VF_NUM_MASK)
+
+#define PF_VF_SHM(vf)	\
+	((0xa6000) + (64 * (vf))) /* for PF1 rtl will remap 6000 to 0xb000 */
+#define PF2VF_COUNTER(vf)		(PF_VF_SHM(vf) + 0)
+#define VF2PF_COUNTER(vf)		(PF_VF_SHM(vf) + 4)
+#define PF_VF_SHM_DATA(vf)		(PF_VF_SHM(vf) + 8)
+#define VF2PF_MBOX_CTRL(vf)		((0xa7000) + (4 * (vf)))
+
+/* Error Codes */
+#define RNP_ERR_INVALID_MAC_ADDR	(-1)
+#define RNP_ERR_MBX			(-100)
+
+#define RNP_MBX_DELAY_US		(100) /* delay between retries, in us */
+#define RNP_MBX_TIMEOUT_SECONDS	(2) /* max total retry time: 2s */
+#define RNP_ARRAY_OPCODE_OFFSET	(0)
+#define RNP_ARRAY_CTRL_OFFSET	(1)
+
+void rnp_init_mbx_ops_pf(struct rnp_hw *hw);
+void *rnp_memzone_reserve(const char *name, unsigned int size);
+#endif
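
For reference, the macros above imply the following per-VF shared-memory
layout; the concrete numbers below are a worked example for vf == 2, not
additional definitions:

/* Layout sketch derived from PF_VF_SHM() and friends, for vf == 2:
 *   PF_VF_SHM(2)       = 0xa6080  base of the 64-byte per-VF window
 *   PF2VF_COUNTER(2)   = 0xa6080  PF->VF req/ack counter pair
 *   VF2PF_COUNTER(2)   = 0xa6084  VF->PF req/ack counter pair
 *   PF_VF_SHM_DATA(2)  = 0xa6088  RNP_VFMAILBOX_SIZE (14) dwords of payload
 *   VF2PF_MBOX_CTRL(2) = 0xa7008  per-VF control/doorbell register
 */
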
diff --git a/drivers/net/rnp/rnp_mbx_fw.h b/drivers/net/rnp/rnp_mbx_fw.h
new file mode 100644
index 0000000000..439090b5a3
--- /dev/null
+++ b/drivers/net/rnp/rnp_mbx_fw.h
@@ -0,0 +1,22 @@
+#ifndef __RNP_MBX_FW_H__
+#define __RNP_MBX_FW_H__
+
+struct mbx_fw_cmd_reply;
+typedef void (*cookie_cb)(struct mbx_fw_cmd_reply *reply, void *priv);
+#define RNP_MAX_SHARE_MEM (8 * 8)
+struct mbx_req_cookie {
+	int magic;
+#define COOKIE_MAGIC 0xCE
+	cookie_cb cb;
+	int timeout_ms;
+	int errcode;
+
+	/* wait_queue_head_t wait; */
+	volatile int done;
+	int priv_len;
+	char priv[RNP_MAX_SHARE_MEM];
+};
+struct mbx_fw_cmd_reply {
+} __rte_cache_aligned;
+
+#endif /* __RNP_MBX_FW_H__ */
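
A minimal sketch of how a synchronous firmware request might consume the
cookie reserved in rnp_init_mbx_ops_pf(); the helper name and the busy-wait
completion are assumptions of this example, not part of the series:

/* Hypothetical helper: wait for the reply path to mark the cookie done,
 * bounded by cookie->timeout_ms (assumes rte_cycles.h for rte_delay_ms).
 */
static int rnp_mbx_wait_cookie(struct mbx_req_cookie *cookie)
{
	int waited_ms = 0;

	while (!cookie->done && waited_ms < cookie->timeout_ms) {
		rte_delay_ms(1);
		waited_ms++;
	}

	return cookie->done ? cookie->errcode : -ETIME;
}
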
-- 
2.27.0

