From mboxrd@z Thu Jan 1 00:00:00 1970
From: Wenbo Cao
To: Wenbo Cao
Cc: dev@dpdk.org, ferruh.yigit@amd.com, andrew.rybchenko@oktetlabs.ru,
 yaojun@mucse.com, Stephen Hemminger
Subject: [PATCH v5 4/8] net/rnp: add mbx basic api feature
Date: Mon, 7 Aug 2023 02:16:11 +0000
Message-Id: <20230807021615.3663034-5-caowenbo@mucse.com>
X-Mailer: git-send-email 2.27.0
In-Reply-To: <20230807021615.3663034-1-caowenbo@mucse.com>
References: <20230807021615.3663034-1-caowenbo@mucse.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

The mbx base code is used to communicate with the firmware.

Signed-off-by: Wenbo Cao
Suggested-by: Stephen Hemminger
---
 drivers/net/rnp/base/rnp_api.c      |  23 ++
 drivers/net/rnp/base/rnp_api.h      |   7 +
 drivers/net/rnp/base/rnp_cfg.h      |   7 +
 drivers/net/rnp/base/rnp_dma_regs.h |  73 ++++
 drivers/net/rnp/base/rnp_eth_regs.h | 124 +++++++
 drivers/net/rnp/base/rnp_hw.h       | 112 +++++-
 drivers/net/rnp/meson.build         |   1 +
 drivers/net/rnp/rnp.h               |  35 ++
 drivers/net/rnp/rnp_ethdev.c        |  70 +++-
 drivers/net/rnp/rnp_logs.h          |   9 +
 drivers/net/rnp/rnp_mbx.c           | 522 ++++++++++++++++++++++++++++
 drivers/net/rnp/rnp_mbx.h           | 139 ++++++++
 drivers/net/rnp/rnp_mbx_fw.c        | 271 +++++++++++++++
 drivers/net/rnp/rnp_mbx_fw.h        |  22 ++
 14 files changed, 1412 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/rnp/base/rnp_api.c
 create mode 100644 drivers/net/rnp/base/rnp_api.h
 create mode 100644 drivers/net/rnp/base/rnp_cfg.h
 create mode 100644 drivers/net/rnp/base/rnp_dma_regs.h
 create mode 100644 drivers/net/rnp/base/rnp_eth_regs.h
 create mode 100644 drivers/net/rnp/rnp_mbx.c
 create mode 100644 drivers/net/rnp/rnp_mbx.h
 create mode 100644 drivers/net/rnp/rnp_mbx_fw.c
 create mode 100644 drivers/net/rnp/rnp_mbx_fw.h

diff --git a/drivers/net/rnp/base/rnp_api.c b/drivers/net/rnp/base/rnp_api.c
new file mode 100644
index 0000000000..550da6217d
--- /dev/null
+++ b/drivers/net/rnp/base/rnp_api.c
@@ -0,0 +1,23 @@
+#include "rnp.h"
+#include "rnp_api.h"
+
+int
+rnp_init_hw(struct rte_eth_dev *dev)
+{
+	const struct rnp_mac_api *ops = RNP_DEV_TO_MAC_OPS(dev);
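/*
 * Sketch of the other half of this dispatch (an assumption, not part of
 * this patch): a MAC backend is expected to fill a const struct
 * rnp_mac_api table elsewhere in the series, and any hook it leaves
 * NULL makes these wrappers return -EOPNOTSUPP. A hypothetical
 * initializer might look like:
 *
 *	static const struct rnp_mac_api rnp_n10_mac_ops = {
 *		.init_hw  = rnp_n10_init_hw,
 *		.reset_hw = rnp_n10_reset_hw,
 *	};
 */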
+ struct rnp_hw *hw = RNP_DEV_TO_HW(dev); + + if (ops->init_hw) + return ops->init_hw(hw); + return -EOPNOTSUPP; +} + +int +rnp_reset_hw(struct rte_eth_dev *dev, struct rnp_hw *hw) +{ + const struct rnp_mac_api *ops = RNP_DEV_TO_MAC_OPS(dev); + + if (ops->reset_hw) + return ops->reset_hw(hw); + return -EOPNOTSUPP; +} diff --git a/drivers/net/rnp/base/rnp_api.h b/drivers/net/rnp/base/rnp_api.h new file mode 100644 index 0000000000..df574dab77 --- /dev/null +++ b/drivers/net/rnp/base/rnp_api.h @@ -0,0 +1,7 @@ +#ifndef __RNP_API_H__ +#define __RNP_API_H__ +int +rnp_init_hw(struct rte_eth_dev *dev); +int +rnp_reset_hw(struct rte_eth_dev *dev, struct rnp_hw *hw); +#endif /* __RNP_API_H__ */ diff --git a/drivers/net/rnp/base/rnp_cfg.h b/drivers/net/rnp/base/rnp_cfg.h new file mode 100644 index 0000000000..90f25268ad --- /dev/null +++ b/drivers/net/rnp/base/rnp_cfg.h @@ -0,0 +1,7 @@ +#ifndef __RNP_CFG_H__ +#define __RNP_CFG_H__ +#include "rnp_osdep.h" + +#define RNP_NIC_RESET _NIC_(0x0010) +#define RNP_TX_QINQ_WORKAROUND _NIC_(0x801c) +#endif /* __RNP_CFG_H__ */ diff --git a/drivers/net/rnp/base/rnp_dma_regs.h b/drivers/net/rnp/base/rnp_dma_regs.h new file mode 100644 index 0000000000..bfe87e534d --- /dev/null +++ b/drivers/net/rnp/base/rnp_dma_regs.h @@ -0,0 +1,73 @@ +#ifndef __RNP_REGS_H__ +#define __RNP_REGS_H__ + +#include "rnp_osdep.h" + +/* mac address offset */ +#define RNP_DMA_CTRL (0x4) +#define RNP_VEB_BYPASS_EN BIT(4) +#define RNP_DMA_MEM_CFG_LE (0 << 5) +#define TSNR10_DMA_MEM_CFG_BE (1 << 5) +#define RNP_DMA_SCATTER_MEM_SHIFT (16) + +#define RNP_FIRMWARE_SYNC (0xc) +#define RNP_FIRMWARE_SYNC_MASK GENMASK(31, 16) +#define RNP_FIRMWARE_SYNC_MAGIC (0xa5a40000) +#define RNP_DRIVER_REMOVE (0x5a000000) +/* 1BIT <-> 16 bytes Dma Addr Size*/ +#define RNP_DMA_SCATTER_MEM_MASK GENMASK(31, 16) +#define RNP_DMA_TX_MAP_MODE_SHIFT (12) +#define RNP_DMA_TX_MAP_MODE_MASK GENMASK(15, 12) +#define RNP_DMA_RX_MEM_PAD_EN BIT(8) +/* === queue register ===== */ +/* enable */ +#define RNP_DMA_RXQ_START(qid) _RING_(0x0010 + 0x100 * (qid)) +#define RNP_DMA_RXQ_READY(qid) _RING_(0x0014 + 0x100 * (qid)) +#define RNP_DMA_TXQ_START(qid) _RING_(0x0018 + 0x100 * (qid)) +#define RNP_DMA_TXQ_READY(qid) _RING_(0x001c + 0x100 * (qid)) + +#define RNP_DMA_INT_STAT(qid) _RING_(0x0020 + 0x100 * (qid)) +#define RNP_DMA_INT_MASK(qid) _RING_(0x0024 + 0x100 * (qid)) +#define RNP_TX_INT_MASK BIT(1) +#define RNP_RX_INT_MASK BIT(0) +#define RNP_DMA_INT_CLER(qid) _RING_(0x0028 + 0x100 * (qid)) + +/* rx-queue */ +#define RNP_DMA_RXQ_BASE_ADDR_HI(qid) _RING_(0x0030 + 0x100 * (qid)) +#define RNP_DMA_RXQ_BASE_ADDR_LO(qid) _RING_(0x0034 + 0x100 * (qid)) +#define RNP_DMA_RXQ_LEN(qid) _RING_(0x0038 + 0x100 * (qid)) +#define RNP_DMA_RXQ_HEAD(qid) _RING_(0x003c + 0x100 * (qid)) +#define RNP_DMA_RXQ_TAIL(qid) _RING_(0x0040 + 0x100 * (qid)) +#define RNP_DMA_RXQ_DESC_FETCH_CTRL(qid) _RING_(0x0044 + 0x100 * (qid)) +#define RNP_DMA_RXQ_INT_DELAY_TIMER(qid) _RING_(0x0048 + 0x100 * (qid)) +#define RNP_DMA_RXQ_INT_DELAY_PKTCNT(qidx) _RING_(0x004c + 0x100 * (qid)) +#define RNP_DMA_RXQ_RX_PRI_LVL(qid) _RING_(0x0050 + 0x100 * (qid)) +#define RNP_DMA_RXQ_DROP_TIMEOUT_TH(qid) _RING_(0x0054 + 0x100 * (qid)) +/* tx-queue */ +#define RNP_DMA_TXQ_BASE_ADDR_HI(qid) _RING_(0x0060 + 0x100 * (qid)) +#define RNP_DMA_TXQ_BASE_ADDR_LO(qid) _RING_(0x0064 + 0x100 * (qid)) +#define RNP_DMA_TXQ_LEN(qid) _RING_(0x0068 + 0x100 * (qid)) +#define RNP_DMA_TXQ_HEAD(qid) _RING_(0x006c + 0x100 * (qid)) +#define RNP_DMA_TXQ_TAIL(qid) _RING_(0x0070 + 0x100 * (qid)) +#define 
RNP_DMA_TXQ_DESC_FETCH_CTRL(qid) _RING_(0x0074 + 0x100 * (qid)) +#define RNP_DMA_TXQ_INT_DELAY_TIMER(qid) _RING_(0x0078 + 0x100 * (qid)) +#define RNP_DMA_TXQ_INT_DELAY_PKTCNT(qid) _RING_(0x007c + 0x100 * (qid)) + +#define RNP_DMA_TXQ_PRI_LVL(qid) _RING_(0x0080 + 0x100 * (qid)) +#define RNP_DMA_TXQ_RATE_CTRL_TH(qid) _RING_(0x0084 + 0x100 * (qid)) +#define RNP_DMA_TXQ_RATE_CTRL_TM(qid) _RING_(0x0088 + 0x100 * (qid)) + +/* VEB Table Register */ +#define RNP_VBE_MAC_LO(port, nr) _RING_(0x00a0 + (4 * (port)) + \ + (0x100 * (nr))) +#define RNP_VBE_MAC_HI(port, nr) _RING_(0x00b0 + (4 * (port)) + \ + (0x100 * (nr))) +#define RNP_VEB_VID_CFG(port, nr) _RING_(0x00c0 + (4 * (port)) + \ + (0x100 * (nr))) +#define RNP_VEB_VF_RING(port, nr) _RING_(0x00d0 + (4 * (port)) + \ + (0x100 * (nr))) +#define RNP_MAX_VEB_TB (64) +#define RNP_VEB_RING_CFG_OFFSET (8) +#define RNP_VEB_SWITCH_VF_EN BIT(7) +#define MAX_VEB_TABLES_NUM (4) +#endif /* RNP_DMA_REGS_H_ */ diff --git a/drivers/net/rnp/base/rnp_eth_regs.h b/drivers/net/rnp/base/rnp_eth_regs.h new file mode 100644 index 0000000000..88e8e1e552 --- /dev/null +++ b/drivers/net/rnp/base/rnp_eth_regs.h @@ -0,0 +1,124 @@ +#ifndef _RNP_ETH_REGS_H_ +#define _RNP_ETH_REGS_H_ + +#include "rnp_osdep.h" + +/* PTP 1588 TM Offload */ +#define RNP_ETH_PTP_TX_STATUS(n) _ETH_(0x0400 + ((n) * 0x14)) +#define RNP_ETH_PTP_TX_HTIMES(n) _ETH_(0x0404 + ((n) * 0x14)) +#define RNP_ETH_PTP_TX_LTIMES(n) _ETH_(0x0408 + ((n) * 0x14)) +#define RNP_ETH_PTP_TX_TS_ST(n) _ETH_(0x040c + ((n) * 0x14)) +#define RNP_ETH_PTP_TX_CLEAR(n) _ETH_(0x0410 + ((n) * 0x14)) + +#define RNP_ETH_ENGINE_BYPASS _ETH_(0x8000) +#define RNP_EN_TUNNEL_VXLAN_PARSE _ETH_(0x8004) +#define RNP_ETH_MAC_LOOPBACK _ETH_(0x8008) +#define RNP_ETH_FIFO_CTRL _ETH_(0x800c) +#define RNP_ETH_FOUR_FIFO BIT(0) +#define RNP_ETH_TWO_FIFO BIT(1) +#define RNP_ETH_ONE_FIFO BIT(2) +#define RNP_FIFO_CFG_EN (0x1221) +#define RNP_ETH_VXLAN_PORT_CTRL _ETH_(0x8010) +#define RNP_ETH_VXLAN_DEF_PORT (4789) +#define RNP_HOST_FILTER_EN _ETH_(0x801c) +#define RNP_HW_SCTP_CKSUM_CTRL _ETH_(0x8038) +#define RNP_HW_CHECK_ERR_CTRL _ETH_(0x8060) +#define RNP_HW_ERR_HDR_LEN BIT(0) +#define RNP_HW_ERR_PKTLEN BIT(1) +#define RNP_HW_L3_CKSUM_ERR BIT(2) +#define RNP_HW_L4_CKSUM_ERR BIT(3) +#define RNP_HW_SCTP_CKSUM_ERR BIT(4) +#define RNP_HW_INNER_L3_CKSUM_ERR BIT(5) +#define RNP_HW_INNER_L4_CKSUM_ERR BIT(6) +#define RNP_HW_CKSUM_ERR_MASK GENMASK(6, 2) +#define RNP_HW_CHECK_ERR_MASK GENMASK(6, 0) +#define RNP_HW_ERR_RX_ALL_MASK GENMASK(1, 0) + +#define RNP_REDIR_CTRL _ETH_(0x8030) +#define RNP_VLAN_Q_STRIP_CTRL(n) _ETH_(0x8040 + 0x4 * ((n) / 32)) +/* This Just VLAN Master Switch */ +#define RNP_VLAN_TUNNEL_STRIP_EN _ETH_(0x8050) +#define RNP_VLAN_TUNNEL_STRIP_MODE _ETH_(0x8054) +#define RNP_VLAN_TUNNEL_STRIP_OUTER (0) +#define RNP_VLAN_TUNNEL_STRIP_INNER (1) +#define RNP_RSS_INNER_CTRL _ETH_(0x805c) +#define RNP_INNER_RSS_EN (1) + +#define RNP_ETH_DEFAULT_RX_RING _ETH_(0x806c) +#define RNP_RX_FC_HI_WATER(n) _ETH_(0x80c0 + ((n) * 0x8)) +#define RNP_RX_FC_LO_WATER(n) _ETH_(0x80c4 + ((n) * 0x8)) + +#define RNP_RX_FIFO_FULL_THRETH(n) _ETH_(0x8070 + ((n) * 0x8)) +#define RNP_RX_WORKAROUND_VAL _ETH_(0x7ff) +#define RNP_RX_DEFAULT_VAL _ETH_(0x270) + +#define RNP_MIN_FRAME_CTRL _ETH_(0x80f0) +#define RNP_MAX_FRAME_CTRL _ETH_(0x80f4) + +#define RNP_RX_FC_ENABLE _ETH_(0x8520) +#define RNP_RING_FC_EN(n) _ETH_(0x8524 + 0x4 * ((n) / 32)) +#define RNP_RING_FC_THRESH(n) _ETH_(0x8a00 + 0x4 * (n)) + +/* Mac Host Filter */ +#define RNP_MAC_FCTRL _ETH_(0x9110) +#define 
RNP_MAC_FCTRL_MPE BIT(8) /* Multicast Promiscuous En */ +#define RNP_MAC_FCTRL_UPE BIT(9) /* Unicast Promiscuous En */ +#define RNP_MAC_FCTRL_BAM BIT(10) /* Broadcast Accept Mode */ +#define RNP_MAC_FCTRL_BYPASS (RNP_MAC_FCTRL_MPE | \ + RNP_MAC_FCTRL_UPE | \ + RNP_MAC_FCTRL_BAM) +/* MC UC Mac Hash Filter Ctrl */ +#define RNP_MAC_MCSTCTRL _ETH_(0x9114) +#define RNP_MAC_HASH_MASK GENMASK(11, 0) +#define RNP_MAC_MULTICASE_TBL_EN BIT(2) +#define RNP_MAC_UNICASE_TBL_EN BIT(3) +#define RNP_UC_HASH_TB(n) _ETH_(0xA800 + ((n) * 0x4)) +#define RNP_MC_HASH_TB(n) _ETH_(0xAC00 + ((n) * 0x4)) + +#define RNP_VLAN_FILTER_CTRL _ETH_(0x9118) +#define RNP_L2TYPE_FILTER_CTRL (RNP_VLAN_FILTER_CTRL) +#define RNP_L2TYPE_FILTER_EN BIT(31) +#define RNP_VLAN_FILTER_EN BIT(30) + +#define RNP_FC_PAUSE_FWD_ACT _ETH_(0x9280) +#define RNP_FC_PAUSE_DROP BIT(31) +#define RNP_FC_PAUSE_PASS (0) +#define RNP_FC_PAUSE_TYPE _ETH_(0x9284) +#define RNP_FC_PAUSE_POLICY_EN BIT(31) +#define RNP_PAUSE_TYPE _ETH_(0x8808) + +#define RNP_INPUT_USE_CTRL _ETH_(0x91d0) +#define RNP_INPUT_VALID_MASK (0xf) +#define RNP_INPUT_POLICY(n) _ETH_(0x91e0 + ((n) * 0x4)) +/* RSS */ +#define RNP_RSS_MRQC_ADDR _ETH_(0x92a0) +#define RNP_SRIOV_CTRL RNP_RSS_MRQC_ADDR +#define RNP_SRIOV_ENABLE BIT(3) + +#define RNP_RSS_REDIR_TB(mac, idx) _ETH_(0xe000 + \ + ((mac) * 0x200) + ((idx) * 0x4)) +#define RNP_RSS_KEY_TABLE(idx) _ETH_(0x92d0 + ((idx) * 0x4)) +/*======================================================================= + *HOST_MAC_ADDRESS_FILTER + *======================================================================= + */ +#define RNP_RAL_BASE_ADDR(vf_id) _ETH_(0xA000 + 0x04 * (vf_id)) +#define RNP_RAH_BASE_ADDR(vf_id) _ETH_(0xA400 + 0x04 * (vf_id)) +#define RNP_MAC_FILTER_EN BIT(31) + +/* ETH Statistic */ +#define RNP_ETH_RXTRANS_DROP(p_id) _ETH_((0x8904) + ((p_id) * (0x40))) +#define RNP_ETH_RXTRANS_CAT_ERR(p_id) _ETH_((0x8928) + ((p_id) * (0x40))) +#define RNP_ETH_TXTM_DROP _ETH_(0X0470) + +#define RNP_VFTA_BASE_ADDR _ETH_(0xB000) +#define RNP_VFTA_HASH_TABLE(id) (RNP_VFTA_BASE_ADDR + 0x4 * (id)) +#define RNP_ETYPE_BASE_ADDR _ETH_(0xB300) +#define RNP_MPSAR_BASE_ADDR(vf_id) _ETH_(0xB400 + 0x04 * (vf_id)) +#define RNP_PFVLVF_BASE_ADDR _ETH_(0xB600) +#define RNP_PFVLVFB_BASE_ADDR _ETH_(0xB700) +#define RNP_TUNNEL_PFVLVF_BASE_ADDR _ETH_(0xB800) +#define RNP_TUNNEL_PFVLVFB_BASE_ADDR _ETH_(0xB900) + +#define RNP_TC_PORT_MAP_TB(port) _ETH_(0xe840 + 0x04 * (port)) +#endif /* RNP_ETH_REGS_H_ */ diff --git a/drivers/net/rnp/base/rnp_hw.h b/drivers/net/rnp/base/rnp_hw.h index d80d23f4b4..1db966cf21 100644 --- a/drivers/net/rnp/base/rnp_hw.h +++ b/drivers/net/rnp/base/rnp_hw.h @@ -4,16 +4,126 @@ #ifndef __RNP_HW_H__ #define __RNP_HW_H__ +#include +#include + +#include "rnp_osdep.h" + +static inline unsigned int rnp_rd_reg(volatile void *addr) +{ + unsigned int v = rte_read32(addr); + + return v; +} + +static inline void rnp_wr_reg(volatile void *reg, int val) +{ + rte_write32_relaxed((val), (reg)); +} + +#define mbx_rd32(_hw, _off) \ + rnp_rd_reg((uint8_t *)((_hw)->iobar4) + (_off)) +#define mbx_wr32(_hw, _off, _val) \ + rnp_wr_reg((uint8_t *)((_hw)->iobar4) + (_off), (_val)) +#define rnp_io_rd(_base, _off) \ + rnp_rd_reg((uint8_t *)(_base) + (_off)) +#define rnp_io_wr(_base, _off, _val) \ + rnp_wr_reg((uint8_t *)(_base) + (_off), (_val)) + +struct rnp_hw; +/* Mbx Operate info */ +enum MBX_ID { + MBX_PF = 0, + MBX_VF, + MBX_CM3CPU, + MBX_FW = MBX_CM3CPU, + MBX_VFCNT +}; +struct rnp_mbx_api { + void (*init_mbx)(struct rnp_hw *hw); + int32_t (*read)(struct 
rnp_hw *hw, + uint32_t *msg, + uint16_t size, + enum MBX_ID); + int32_t (*write)(struct rnp_hw *hw, + uint32_t *msg, + uint16_t size, + enum MBX_ID); + int32_t (*read_posted)(struct rte_eth_dev *dev, + uint32_t *msg, + uint16_t size, + enum MBX_ID); + int32_t (*write_posted)(struct rte_eth_dev *dev, + uint32_t *msg, + uint16_t size, + enum MBX_ID); + int32_t (*check_for_msg)(struct rnp_hw *hw, enum MBX_ID); + int32_t (*check_for_ack)(struct rnp_hw *hw, enum MBX_ID); + int32_t (*check_for_rst)(struct rnp_hw *hw, enum MBX_ID); +}; + +struct rnp_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct rnp_mbx_info { + struct rnp_mbx_api ops; + uint32_t usec_delay; /* retry interval delay time */ + uint32_t timeout; /* retry ops timeout limit */ + uint16_t size; /* data buffer size*/ + uint16_t vf_num; /* Virtual Function num */ + uint16_t pf_num; /* Physical Function num */ + uint16_t sriov_st; /* Sriov state */ + bool irq_enabled; + union { + struct { + unsigned short pf_req; + unsigned short pf_ack; + }; + struct { + unsigned short cpu_req; + unsigned short cpu_ack; + }; + }; + unsigned short vf_req[64]; + unsigned short vf_ack[64]; + + struct rnp_mbx_stats stats; + + rte_atomic16_t state; +} __rte_cache_aligned; + struct rnp_eth_adapter; +#define RNP_MAX_HW_PORT_PERR_PF (4) struct rnp_hw { struct rnp_eth_adapter *back; void *iobar0; uint32_t iobar0_len; void *iobar4; uint32_t iobar4_len; + void *link_sync; + void *dma_base; + void *eth_base; + void *veb_base; + void *mac_base[RNP_MAX_HW_PORT_PERR_PF]; + void *msix_base; + /* === dma == */ + void *dma_axi_en; + void *dma_axi_st; uint16_t device_id; uint16_t vendor_id; -} __rte_cache_aligned; + uint16_t function; + uint16_t pf_vf_num; + uint16_t max_vfs; + void *cookie_pool; + char cookie_p_name[RTE_MEMZONE_NAMESIZE]; + struct rnp_mbx_info mbx; +} __rte_cache_aligned; #endif /* __RNP_H__*/ diff --git a/drivers/net/rnp/meson.build b/drivers/net/rnp/meson.build index f85d597e68..60bba486fc 100644 --- a/drivers/net/rnp/meson.build +++ b/drivers/net/rnp/meson.build @@ -8,5 +8,6 @@ endif sources = files( 'rnp_ethdev.c', + 'rnp_mbx.c', ) includes += include_directories('base') diff --git a/drivers/net/rnp/rnp.h b/drivers/net/rnp/rnp.h index c7959c64aa..086667cec1 100644 --- a/drivers/net/rnp/rnp.h +++ b/drivers/net/rnp/rnp.h @@ -3,6 +3,7 @@ */ #ifndef __RNP_H__ #define __RNP_H__ +#include #include "base/rnp_hw.h" @@ -14,14 +15,17 @@ struct rnp_eth_port { struct rnp_eth_adapter *adapt; + struct rnp_hw *hw; struct rte_eth_dev *eth_dev; } __rte_cache_aligned; struct rnp_share_ops { + const struct rnp_mbx_api *mbx_api; } __rte_cache_aligned; struct rnp_eth_adapter { struct rnp_hw hw; + uint16_t max_vfs; struct rte_pci_device *pdev; struct rte_eth_dev *eth_dev; /* master eth_dev */ struct rnp_eth_port *ports[RNP_MAX_PORT_OF_PF]; @@ -34,5 +38,36 @@ struct rnp_eth_adapter { (((struct rnp_eth_port *)((eth_dev)->data->dev_private))) #define RNP_DEV_TO_ADAPTER(eth_dev) \ ((struct rnp_eth_adapter *)(RNP_DEV_TO_PORT(eth_dev)->adapt)) +#define RNP_DEV_TO_HW(eth_dev) \ + (&((struct rnp_eth_adapter *)(RNP_DEV_TO_PORT((eth_dev))->adapt))->hw) +#define RNP_DEV_PP_PRIV_TO_MBX_OPS(dev) \ + (((struct rnp_share_ops *)(dev)->process_private)->mbx_api) +#define RNP_DEV_TO_MBX_OPS(dev) RNP_DEV_PP_PRIV_TO_MBX_OPS(dev) +static inline void rnp_reg_offset_init(struct rnp_hw *hw) +{ + uint16_t i; + + if (hw->device_id == RNP_DEV_ID_N10G && hw->mbx.pf_num) { + hw->iobar4 = (void *)((uint8_t *)hw->iobar4 + 0x100000); + hw->msix_base = (void 
*)((uint8_t *)hw->iobar4 + 0xa4000); + hw->msix_base = (void *)((uint8_t *)hw->msix_base + 0x200); + } else { + hw->msix_base = (void *)((uint8_t *)hw->iobar4 + 0xa4000); + } + /* === dma status/config====== */ + hw->link_sync = (void *)((uint8_t *)hw->iobar4 + 0x000c); + hw->dma_axi_en = (void *)((uint8_t *)hw->iobar4 + 0x0010); + hw->dma_axi_st = (void *)((uint8_t *)hw->iobar4 + 0x0014); + + if (hw->mbx.pf_num) + hw->msix_base = (void *)((uint8_t *)0x200); + /* === queue registers === */ + hw->dma_base = (void *)((uint8_t *)hw->iobar4 + 0x08000); + hw->veb_base = (void *)((uint8_t *)hw->iobar4 + 0x0); + hw->eth_base = (void *)((uint8_t *)hw->iobar4 + 0x10000); + /* mac */ + for (i = 0; i < RNP_MAX_HW_PORT_PERR_PF; i++) + hw->mac_base[i] = (void *)((uint8_t *)hw->iobar4 + 0x60000 + 0x10000 * i); +} #endif /* __RNP_H__ */ diff --git a/drivers/net/rnp/rnp_ethdev.c b/drivers/net/rnp/rnp_ethdev.c index 357375ee39..8a6635951b 100644 --- a/drivers/net/rnp/rnp_ethdev.c +++ b/drivers/net/rnp/rnp_ethdev.c @@ -8,6 +8,7 @@ #include #include "rnp.h" +#include "rnp_mbx.h" #include "rnp_logs.h" static int @@ -89,6 +90,58 @@ rnp_alloc_eth_port(struct rte_pci_device *master_pci, char *name) return NULL; } +static void rnp_get_nic_attr(struct rnp_eth_adapter *adapter) +{ + RTE_SET_USED(adapter); +} + +static int +rnp_process_resource_init(struct rte_eth_dev *eth_dev) +{ + struct rnp_share_ops *share_priv; + + /* allocate process_private memory this must can't + * belone to the dpdk mem resource manager + * such as from rte_malloc or rte_dma_zone.. + */ + /* use the process_prive point to resolve secondary process + * use point-func. This point is per process will be safe to cover. + * This will cause secondary process core-dump because of IPC + * Secondary will call primary process point func virt-address + * secondary process don't alloc user/pmd to alloc or free + * the memory of dpdk-mem resource it will cause hugepage + * mem exception + * be careful for secondary Process to use the share-mem of + * point correlation + */ + share_priv = calloc(1, sizeof(*share_priv)); + if (!share_priv) { + PMD_DRV_LOG(ERR, "calloc share_priv failed"); + return -ENOMEM; + } + memset(share_priv, 0, sizeof(*share_priv)); + eth_dev->process_private = share_priv; + + return 0; +} + +static void +rnp_common_ops_init(struct rnp_eth_adapter *adapter) +{ + struct rnp_share_ops *share_priv; + + share_priv = adapter->share_priv; + share_priv->mbx_api = &rnp_mbx_pf_ops; +} + +static int +rnp_special_ops_init(struct rte_eth_dev *eth_dev) +{ + RTE_SET_USED(eth_dev); + + return 0; +} + static int rnp_eth_dev_init(struct rte_eth_dev *dev) { @@ -124,6 +177,20 @@ rnp_eth_dev_init(struct rte_eth_dev *dev) hw->device_id = pci_dev->id.device_id; hw->vendor_id = pci_dev->id.vendor_id; hw->device_id = pci_dev->id.device_id; + adapter->max_vfs = pci_dev->max_vfs; + ret = rnp_process_resource_init(dev); + if (ret) { + PMD_DRV_LOG(ERR, "share prive resource init failed"); + return ret; + } + adapter->share_priv = dev->process_private; + rnp_common_ops_init(adapter); + rnp_get_nic_attr(adapter); + /* We need Use Device Id To Change The Resource Mode */ + rnp_special_ops_init(dev); + port->adapt = adapter; + port->hw = hw; + rnp_init_mbx_ops_pf(hw); for (p_id = 0; p_id < adapter->num_ports; p_id++) { /* port 0 resource has been alloced When Probe */ if (!p_id) { @@ -158,11 +225,10 @@ rnp_eth_dev_init(struct rte_eth_dev *dev) continue; if (port->eth_dev) { rnp_dev_close(port->eth_dev); - rte_eth_dev_release_port(port->eth_dev); if 
(port->eth_dev->process_private) free(port->eth_dev->process_private); + rte_eth_dev_release_port(port->eth_dev); } - rte_free(port); } rte_free(adapter); diff --git a/drivers/net/rnp/rnp_logs.h b/drivers/net/rnp/rnp_logs.h index 1b3ee33745..f1648aabb5 100644 --- a/drivers/net/rnp/rnp_logs.h +++ b/drivers/net/rnp/rnp_logs.h @@ -13,6 +13,15 @@ extern int rnp_drv_logtype; #define RNP_PMD_DRV_LOG(level, fmt, args...) \ rte_log(RTE_LOG_##level, rnp_drv_logtype, \ "%s() " fmt, __func__, ##args) +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, rnp_drv_logtype, "%s(): " fmt, \ + __func__, ## args) +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#define RNP_PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_##level, rnp_drv_logtype, \ + "rnp_net: (%d) " fmt, __LINE__, ##args) #ifdef RTE_LIBRTE_RNP_DEBUG_RX extern int rnp_rx_logtype; #define RNP_PMD_RX_LOG(level, fmt, args...) \ diff --git a/drivers/net/rnp/rnp_mbx.c b/drivers/net/rnp/rnp_mbx.c new file mode 100644 index 0000000000..29aedc554b --- /dev/null +++ b/drivers/net/rnp/rnp_mbx.c @@ -0,0 +1,522 @@ +#include +#include + +#include "rnp.h" +#include "rnp_hw.h" +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" +#include "rnp_logs.h" + +#define RNP_MAX_VF_FUNCTIONS (64) +/* == VEC == */ +#define VF2PF_MBOX_VEC(VF) (0xa5100 + 4 * (VF)) +#define CPU2PF_MBOX_VEC (0xa5300) + +/* == PF <--> VF mailbox ==== */ +#define SHARE_MEM_BYTES (64) /* 64bytes */ +/* for PF1 rtl will remap 6000 to 0xb000 */ +#define PF_VF_SHM(vf) ((0xa6000) + (64 * (vf))) +#define PF2VF_COUNTER(vf) (PF_VF_SHM(vf) + 0) +#define VF2PF_COUNTER(vf) (PF_VF_SHM(vf) + 4) +#define PF_VF_SHM_DATA(vf) (PF_VF_SHM(vf) + 8) +#define PF2VF_MBOX_CTRL(vf) ((0xa7100) + (4 * (vf))) +#define PF_VF_MBOX_MASK_LO ((0xa7200)) +#define PF_VF_MBOX_MASK_HI ((0xa7300)) + +/* === CPU <--> PF === */ +#define CPU_PF_SHM (0xaa000) +#define CPU2PF_COUNTER (CPU_PF_SHM + 0) +#define PF2CPU_COUNTER (CPU_PF_SHM + 4) +#define CPU_PF_SHM_DATA (CPU_PF_SHM + 8) +#define PF2CPU_MBOX_CTRL (0xaa100) +#define CPU_PF_MBOX_MASK (0xaa300) + +/* === CPU <--> VF === */ +#define CPU_VF_SHM(vf) (0xa8000 + (64 * (vf))) +#define CPU2VF_COUNTER(vf) (CPU_VF_SHM(vf) + 0) +#define VF2CPU_COUNTER(vf) (CPU_VF_SHM(vf) + 4) +#define CPU_VF_SHM_DATA(vf) (CPU_VF_SHM(vf) + 8) +#define VF2CPU_MBOX_CTRL(vf) (0xa9000 + 64 * (vf)) +#define CPU_VF_MBOX_MASK_LO(vf) (0xa9200 + 64 * (vf)) +#define CPU_VF_MBOX_MASK_HI(vf) (0xa9300 + 64 * (vf)) + +#define MBOX_CTRL_REQ (1 << 0) /* WO */ +/* VF:WR, PF:RO */ +#define MBOX_CTRL_PF_HOLD_SHM (1 << 3) /* VF:RO, PF:WR */ + +#define MBOX_IRQ_EN (0) +#define MBOX_IRQ_DISABLE (1) + +/****************************PF MBX OPS************************************/ +static inline u16 rnp_mbx_get_req(struct rnp_hw *hw, int reg) +{ + rte_mb(); + return mbx_rd32(hw, reg) & 0xffff; +} + +static inline u16 rnp_mbx_get_ack(struct rnp_hw *hw, int reg) +{ + rte_mb(); + return (mbx_rd32(hw, reg) >> 16) & 0xffff; +} + +static inline void rnp_mbx_inc_pf_req(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + int reg = (mbx_id == MBX_CM3CPU) ? + PF2CPU_COUNTER : PF2VF_COUNTER(mbx_id); + u32 v = mbx_rd32(hw, reg); + u16 req; + + req = (v & 0xffff); + req++; + v &= ~(0x0000ffff); + v |= req; + + rte_mb(); + mbx_wr32(hw, reg, v); + + /* update stats */ + /* hw->mbx.stats.msgs_tx++; */ +} + +static inline void rnp_mbx_inc_pf_ack(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + int reg = (mbx_id == MBX_CM3CPU) ? 
+ PF2CPU_COUNTER : PF2VF_COUNTER(mbx_id); + u32 v = mbx_rd32(hw, reg); + u16 ack; + + ack = (v >> 16) & 0xffff; + ack++; + v &= ~(0xffff0000); + v |= (ack << 16); + + rte_mb(); + mbx_wr32(hw, reg, v); + + /* update stats */ + /* hw->mbx.stats.msgs_rx++; */ +} + +/** + * rnp_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static int32_t rnp_poll_for_msg(struct rte_eth_dev *dev, enum MBX_ID mbx_id) +{ + const struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev); + struct rnp_hw *hw = RNP_DEV_TO_HW(dev); + struct rnp_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !ops->check_for_msg) + goto out; + + while (countdown && ops->check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + rte_delay_us_block(mbx->usec_delay); + } + +out: + return countdown ? 0 : -ETIMEDOUT; +} + +/** + * rnp_poll_for_ack - Wait for message acknowledgment + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgment + **/ +static int32_t rnp_poll_for_ack(struct rte_eth_dev *dev, enum MBX_ID mbx_id) +{ + const struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev); + struct rnp_hw *hw = RNP_DEV_TO_HW(dev); + struct rnp_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !ops->check_for_ack) + goto out; + + while (countdown && ops->check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + rte_delay_us_block(mbx->usec_delay); + } + +out: + return countdown ? 0 : -ETIMEDOUT; +} + +/** + * rnp_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. 
+ **/ +static int32_t +rnp_read_posted_mbx_pf(struct rte_eth_dev *dev, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + const struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev); + struct rnp_hw *hw = RNP_DEV_TO_HW(dev); + struct rnp_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + int32_t ret_val = -ETIMEDOUT; + + if (!ops->read || !countdown) + return -EOPNOTSUPP; + + ret_val = rnp_poll_for_msg(dev, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + return ops->read(hw, msg, size, mbx_id); + return ret_val; +} + +/** + * rnp_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static int32_t +rnp_write_posted_mbx_pf(struct rte_eth_dev *dev, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + const struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev); + struct rnp_hw *hw = RNP_DEV_TO_HW(dev); + struct rnp_mbx_info *mbx = &hw->mbx; + int32_t ret_val = -ETIMEDOUT; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!ops->write || !mbx->timeout) + goto out; + + /* send msg and hold buffer lock */ + if (ops->write) + ret_val = ops->write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = rnp_poll_for_ack(dev, mbx_id); +out: + return ret_val; +} + +/** + * rnp_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int32_t rnp_check_for_msg_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + int32_t ret_val = -ETIMEDOUT; + + if (mbx_id == MBX_CM3CPU) { + if (rnp_mbx_get_req(hw, CPU2PF_COUNTER) != hw->mbx.cpu_req) { + ret_val = 0; + /* hw->mbx.stats.reqs++; */ + } + } else { + if (rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx_id)) != + hw->mbx.vf_req[mbx_id]) { + ret_val = 0; + /* hw->mbx.stats.reqs++; */ + } + } + + return ret_val; +} + +/** + * rnp_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static int32_t rnp_check_for_ack_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + int32_t ret_val = -ETIMEDOUT; + + if (mbx_id == MBX_CM3CPU) { + if (rnp_mbx_get_ack(hw, CPU2PF_COUNTER) != hw->mbx.cpu_ack) { + ret_val = 0; + /* hw->mbx.stats.acks++; */ + } + } else { + if (rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx_id)) != hw->mbx.vf_ack[mbx_id]) { + ret_val = 0; + /* hw->mbx.stats.acks++; */ + } + } + + return ret_val; +} + +/** + * rnp_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @mbx_id: the VF index or CPU + * + * return SUCCESS if we obtained the mailbox lock + **/ +static int32_t rnp_obtain_mbx_lock_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + int32_t ret_val = -ETIMEDOUT; + int try_cnt = 5000; /* 500ms */ + u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? 
+ PF2CPU_MBOX_CTRL : PF2VF_MBOX_CTRL(mbx_id); + + while (try_cnt-- > 0) { + /* Take ownership of the buffer */ + mbx_wr32(hw, CTRL_REG, MBOX_CTRL_PF_HOLD_SHM); + + /* reserve mailbox for cm3 use */ + if (mbx_rd32(hw, CTRL_REG) & MBOX_CTRL_PF_HOLD_SHM) + return 0; + rte_delay_us_block(100); + } + + RNP_PMD_LOG(WARNING, "%s: failed to get:%d lock\n", + __func__, mbx_id); + return ret_val; +} + +/** + * rnp_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static int32_t rnp_write_mbx_pf(struct rnp_hw *hw, u32 *msg, + u16 size, enum MBX_ID mbx_id) +{ + u32 DATA_REG = (mbx_id == MBX_CM3CPU) ? + CPU_PF_SHM_DATA : PF_VF_SHM_DATA(mbx_id); + u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? + PF2CPU_MBOX_CTRL : PF2VF_MBOX_CTRL(mbx_id); + int32_t ret_val = 0; + u32 stat __rte_unused; + u16 i; + + if (size > RNP_VFMAILBOX_SIZE) { + RNP_PMD_LOG(ERR, "%s: size:%d should <%d\n", __func__, + size, RNP_VFMAILBOX_SIZE); + return -EINVAL; + } + + /* lock the mailbox to prevent pf/vf/cpu race condition */ + ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id); + if (ret_val) { + RNP_PMD_LOG(WARNING, "PF[%d] Can't Get Mbx-Lock Try Again\n", + hw->function); + return ret_val; + } + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) { +#ifdef MBX_WR_DEBUG + mbx_pwr32(hw, DATA_REG + i * 4, msg[i]); +#else + mbx_wr32(hw, DATA_REG + i * 4, msg[i]); +#endif + } + + /* flush msg and acks as we are overwriting the message buffer */ + if (mbx_id == MBX_CM3CPU) + hw->mbx.cpu_ack = rnp_mbx_get_ack(hw, CPU2PF_COUNTER); + else + hw->mbx.vf_ack[mbx_id] = rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx_id)); + + rnp_mbx_inc_pf_req(hw, mbx_id); + rte_mb(); + + rte_delay_us(300); + + /* Interrupt VF/CM3 to tell it a message + * has been sent and release buffer + */ + mbx_wr32(hw, CTRL_REG, MBOX_CTRL_REQ); + + return 0; +} + +/** + * rnp_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF/CPU request so no polling for message is needed. + **/ +static int32_t rnp_read_mbx_pf(struct rnp_hw *hw, u32 *msg, + u16 size, enum MBX_ID mbx_id) +{ + u32 BUF_REG = (mbx_id == MBX_CM3CPU) ? + CPU_PF_SHM_DATA : PF_VF_SHM_DATA(mbx_id); + u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? + PF2CPU_MBOX_CTRL : PF2VF_MBOX_CTRL(mbx_id); + int32_t ret_val = -EIO; + u32 stat __rte_unused, i; + if (size > RNP_VFMAILBOX_SIZE) { + RNP_PMD_LOG(ERR, "%s: size:%d should <%d\n", __func__, + size, RNP_VFMAILBOX_SIZE); + return -EINVAL; + } + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id); + if (ret_val) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) { +#ifdef MBX_RD_DEBUG + msg[i] = mbx_prd32(hw, BUF_REG + 4 * i); +#else + msg[i] = mbx_rd32(hw, BUF_REG + 4 * i); +#endif + } + mbx_wr32(hw, BUF_REG, 0); + + /* update req. used by rnpvf_check_for_msg_vf */ + if (mbx_id == MBX_CM3CPU) + hw->mbx.cpu_req = rnp_mbx_get_req(hw, CPU2PF_COUNTER); + else + hw->mbx.vf_req[mbx_id] = rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx_id)); + + /* this ack maybe too earier? 
*/ + /* Acknowledge receipt and release mailbox, then we're done */ + rnp_mbx_inc_pf_ack(hw, mbx_id); + + rte_mb(); + + /* free ownership of the buffer */ + mbx_wr32(hw, CTRL_REG, 0); + +out_no_read: + + return ret_val; +} + +static void rnp_mbx_reset_pf(struct rnp_hw *hw) +{ + int v; + + /* reset pf->cm3 status */ + v = mbx_rd32(hw, CPU2PF_COUNTER); + hw->mbx.cpu_req = v & 0xffff; + hw->mbx.cpu_ack = (v >> 16) & 0xffff; + /* release pf->cm3 buffer lock */ + mbx_wr32(hw, PF2CPU_MBOX_CTRL, 0); + + rte_mb(); + /* enable irq to fw */ + mbx_wr32(hw, CPU_PF_MBOX_MASK, 0); +} + +static int get_pfvfnum(struct rnp_hw *hw) +{ + uint32_t addr_mask; + uint32_t offset; + uint32_t val; +#define RNP_PF_NUM_REG (0x75f000) +#define RNP_PFVF_SHIFT (4) +#define RNP_PF_SHIFT (6) +#define RNP_PF_BIT_MASK BIT(6) + addr_mask = hw->iobar0_len - 1; + offset = RNP_PF_NUM_REG & addr_mask; + val = rnp_io_rd(hw->iobar0, offset); + + return val >> RNP_PFVF_SHIFT; +} + +const struct rnp_mbx_api rnp_mbx_pf_ops = { + .read = rnp_read_mbx_pf, + .write = rnp_write_mbx_pf, + .read_posted = rnp_read_posted_mbx_pf, + .write_posted = rnp_write_posted_mbx_pf, + .check_for_msg = rnp_check_for_msg_pf, + .check_for_ack = rnp_check_for_ack_pf, +}; + +void *rnp_memzone_reserve(const char *name, unsigned int size) +{ +#define NO_FLAGS 0 + const struct rte_memzone *mz = NULL; + + if (name) { + if (size) { + mz = rte_memzone_reserve(name, size, + rte_socket_id(), NO_FLAGS); + if (mz) + memset(mz->addr, 0, size); + } else { + mz = rte_memzone_lookup(name); + } + return mz ? mz->addr : NULL; + } + return NULL; +} + +void rnp_init_mbx_ops_pf(struct rnp_hw *hw) +{ + struct rnp_eth_adapter *adapter = hw->back; + struct rnp_mbx_info *mbx = &hw->mbx; + struct mbx_req_cookie *cookie; + uint32_t vf_isolat_off; + + mbx->size = RNP_VFMAILBOX_SIZE; + mbx->usec_delay = RNP_MBX_DELAY_US; + mbx->timeout = (RNP_MBX_TIMEOUT_SECONDS * 1000 * 1000) / + mbx->usec_delay; + if (hw->device_id == RNP_DEV_ID_N10G) { + vf_isolat_off = RNP_VF_ISOLATE_CTRL & + (hw->iobar0_len - 1); + rnp_io_wr(hw->iobar0, vf_isolat_off, 0); + } + mbx->sriov_st = 0; + hw->pf_vf_num = get_pfvfnum(hw); + mbx->vf_num = UINT16_MAX; + mbx->pf_num = (hw->pf_vf_num & RNP_PF_BIT_MASK) >> RNP_PF_SHIFT; + hw->function = mbx->pf_num; + /* Retrieving and storing the HW base address of device */ + rnp_reg_offset_init(hw); + snprintf(hw->cookie_p_name, RTE_MEMZONE_NAMESIZE, "mbx_req_cookie%d_%d", + hw->function, adapter->eth_dev->data->port_id); + hw->cookie_pool = rnp_memzone_reserve(hw->cookie_p_name, + sizeof(struct mbx_req_cookie)); + + cookie = (struct mbx_req_cookie *)hw->cookie_pool; + if (cookie) { + cookie->timeout_ms = 1000; + cookie->magic = COOKIE_MAGIC; + cookie->priv_len = RNP_MAX_SHARE_MEM; + } + + rnp_mbx_reset_pf(hw); +} diff --git a/drivers/net/rnp/rnp_mbx.h b/drivers/net/rnp/rnp_mbx.h new file mode 100644 index 0000000000..87949c1726 --- /dev/null +++ b/drivers/net/rnp/rnp_mbx.h @@ -0,0 +1,139 @@ +#ifndef __TSRN10_MBX_H__ +#define __TSRN10_MBX_H__ + +#define VF_NUM_MASK_TEMP (0xff0) +#define VF_NUM_OFF (4) +#define RNP_VF_NUM (0x75f000) +#define RNP_VF_NB_MASK (0x3f) +#define RNP_PF_NB_MASK (0x40) +#define RNP_VF_ISOLATE_CTRL (0x7982fc) +#define RNP_IS_SRIOV BIT(7) +#define RNP_SRIOV_ST_SHIFT (24) +#define RNP_VF_DEFAULT_PORT (0) + +/* Mbx Ctrl state */ +#define RNP_VFMAILBOX_SIZE (14) /* 16 32 bit words - 64 bytes */ +#define TSRN10_VFMBX_SIZE (RNP_VFMAILBOX_SIZE) +#define RNP_VT_MSGTYPE_ACK (0x80000000) + +#define RNP_VT_MSGTYPE_NACK (0x40000000) +/* Messages below or'd 
with * this are the NACK */ +#define RNP_VT_MSGTYPE_CTS (0x20000000) +/* Indicates that VF is still + *clear to send requests + */ +#define RNP_VT_MSGINFO_SHIFT (16) + +#define RNP_VT_MSGINFO_MASK (0xFF << RNP_VT_MSGINFO_SHIFT) +/* The mailbox memory size is 64 bytes accessed by 32-bit registers */ +#define RNP_VLVF_VIEN (0x80000000) /* filter is valid */ +#define RNP_VLVF_ENTRIES (64) +#define RNP_VLVF_VLANID_MASK (0x00000FFF) +/* Every VF own 64 bytes mem for communitate accessed by 32-bit */ + +#define RNP_VF_RESET (0x01) /* VF requests reset */ +#define RNP_VF_SET_MAC_ADDR (0x02) /* VF requests PF to set MAC addr */ +#define RNP_VF_SET_MULTICAST (0x03) /* VF requests PF to set MC addr */ +#define RNP_VF_SET_VLAN (0x04) /* VF requests PF to set VLAN */ + +#define RNP_VF_SET_LPE (0x05) /* VF requests PF to set VMOLR.LPE */ +#define RNP_VF_SET_MACVLAN (0x06) /* VF requests PF for unicast filter */ +#define RNP_VF_GET_MACVLAN (0x07) /* VF requests mac */ +#define RNP_VF_API_NEGOTIATE (0x08) /* negotiate API version */ +#define RNP_VF_GET_QUEUES (0x09) /* get queue configuration */ +#define RNP_VF_GET_LINK (0x10) /* get link status */ + +#define RNP_VF_SET_VLAN_STRIP (0x0a) /* VF Requests PF to set VLAN STRIP */ +#define RNP_VF_REG_RD (0x0b) /* VF Read Reg */ +#define RNP_VF_GET_MAX_MTU (0x0c) /* VF Get Max Mtu */ +#define RNP_VF_SET_MTU (0x0d) /* VF Set Mtu */ +#define RNP_VF_GET_FW (0x0e) /* VF Get Firmware Version */ + +#define RNP_PF_VFNUM_MASK GENMASK(26, 21) + +#define RNP_PF_SET_FCS (0x10) /* PF set fcs status */ +#define RNP_PF_SET_PAUSE (0x11) /* PF set pause status */ +#define RNP_PF_SET_FT_PADDING (0x12) /* PF set ft padding status */ +#define RNP_PF_SET_VLAN_FILTER (0x13) /* PF set ntuple status */ +#define RNP_PF_SET_VLAN (0x14) +#define RNP_PF_SET_LINK (0x15) +#define RNP_PF_SET_SPEED_40G BIT(8) +#define RNP_PF_SET_SPEED_10G BIT(7) +#define RNP_PF_SET_SPEED_1G BIT(5) +#define RNP_PF_SET_SPEED_100M BIT(3) + +#define RNP_PF_SET_MTU (0x16) +#define RNP_PF_SET_RESET (0x17) +#define RNP_PF_LINK_UP BIT(31) +#define RNP_PF_SPEED_MASK GENMASK(15, 0) + +/* Define mailbox register bits */ +#define RNP_PF_REMOVE (0x0f) + +/* Mailbox API ID VF Request */ +/* length of permanent address message returned from PF */ +#define RNP_VF_PERMADDR_MSG_LEN (11) +#define RNP_VF_TX_QUEUES (1) /* number of Tx queues supported */ +#define RNP_VF_RX_QUEUES (2) /* number of Rx queues supported */ +#define RNP_VF_TRANS_VLAN (3) /* Indication of port vlan */ +#define RNP_VF_DEF_QUEUE (4) /* Default queue offset */ +/* word in permanent address message with the current multicast type */ +#define RNP_VF_VLAN_WORD (5) +#define RNP_VF_PHY_TYPE_WORD (6) +#define RNP_VF_FW_VERSION_WORD (7) +#define RNP_VF_LINK_STATUS_WORD (8) +#define RNP_VF_AXI_MHZ (9) +#define RNP_VF_RNP_VF_FEATURE (10) +#define RNP_VF_RNP_VF_FILTER_EN BIT(0) + +#define RNP_LINK_SPEED_UNKNOWN 0 +#define RNP_LINK_SPEED_10_FULL BIT(2) +#define RNP_LINK_SPEED_100_FULL BIT(3) +#define RNP_LINK_SPEED_1GB_FULL BIT(4) +#define RNP_LINK_SPEED_10GB_FULL BIT(5) +#define RNP_LINK_SPEED_40GB_FULL BIT(6) +#define RNP_LINK_SPEED_25GB_FULL BIT(7) +#define RNP_LINK_SPEED_50GB_FULL BIT(8) +#define RNP_LINK_SPEED_100GB_FULL BIT(9) +#define RNP_LINK_SPEED_10_HALF BIT(10) +#define RNP_LINK_SPEED_100_HALF BIT(11) +#define RNP_LINK_SPEED_1GB_HALF BIT(12) + +/* Mailbox API ID PF Request */ +#define RNP_VF_MC_TYPE_WORD (3) +#define RNP_VF_DMA_VERSION_WORD (4) +/* Get Queue write-back reference value */ +#define RNP_PF_CONTROL_PRING_MSG (0x0100) /* PF control message 
*/ + +#define TSRN10_MBX_VECTOR_ID (0) +#define TSRN10_PF2VF_MBX_VEC_CTR(n) (0xa5000 + 0x4 * (n)) + +#define RNP_VF_INIT_TIMEOUT (200) /* Number of retries to clear RSTI */ +#define RNP_VF_MBX_INIT_TIMEOUT (2000) /* number of retries on mailbox */ + +#define MBOX_CTRL_REQ (1 << 0) /* WO */ +#define MBOX_CTRL_VF_HOLD_SHM (1 << 2) /* VF:WR, PF:RO */ +#define VF_NUM_MASK 0x3f +#define VFNUM(num) ((num) & VF_NUM_MASK) + +#define PF_VF_SHM(vf) \ + ((0xa6000) + (64 * (vf))) /* for PF1 rtl will remap 6000 to 0xb000 */ +#define PF2VF_COUNTER(vf) (PF_VF_SHM(vf) + 0) +#define VF2PF_COUNTER(vf) (PF_VF_SHM(vf) + 4) +#define PF_VF_SHM_DATA(vf) (PF_VF_SHM(vf) + 8) +#define VF2PF_MBOX_CTRL(vf) ((0xa7000) + (4 * (vf))) + +/* Error Codes */ +#define RNP_ERR_INVALID_MAC_ADDR (-1) +#define RNP_ERR_MBX (-100) + +#define RNP_MBX_DELAY_US (100) /* Delay us for Retry */ +/* Max Retry Time */ +#define RNP_MBX_TIMEOUT_SECONDS (2) /* Max Retry Time 2s */ +#define RNP_ARRAY_OPCODE_OFFSET (0) +#define RNP_ARRAY_CTRL_OFFSET (1) + +void rnp_init_mbx_ops_pf(struct rnp_hw *hw); +extern const struct rnp_mbx_api rnp_mbx_pf_ops; +void *rnp_memzone_reserve(const char *name, unsigned int size); +#endif diff --git a/drivers/net/rnp/rnp_mbx_fw.c b/drivers/net/rnp/rnp_mbx_fw.c new file mode 100644 index 0000000000..6fe008351b --- /dev/null +++ b/drivers/net/rnp/rnp_mbx_fw.c @@ -0,0 +1,271 @@ +#include + +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" +#include "rnp_logs.h" + +static int +rnp_fw_send_cmd_wait(struct rte_eth_dev *dev, struct mbx_fw_cmd_req *req, + struct mbx_fw_cmd_reply *reply) +{ + const struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev); + struct rnp_hw *hw = RNP_DEV_TO_HW(dev); + int err; + + rte_spinlock_lock(&hw->fw_lock); + + err = ops->write_posted(dev, (u32 *)req, + (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + if (err) { + RNP_PMD_LOG(ERR, "%s: write_posted failed! err:0x%x\n", + __func__, err); + rte_spinlock_unlock(&hw->fw_lock); + return err; + } + + err = ops->read_posted(dev, (u32 *)reply, sizeof(*reply) / 4, MBX_FW); + rte_spinlock_unlock(&hw->fw_lock); + if (err) { + RNP_PMD_LOG(ERR, + "%s: read_posted failed! err:0x%x. " + "req-op:0x%x\n", + __func__, + err, + req->opcode); + goto err_quit; + } + + if (reply->error_code) { + RNP_PMD_LOG(ERR, + "%s: reply err:0x%x. 
req-op:0x%x\n", + __func__, + reply->error_code, + req->opcode); + err = -reply->error_code; + goto err_quit; + } + + return 0; +err_quit: + RNP_PMD_LOG(ERR, + "%s:PF[%d]: req:%08x_%08x_%08x_%08x " + "reply:%08x_%08x_%08x_%08x\n", + __func__, + hw->function, + ((int *)req)[0], + ((int *)req)[1], + ((int *)req)[2], + ((int *)req)[3], + ((int *)reply)[0], + ((int *)reply)[1], + ((int *)reply)[2], + ((int *)reply)[3]); + + return err; +} + +static int rnp_mbx_fw_post_req(struct rte_eth_dev *dev, + struct mbx_fw_cmd_req *req, + struct mbx_req_cookie *cookie) +{ + const struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev); + struct rnp_hw *hw = RNP_DEV_TO_HW(dev); + int err = 0; + int timeout_cnt; +#define WAIT_MS 10 + + cookie->done = 0; + + rte_spinlock_lock(&hw->fw_lock); + + /* down_interruptible(&pf_cpu_lock); */ + err = ops->write(hw, (u32 *)req, + (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + if (err) { + RNP_PMD_LOG(ERR, "rnp_write_mbx failed!\n"); + goto quit; + } + + timeout_cnt = cookie->timeout_ms / WAIT_MS; + while (timeout_cnt > 0) { + rte_delay_ms(WAIT_MS); + timeout_cnt--; + if (cookie->done) + break; + } + +quit: + rte_spinlock_unlock(&hw->fw_lock); + return err; +} + +static int rnp_fw_get_capablity(struct rte_eth_dev *dev, + struct phy_abilities *abil) +{ + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + int err; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_phy_abalities_req(&req, &req); + + err = rnp_fw_send_cmd_wait(dev, &req, &reply); + if (err) + return err; + + memcpy(abil, &reply.phy_abilities, sizeof(*abil)); + + return 0; +} + +#define RNP_MBX_API_MAX_RETRY (10) +int rnp_mbx_get_capability(struct rte_eth_dev *dev, + int *lane_mask, + int *nic_mode) +{ + struct rnp_hw *hw = RNP_DEV_TO_HW(dev); + struct phy_abilities ablity; + uint16_t temp_lmask; + uint16_t lane_bit = 0; + uint16_t retry = 0; + int lane_cnt = 0; + uint8_t lane_idx; + int err = -EIO; + uint8_t idx; + + memset(&ablity, 0, sizeof(ablity)); + + /* enable CM3CPU to PF MBX IRQ */ + do { + err = rnp_fw_get_capablity(dev, &ablity); + if (retry > RNP_MBX_API_MAX_RETRY) + break; + retry++; + } while (err); + if (!err) { + hw->lane_mask = ablity.lane_mask; + hw->nic_mode = ablity.nic_mode; + hw->pfvfnum = ablity.pfnum; + hw->fw_version = ablity.fw_version; + hw->axi_mhz = ablity.axi_mhz; + hw->fw_uid = ablity.fw_uid; + if (ablity.phy_type == PHY_TYPE_SGMII) { + hw->is_sgmii = 1; + hw->sgmii_phy_id = ablity.phy_id; + } + + if (ablity.ext_ablity != 0xffffffff && ablity.e.valid) { + hw->ncsi_en = (ablity.e.ncsi_en == 1); + hw->ncsi_rar_entries = 1; + } + + if (hw->nic_mode == RNP_SINGLE_10G && + hw->fw_version >= 0x00050201 && + ablity.speed == RTE_ETH_SPEED_NUM_10G) { + hw->force_speed_stat = FORCE_SPEED_STAT_DISABLED; + hw->force_10g_1g_speed_ablity = 1; + } + + if (lane_mask) + *lane_mask = hw->lane_mask; + if (nic_mode) + *nic_mode = hw->nic_mode; + + lane_cnt = __builtin_popcount(hw->lane_mask); + temp_lmask = hw->lane_mask; + for (idx = 0; idx < lane_cnt; idx++) { + hw->phy_port_ids[idx] = ablity.port_ids[idx]; + lane_bit = ffs(temp_lmask) - 1; + lane_idx = ablity.port_ids[idx] % lane_cnt; + hw->lane_of_port[lane_idx] = lane_bit; + temp_lmask &= ~BIT(lane_bit); + } + hw->max_port_num = lane_cnt; + } + + RNP_PMD_LOG(INFO, + "%s: nic-mode:%d lane_cnt:%d lane_mask:0x%x " + "pfvfnum:0x%x, fw_version:0x%08x, ports:%d-%d-%d-%d ncsi:en:%d\n", + __func__, + hw->nic_mode, + lane_cnt, + hw->lane_mask, + hw->pfvfnum, + ablity.fw_version, + ablity.port_ids[0], + 
ablity.port_ids[1], + ablity.port_ids[2], + ablity.port_ids[3], + hw->ncsi_en); + + if (lane_cnt <= 0 || lane_cnt > 4) + return -EIO; + + return err; +} + +int rnp_mbx_link_event_enable(struct rte_eth_dev *dev, int enable) +{ + const struct rnp_mbx_api *ops = RNP_DEV_TO_MBX_OPS(dev); + struct rnp_hw *hw = RNP_DEV_TO_HW(dev); + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + int err, v; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + rte_spinlock_lock(&hw->fw_lock); + if (enable) { + v = rnp_rd_reg(hw->link_sync); + v &= ~RNP_FIRMWARE_SYNC_MASK; + v |= RNP_FIRMWARE_SYNC_MAGIC; + rnp_wr_reg(hw->link_sync, v); + } else { + rnp_wr_reg(hw->link_sync, 0); + } + rte_spinlock_unlock(&hw->fw_lock); + + build_link_set_event_mask(&req, BIT(EVT_LINK_UP), + (enable & 1) << EVT_LINK_UP, &req); + + rte_spinlock_lock(&hw->fw_lock); + err = ops->write_posted(dev, (u32 *)&req, + (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + rte_spinlock_unlock(&hw->fw_lock); + + rte_delay_ms(200); + + return err; +} + +int rnp_mbx_fw_reset_phy(struct rte_eth_dev *dev) +{ + struct rnp_hw *hw = RNP_DEV_TO_HW(dev); + struct mbx_fw_cmd_reply reply; + struct mbx_req_cookie *cookie; + struct mbx_fw_cmd_req req; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (hw->mbx.irq_enabled) { + cookie = rnp_memzone_reserve(hw->cookie_p_name, 0); + if (!cookie) + return -ENOMEM; + memset(cookie->priv, 0, cookie->priv_len); + build_reset_phy_req(&req, cookie); + return rnp_mbx_fw_post_req(dev, &req, cookie); + } + build_reset_phy_req(&req, &req); + + return rnp_fw_send_cmd_wait(dev, &req, &reply); +} diff --git a/drivers/net/rnp/rnp_mbx_fw.h b/drivers/net/rnp/rnp_mbx_fw.h new file mode 100644 index 0000000000..439090b5a3 --- /dev/null +++ b/drivers/net/rnp/rnp_mbx_fw.h @@ -0,0 +1,22 @@ +#ifndef __RNP_MBX_FW_H__ +#define __RNP_MBX_FW_H__ + +struct mbx_fw_cmd_reply; +typedef void (*cookie_cb)(struct mbx_fw_cmd_reply *reply, void *priv); +#define RNP_MAX_SHARE_MEM (8 * 8) +struct mbx_req_cookie { + int magic; +#define COOKIE_MAGIC 0xCE + cookie_cb cb; + int timeout_ms; + int errcode; + + /* wait_queue_head_t wait; */ + volatile int done; + int priv_len; + char priv[RNP_MAX_SHARE_MEM]; +}; +struct mbx_fw_cmd_reply { +} __rte_cache_aligned; + +#endif /* __RNP_MBX_FW_H__*/ -- 2.27.0
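For readers unfamiliar with this mailbox scheme, the standalone sketch below
(not part of the patch) illustrates the counter handshake that
rnp_check_for_msg_pf()/rnp_check_for_ack_pf() rely on: each shared counter
register packs the request count in its low 16 bits and the ack count in its
high 16 bits, and new traffic is detected by comparing the hardware value
with the copy cached in struct rnp_mbx_info.

#include <stdint.h>
#include <stdio.h>

/* Low 16 bits of the shared counter: number of requests sent so far. */
static uint16_t counter_req(uint32_t v)
{
	return v & 0xffff;
}

/* High 16 bits of the shared counter: number of acknowledgements returned. */
static uint16_t counter_ack(uint32_t v)
{
	return (v >> 16) & 0xffff;
}

int main(void)
{
	/* Pretend CPU2PF_COUNTER currently reads 5 acks and 3 requests. */
	uint32_t cpu2pf_counter = (5u << 16) | 3u;
	/* Values the PF cached after the previous exchange. */
	uint16_t cached_req = 2, cached_ack = 5;

	if (counter_req(cpu2pf_counter) != cached_req)
		printf("firmware message pending\n");
	if (counter_ack(cpu2pf_counter) == cached_ack)
		printf("no new ack yet, keep polling\n");
	return 0;
}

Each direction has its own register: the PF bumps the halves of
PF2CPU_COUNTER via rnp_mbx_inc_pf_req()/rnp_mbx_inc_pf_ack(), while the
firmware updates CPU2PF_COUNTER, so producers never write the same bits.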