DPDK patches and discussions
From: Howard Wang <howard_wang@realsil.com.cn>
To: <dev@dpdk.org>
Cc: <pro_nic_dpdk@realtek.com>, Howard Wang <howard_wang@realsil.com.cn>
Subject: [PATCH v6 06/17] net/r8169: add phy registers access routines
Date: Fri, 8 Nov 2024 20:11:12 +0800	[thread overview]
Message-ID: <20241108121123.248797-7-howard_wang@realsil.com.cn> (raw)
In-Reply-To: <20241108121123.248797-1-howard_wang@realsil.com.cn>

Add routines to access the Ethernet PHY registers through the OCP
interface (MDIO read/write with page selection via register 0x1F) and
the PCIe PHY (EPHY) registers, along with helpers to set and clear
individual register bits. Also adjust rtl_disable_rxdvgate() to match
the CFG_METHOD_69 ... CFG_METHOD_71 range.

Signed-off-by: Howard Wang <howard_wang@realsil.com.cn>
---
 drivers/net/r8169/r8169_ethdev.h |   1 +
 drivers/net/r8169/r8169_hw.c     |   2 +-
 drivers/net/r8169/r8169_phy.c    | 219 +++++++++++++++++++++++++++++++
 drivers/net/r8169/r8169_phy.h    |  18 +++
 4 files changed, 239 insertions(+), 1 deletion(-)

diff --git a/drivers/net/r8169/r8169_ethdev.h b/drivers/net/r8169/r8169_ethdev.h
index ac0b3eef47..0d4e6079e0 100644
--- a/drivers/net/r8169/r8169_ethdev.h
+++ b/drivers/net/r8169/r8169_ethdev.h
@@ -16,6 +16,7 @@ struct rtl_hw {
 	u8  *mmio_addr;
 	u32 mcfg;
 	u8  HwSuppIntMitiVer;
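+	/* Current PHY page, updated by MDIO writes to register 0x1F */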
+	u16 cur_page;
 
 	/* Enable Tx No Close */
 	u8 EnableTxNoClose;
diff --git a/drivers/net/r8169/r8169_hw.c b/drivers/net/r8169/r8169_hw.c
index 52c387c8e7..9bf3437c6a 100644
--- a/drivers/net/r8169/r8169_hw.c
+++ b/drivers/net/r8169/r8169_hw.c
@@ -355,7 +355,7 @@ void
 rtl_disable_rxdvgate(struct rtl_hw *hw)
 {
 	switch (hw->mcfg) {
-	case CFG_METHOD_1 ... CFG_METHOD_3:
+	case CFG_METHOD_69 ... CFG_METHOD_71:
 		RTL_W8(hw, 0xF2, RTL_R8(hw, 0xF2) & ~BIT_3);
 		rte_delay_ms(2);
 	}
diff --git a/drivers/net/r8169/r8169_phy.c b/drivers/net/r8169/r8169_phy.c
index 11c28deefe..853e1d4922 100644
--- a/drivers/net/r8169/r8169_phy.c
+++ b/drivers/net/r8169/r8169_phy.c
@@ -36,3 +36,222 @@ rtl_set_mac_ocp_bit(struct rtl_hw *hw, u16 addr, u16 mask)
 {
 	rtl_clear_set_mac_ocp_bit(hw, addr, 0, mask);
 }
+
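+/*
+ * Map a (page, register) pair to a PHY OCP address. Page 0 refers to the
+ * standard MII registers, which are remapped onto OCP pages starting at
+ * OCP_STD_PHY_BASE_PAGE, eight registers per page at offsets 0x10-0x17.
+ */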
+static u16
+rtl_map_phy_ocp_addr(u16 PageNum, u8 RegNum)
+{
+	u8 ocp_reg_num = 0;
+	u16 ocp_page_num = 0;
+	u16 ocp_phy_address = 0;
+
+	if (PageNum == 0) {
+		ocp_page_num = OCP_STD_PHY_BASE_PAGE + (RegNum / 8);
+		ocp_reg_num = 0x10 + (RegNum % 8);
+	} else {
+		ocp_page_num = PageNum;
+		ocp_reg_num = RegNum;
+	}
+
+	ocp_page_num <<= 4;
+
+	if (ocp_reg_num < 16) {
+		ocp_phy_address = 0;
+	} else {
+		ocp_reg_num -= 16;
+		ocp_reg_num <<= 1;
+
+		ocp_phy_address = ocp_page_num + ocp_reg_num;
+	}
+
+	return ocp_phy_address;
+}
+
+static u32
+rtl_mdio_real_read_phy_ocp(struct rtl_hw *hw, u32 RegAddr)
+{
+	u32 data32;
+	int i, value = 0;
+
+	data32 = RegAddr / 2;
+	data32 <<= OCPR_Addr_Reg_shift;
+
+	RTL_W32(hw, PHYOCP, data32);
+	for (i = 0; i < 100; i++) {
+		rte_delay_us(1);
+
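+		/* Check if the NIC has completed PHY OCP read */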
+		if (RTL_R32(hw, PHYOCP) & OCPR_Flag)
+			break;
+	}
+	value = RTL_R32(hw, PHYOCP) & OCPDR_Data_Mask;
+
+	return value;
+}
+
+u32
+rtl_mdio_direct_read_phy_ocp(struct rtl_hw *hw, u32 RegAddr)
+{
+	return rtl_mdio_real_read_phy_ocp(hw, RegAddr);
+}
+
+static u32
+rtl_mdio_read_phy_ocp(struct rtl_hw *hw, u16 PageNum, u32 RegAddr)
+{
+	u16 ocp_addr;
+
+	ocp_addr = rtl_map_phy_ocp_addr(PageNum, RegAddr);
+
+	return rtl_mdio_direct_read_phy_ocp(hw, ocp_addr);
+}
+
+static u32
+rtl_mdio_real_read(struct rtl_hw *hw, u32 RegAddr)
+{
+	return rtl_mdio_read_phy_ocp(hw, hw->cur_page, RegAddr);
+}
+
+static void
+rtl_mdio_real_write_phy_ocp(struct rtl_hw *hw, u32 RegAddr, u32 value)
+{
+	u32 data32;
+	int i;
+
+	data32 = RegAddr / 2;
+	data32 <<= OCPR_Addr_Reg_shift;
+	data32 |= OCPR_Write | value;
+
+	RTL_W32(hw, PHYOCP, data32);
+	for (i = 0; i < 100; i++) {
+		rte_delay_us(1);
+
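+		/* Check if the NIC has completed PHY OCP write */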
+		if (!(RTL_R32(hw, PHYOCP) & OCPR_Flag))
+			break;
+	}
+}
+
+void
+rtl_mdio_direct_write_phy_ocp(struct rtl_hw *hw, u32 RegAddr, u32 value)
+{
+	rtl_mdio_real_write_phy_ocp(hw, RegAddr, value);
+}
+
+static void
+rtl_mdio_write_phy_ocp(struct rtl_hw *hw, u16 PageNum, u32 RegAddr, u32 value)
+{
+	u16 ocp_addr;
+
+	ocp_addr = rtl_map_phy_ocp_addr(PageNum, RegAddr);
+
+	rtl_mdio_direct_write_phy_ocp(hw, ocp_addr, value);
+}
+
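+/* A write to register 0x1F selects the page used by later MDIO accesses */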
+static void
+rtl_mdio_real_write(struct rtl_hw *hw, u32 RegAddr, u32 value)
+{
+	if (RegAddr == 0x1F)
+		hw->cur_page = value;
+	rtl_mdio_write_phy_ocp(hw, hw->cur_page, RegAddr, value);
+}
+
+u32
+rtl_mdio_read(struct rtl_hw *hw, u32 RegAddr)
+{
+	return rtl_mdio_real_read(hw, RegAddr);
+}
+
+void
+rtl_mdio_write(struct rtl_hw *hw, u32 RegAddr, u32 value)
+{
+	rtl_mdio_real_write(hw, RegAddr, value);
+}
+
+void
+rtl_clear_and_set_eth_phy_ocp_bit(struct rtl_hw *hw, u16 addr, u16 clearmask,
+				  u16 setmask)
+{
+	u16 phy_reg_value;
+
+	phy_reg_value = rtl_mdio_direct_read_phy_ocp(hw, addr);
+	phy_reg_value &= ~clearmask;
+	phy_reg_value |= setmask;
+	rtl_mdio_direct_write_phy_ocp(hw, addr, phy_reg_value);
+}
+
+void
+rtl_clear_eth_phy_ocp_bit(struct rtl_hw *hw, u16 addr, u16 mask)
+{
+	rtl_clear_and_set_eth_phy_ocp_bit(hw, addr, mask, 0);
+}
+
+void
+rtl_set_eth_phy_ocp_bit(struct rtl_hw *hw, u16 addr, u16 mask)
+{
+	rtl_clear_and_set_eth_phy_ocp_bit(hw, addr, 0, mask);
+}
+
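+/* Write a PCIe PHY (EPHY) register via EPHYAR */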
+void
+rtl_ephy_write(struct rtl_hw *hw, int addr, int value)
+{
+	int i;
+
+	RTL_W32(hw, EPHYAR, EPHYAR_Write |
+		(addr & EPHYAR_Reg_Mask_v2) << EPHYAR_Reg_shift |
+		(value & EPHYAR_Data_Mask));
+
+	for (i = 0; i < 10; i++) {
+		rte_delay_us(100);
+
+		/* Check if the NIC has completed EPHY write */
+		if (!(RTL_R32(hw, EPHYAR) & EPHYAR_Flag))
+			break;
+	}
+
+	rte_delay_us(20);
+}
+
+static u16
+rtl_ephy_read(struct rtl_hw *hw, int addr)
+{
+	int i;
+	u16 value = 0xffff;
+
+	RTL_W32(hw, EPHYAR, EPHYAR_Read | (addr & EPHYAR_Reg_Mask_v2) <<
+		EPHYAR_Reg_shift);
+
+	for (i = 0; i < 10; i++) {
+		rte_delay_us(100);
+
+		/* Check if the NIC has completed EPHY read */
+		if (RTL_R32(hw, EPHYAR) & EPHYAR_Flag) {
+			value = (u16)(RTL_R32(hw, EPHYAR) & EPHYAR_Data_Mask);
+			break;
+		}
+	}
+
+	rte_delay_us(20);
+
+	return value;
+}
+
+void
+rtl_clear_and_set_pcie_phy_bit(struct rtl_hw *hw, u8 addr, u16 clearmask,
+			       u16 setmask)
+{
+	u16 ephy_value;
+
+	ephy_value = rtl_ephy_read(hw, addr);
+	ephy_value &= ~clearmask;
+	ephy_value |= setmask;
+	rtl_ephy_write(hw, addr, ephy_value);
+}
+
+void
+rtl_clear_pcie_phy_bit(struct rtl_hw *hw, u8 addr, u16 mask)
+{
+	rtl_clear_and_set_pcie_phy_bit(hw, addr, mask, 0);
+}
+
+void
+rtl_set_pcie_phy_bit(struct rtl_hw *hw, u8 addr, u16 mask)
+{
+	rtl_clear_and_set_pcie_phy_bit(hw, addr, 0, mask);
+}
diff --git a/drivers/net/r8169/r8169_phy.h b/drivers/net/r8169/r8169_phy.h
index ee2aa43fde..66c487210f 100644
--- a/drivers/net/r8169/r8169_phy.h
+++ b/drivers/net/r8169/r8169_phy.h
@@ -16,4 +16,22 @@
 void rtl_clear_mac_ocp_bit(struct rtl_hw *hw, u16 addr, u16 mask);
 void rtl_set_mac_ocp_bit(struct rtl_hw *hw, u16 addr, u16 mask);
 
+u32 rtl_mdio_direct_read_phy_ocp(struct rtl_hw *hw, u32 RegAddr);
+void rtl_mdio_direct_write_phy_ocp(struct rtl_hw *hw, u32 RegAddr, u32 value);
+
+u32 rtl_mdio_read(struct rtl_hw *hw, u32 RegAddr);
+void rtl_mdio_write(struct rtl_hw *hw, u32 RegAddr, u32 value);
+
+void rtl_clear_and_set_eth_phy_ocp_bit(struct rtl_hw *hw, u16 addr,
+				       u16 clearmask, u16 setmask);
+void rtl_clear_eth_phy_ocp_bit(struct rtl_hw *hw, u16 addr, u16 mask);
+void rtl_set_eth_phy_ocp_bit(struct rtl_hw *hw, u16 addr, u16 mask);
+
+void rtl_ephy_write(struct rtl_hw *hw, int addr, int value);
+
+void rtl_clear_and_set_pcie_phy_bit(struct rtl_hw *hw, u8 addr, u16 clearmask,
+				    u16 setmask);
+void rtl_clear_pcie_phy_bit(struct rtl_hw *hw, u8 addr, u16 mask);
+void rtl_set_pcie_phy_bit(struct rtl_hw *hw, u8 addr, u16 mask);
+
 #endif
-- 
2.34.1


Thread overview: 19+ messages
2024-11-08 12:11 [PATCH v6 00/17] Modify code as suggested by the maintainer Howard Wang
2024-11-08 12:11 ` [PATCH v6 01/17] net/r8169: add PMD driver skeleton Howard Wang
2024-11-08 15:58   ` Stephen Hemminger
2024-11-08 12:11 ` [PATCH v6 02/17] net/r8169: add logging structure Howard Wang
2024-11-08 12:11 ` [PATCH v6 03/17] net/r8169: add hardware registers access routines Howard Wang
2024-11-08 12:11 ` [PATCH v6 04/17] net/r8169: implement core logic for Tx/Rx Howard Wang
2024-11-08 12:11 ` [PATCH v6 05/17] net/r8169: add support for hw config Howard Wang
2024-11-08 12:11 ` [PATCH v6 06/17] net/r8169: add phy registers access routines Howard Wang [this message]
2024-11-08 12:11 ` [PATCH v6 07/17] net/r8169: add support for hardware operations Howard Wang
2024-11-08 12:11 ` [PATCH v6 08/17] net/r8169: add support for phy configuration Howard Wang
2024-11-08 12:11 ` [PATCH v6 09/17] net/r8169: add support for hw initialization Howard Wang
2024-11-08 12:11 ` [PATCH v6 10/17] net/r8169: add link status and interrupt management Howard Wang
2024-11-08 12:11 ` [PATCH v6 11/17] net/r8169: implement Rx path Howard Wang
2024-11-08 12:11 ` [PATCH v6 12/17] net/r8169: implement Tx path Howard Wang
2024-11-08 12:11 ` [PATCH v6 13/17] net/r8169: implement device statistics Howard Wang
2024-11-08 12:11 ` [PATCH v6 14/17] net/r8169: implement promisc and allmulti modes Howard Wang
2024-11-08 12:11 ` [PATCH v6 15/17] net/r8169: implement MTU configuration Howard Wang
2024-11-08 12:11 ` [PATCH v6 16/17] net/r8169: add support for getting fw version Howard Wang
2024-11-08 12:11 ` [PATCH v6 17/17] net/r8169: add driver_start and driver_stop Howard Wang
