DPDK patches and discussions
From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v1 29/42] net/txgbe: add queue stats mapping and enable RX DMA unit
Date: Tue,  1 Sep 2020 19:51:00 +0800
Message-ID: <20200901115113.1529675-29-jiawenwu@trustnetic.com>
In-Reply-To: <20200901115113.1529675-1-jiawenwu@trustnetic.com>

Add the queue stats mapping set operation, and complete the receive and
transmit units with Rx DMA enablement and security path control.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
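Notes: the queue stats mapping below packs one small stat-counter index
per queue into the 32-bit RQSMR/TQSM registers, several queues per
register. A minimal sketch of the register/field arithmetic, assuming
the layout implied by the QMAP_*/QSM_* constants referenced in
txgbe_dev_queue_stats_mapping_set() (four 8-bit fields per 32-bit
register, low 4 bits of each field significant) -- qsm_update() below
is illustrative only and not part of this patch:

    #include <stdint.h>

    /* Return 'reg' with the mapping field for 'queue_id' replaced by
     * 'stat_idx'.  Assumes 4 fields of 8 bits each per register and a
     * 4-bit counter index (both are assumptions, see above).
     */
    static uint32_t qsm_update(uint32_t reg, uint16_t queue_id,
                               uint8_t stat_idx)
    {
        uint32_t shift = 8 * (queue_id % 4);    /* bit offset of field */

        reg &= ~(0x0fu << shift);               /* clear old mapping */
        reg |= ((uint32_t)stat_idx & 0x0fu) << shift;
        return reg;
    }

    /* e.g. queue 5 lands in field 1 of RQSMR[1]/TQSM[1] (5 / 4 = 1,
     * 5 % 4 = 1), mirroring the n/offset computation in the patch.
     */
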
 drivers/net/txgbe/base/txgbe_hw.c   | 389 +++++++++++++++++++++++++++-
 drivers/net/txgbe/base/txgbe_hw.h   |   9 +
 drivers/net/txgbe/base/txgbe_type.h |   1 +
 drivers/net/txgbe/txgbe_ethdev.c    |  52 ++++
 4 files changed, 450 insertions(+), 1 deletion(-)

diff --git a/drivers/net/txgbe/base/txgbe_hw.c b/drivers/net/txgbe/base/txgbe_hw.c
index 13f79741a..05f323a07 100644
--- a/drivers/net/txgbe/base/txgbe_hw.c
+++ b/drivers/net/txgbe/base/txgbe_hw.c
@@ -9,6 +9,8 @@
 #include "txgbe_mng.h"
 #include "txgbe_hw.h"
 
+#define TXGBE_RAPTOR_MAX_TX_QUEUES 128
+#define TXGBE_RAPTOR_MAX_RX_QUEUES 128
 
 STATIC s32 txgbe_setup_copper_link_raptor(struct txgbe_hw *hw,
 					 u32 speed,
@@ -111,6 +113,149 @@ s32 txgbe_init_hw(struct txgbe_hw *hw)
 	return status;
 }
 
+/**
+ *  txgbe_clear_hw_cntrs - Generic clear hardware counters
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears all hardware statistics counters by reading them from the
+ *  hardware. Statistics counters are cleared on read.
+ **/
+s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw)
+{
+	u16 i = 0;
+
+	DEBUGFUNC("txgbe_clear_hw_cntrs");
+
+	/* QP Stats */
+	/* queue counters are not clear-on-read; reset last-read snapshots */
+	for (i = 0; i < TXGBE_MAX_QP; i++) {
+		hw->qp_last[i].rx_qp_packets = 0;
+		hw->qp_last[i].tx_qp_packets = 0;
+		hw->qp_last[i].rx_qp_bytes = 0;
+		hw->qp_last[i].tx_qp_bytes = 0;
+		hw->qp_last[i].rx_qp_mc_packets = 0;
+	}
+
+	/* PB Stats */
+	for (i = 0; i < TXGBE_MAX_UP; i++) {
+		rd32(hw, TXGBE_PBRXUPXON(i));
+		rd32(hw, TXGBE_PBRXUPXOFF(i));
+		rd32(hw, TXGBE_PBTXUPXON(i));
+		rd32(hw, TXGBE_PBTXUPXOFF(i));
+		rd32(hw, TXGBE_PBTXUPOFF(i));
+
+		rd32(hw, TXGBE_PBRXMISS(i));
+	}
+	rd32(hw, TXGBE_PBRXLNKXON);
+	rd32(hw, TXGBE_PBRXLNKXOFF);
+	rd32(hw, TXGBE_PBTXLNKXON);
+	rd32(hw, TXGBE_PBTXLNKXOFF);
+
+	/* DMA Stats */
+	rd32(hw, TXGBE_DMARXPKT);
+	rd32(hw, TXGBE_DMATXPKT);
+
+	rd64(hw, TXGBE_DMARXOCTL);
+	rd64(hw, TXGBE_DMATXOCTL);
+
+	/* MAC Stats */
+	rd64(hw, TXGBE_MACRXERRCRCL);
+	rd64(hw, TXGBE_MACRXMPKTL);
+	rd64(hw, TXGBE_MACTXMPKTL);
+
+	rd64(hw, TXGBE_MACRXPKTL);
+	rd64(hw, TXGBE_MACTXPKTL);
+	rd64(hw, TXGBE_MACRXGBOCTL);
+
+	rd64(hw, TXGBE_MACRXOCTL);
+	rd32(hw, TXGBE_MACTXOCTL);
+
+	rd64(hw, TXGBE_MACRX1to64L);
+	rd64(hw, TXGBE_MACRX65to127L);
+	rd64(hw, TXGBE_MACRX128to255L);
+	rd64(hw, TXGBE_MACRX256to511L);
+	rd64(hw, TXGBE_MACRX512to1023L);
+	rd64(hw, TXGBE_MACRX1024toMAXL);
+	rd64(hw, TXGBE_MACTX1to64L);
+	rd64(hw, TXGBE_MACTX65to127L);
+	rd64(hw, TXGBE_MACTX128to255L);
+	rd64(hw, TXGBE_MACTX256to511L);
+	rd64(hw, TXGBE_MACTX512to1023L);
+	rd64(hw, TXGBE_MACTX1024toMAXL);
+
+	rd64(hw, TXGBE_MACRXERRLENL);
+	rd32(hw, TXGBE_MACRXOVERSIZE);
+	rd32(hw, TXGBE_MACRXJABBER);
+
+	/* FCoE Stats */
+	rd32(hw, TXGBE_FCOECRC);
+	rd32(hw, TXGBE_FCOELAST);
+	rd32(hw, TXGBE_FCOERPDC);
+	rd32(hw, TXGBE_FCOEPRC);
+	rd32(hw, TXGBE_FCOEPTC);
+	rd32(hw, TXGBE_FCOEDWRC);
+	rd32(hw, TXGBE_FCOEDWTC);
+
+	/* Flow Director Stats */
+	rd32(hw, TXGBE_FDIRMATCH);
+	rd32(hw, TXGBE_FDIRMISS);
+	rd32(hw, TXGBE_FDIRUSED);
+	rd32(hw, TXGBE_FDIRUSED);
+	rd32(hw, TXGBE_FDIRFAIL);
+	rd32(hw, TXGBE_FDIRFAIL);
+
+	/* MACsec Stats */
+	rd32(hw, TXGBE_LSECTX_UTPKT);
+	rd32(hw, TXGBE_LSECTX_ENCPKT);
+	rd32(hw, TXGBE_LSECTX_PROTPKT);
+	rd32(hw, TXGBE_LSECTX_ENCOCT);
+	rd32(hw, TXGBE_LSECTX_PROTOCT);
+	rd32(hw, TXGBE_LSECRX_UTPKT);
+	rd32(hw, TXGBE_LSECRX_BTPKT);
+	rd32(hw, TXGBE_LSECRX_NOSCIPKT);
+	rd32(hw, TXGBE_LSECRX_UNSCIPKT);
+	rd32(hw, TXGBE_LSECRX_DECOCT);
+	rd32(hw, TXGBE_LSECRX_VLDOCT);
+	rd32(hw, TXGBE_LSECRX_UNCHKPKT);
+	rd32(hw, TXGBE_LSECRX_DLYPKT);
+	rd32(hw, TXGBE_LSECRX_LATEPKT);
+	for (i = 0; i < 2; i++) {
+		rd32(hw, TXGBE_LSECRX_OKPKT(i));
+		rd32(hw, TXGBE_LSECRX_INVPKT(i));
+		rd32(hw, TXGBE_LSECRX_BADPKT(i));
+	}
+	rd32(hw, TXGBE_LSECRX_INVSAPKT);
+	rd32(hw, TXGBE_LSECRX_BADSAPKT);
+
+	return 0;
+}
+
+/**
+ *  txgbe_set_lan_id_multi_port - Set LAN id for PCIe multiple port devices
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines the LAN function id by reading memory-mapped registers and
+ *  swaps the port value if requested.
+ **/
+void txgbe_set_lan_id_multi_port(struct txgbe_hw *hw)
+{
+	struct txgbe_bus_info *bus = &hw->bus;
+	u32 reg;
+
+	DEBUGFUNC("txgbe_set_lan_id_multi_port");
+
+	reg = rd32(hw, TXGBE_PORTSTAT);
+	bus->lan_id = TXGBE_PORTSTAT_ID(reg);
+
+	/* check for single port */
+	reg = rd32(hw, TXGBE_PWR);
+	if (TXGBE_PWR_LANID(reg) == TXGBE_PWR_LANID_SWAP)
+		bus->func = 0;
+	else
+		bus->func = bus->lan_id;
+}
+
 /**
  *  txgbe_stop_hw - Generic stop Tx/Rx units
  *  @hw: pointer to hardware structure
@@ -133,6 +278,9 @@ s32 txgbe_stop_hw(struct txgbe_hw *hw)
 	 */
 	hw->adapter_stopped = true;
 
+	/* Disable the receive unit */
+	txgbe_disable_rx(hw);
+
 	/* Clear interrupt mask to stop interrupts from being generated */
 	wr32(hw, TXGBE_IENMISC, 0);
 	wr32(hw, TXGBE_IMS(0), TXGBE_IMS_MASK);
@@ -279,6 +427,113 @@ s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 	return 0;
 }
 
+/**
+ *  txgbe_disable_sec_rx_path - Stops the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the receive data path and waits for the HW to internally empty
+ *  the Rx security block
+ **/
+s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw)
+{
+#define TXGBE_MAX_SECRX_POLL 4000
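+/* 4000 polls of the 10us delay below bound the wait at roughly 40ms */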
+
+	int i;
+	u32 secrxreg;
+
+	DEBUGFUNC("txgbe_disable_sec_rx_path");
+
+	secrxreg = rd32(hw, TXGBE_SECRXCTL);
+	secrxreg |= TXGBE_SECRXCTL_XDSA;
+	wr32(hw, TXGBE_SECRXCTL, secrxreg);
+	for (i = 0; i < TXGBE_MAX_SECRX_POLL; i++) {
+		secrxreg = rd32(hw, TXGBE_SECRXSTAT);
+		if (secrxreg & TXGBE_SECRXSTAT_RDY)
+			break;
+		else
+			/* Use interrupt-safe sleep just in case */
+			usec_delay(10);
+	}
+
+	/* For informational purposes only */
+	if (i >= TXGBE_MAX_SECRX_POLL)
+		DEBUGOUT("Rx unit being enabled before security "
+			 "path fully disabled.  Continuing with init.\n");
+
+	return 0;
+}
+
+/**
+ *  txgbe_enable_sec_rx_path - Enables the receive data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the receive data path.
+ **/
+s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw)
+{
+	u32 secrxreg;
+
+	DEBUGFUNC("txgbe_enable_sec_rx_path");
+
+	secrxreg = rd32(hw, TXGBE_SECRXCTL);
+	secrxreg &= ~TXGBE_SECRXCTL_XDSA;
+	wr32(hw, TXGBE_SECRXCTL, secrxreg);
+	txgbe_flush(hw);
+
+	return 0;
+}
+
+/**
+ *  txgbe_disable_sec_tx_path - Stops the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Stops the transmit data path and waits for the HW to internally empty
+ *  the Tx security block
+ **/
+int txgbe_disable_sec_tx_path(struct txgbe_hw *hw)
+{
+#define TXGBE_MAX_SECTX_POLL 40
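+/* 40 polls of the 1ms delay below bound the wait at roughly 40ms */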
+
+	int i;
+	u32 sectxreg;
+
+	sectxreg = rd32(hw, TXGBE_SECTXCTL);
+	sectxreg |= TXGBE_SECTXCTL_XDSA;
+	wr32(hw, TXGBE_SECTXCTL, sectxreg);
+	for (i = 0; i < TXGBE_MAX_SECTX_POLL; i++) {
+		sectxreg = rd32(hw, TXGBE_SECTXSTAT);
+		if (sectxreg & TXGBE_SECTXSTAT_RDY)
+			break;
+		/* Use interrupt-safe sleep just in case */
+		usec_delay(1000);
+	}
+
+	/* For informational purposes only */
+	if (i >= TXGBE_MAX_SECTX_POLL)
+		DEBUGOUT("Tx unit being enabled before security "
+			 "path fully disabled.  Continuing with init.\n");
+
+	return 0;
+}
+
+/**
+ *  txgbe_enable_sec_tx_path - Enables the transmit data path
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables the transmit data path.
+ **/
+int txgbe_enable_sec_tx_path(struct txgbe_hw *hw)
+{
+	u32 sectxreg;
+
+	sectxreg = rd32(hw, TXGBE_SECTXCTL);
+	sectxreg &= ~TXGBE_SECTXCTL_XDSA;
+	wr32(hw, TXGBE_SECTXCTL, sectxreg);
+	txgbe_flush(hw);
+
+	return 0;
+}
 
 /**
  *  txgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
@@ -453,6 +708,38 @@ void txgbe_clear_tx_pending(struct txgbe_hw *hw)
 }
 
 
+void txgbe_disable_rx(struct txgbe_hw *hw)
+{
+	u32 pfdtxgswc;
+
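+	/* Turn off MAC loopback and remember its state in mac.set_lben,
+	 * so that txgbe_enable_rx() can restore it afterwards.
+	 */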
+	pfdtxgswc = rd32(hw, TXGBE_PSRCTL);
+	if (pfdtxgswc & TXGBE_PSRCTL_LBENA) {
+		pfdtxgswc &= ~TXGBE_PSRCTL_LBENA;
+		wr32(hw, TXGBE_PSRCTL, pfdtxgswc);
+		hw->mac.set_lben = true;
+	} else {
+		hw->mac.set_lben = false;
+	}
+
+	wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, 0);
+	wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, 0);
+}
+
+void txgbe_enable_rx(struct txgbe_hw *hw)
+{
+	u32 pfdtxgswc;
+
+	wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, TXGBE_MACRXCFG_ENA);
+	wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, TXGBE_PBRXCTL_ENA);
+
+	if (hw->mac.set_lben) {
+		pfdtxgswc = rd32(hw, TXGBE_PSRCTL);
+		pfdtxgswc |= TXGBE_PSRCTL_LBENA;
+		wr32(hw, TXGBE_PSRCTL, pfdtxgswc);
+		hw->mac.set_lben = false;
+	}
+}
+
 /**
  *  txgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
  *  @hw: pointer to hardware structure
@@ -824,12 +1111,16 @@ s32 txgbe_setup_sfp_modules(struct txgbe_hw *hw)
  **/
 s32 txgbe_init_ops_pf(struct txgbe_hw *hw)
 {
+	struct txgbe_bus_info *bus = &hw->bus;
 	struct txgbe_mac_info *mac = &hw->mac;
 	struct txgbe_phy_info *phy = &hw->phy;
 	struct txgbe_rom_info *rom = &hw->rom;
 
 	DEBUGFUNC("txgbe_init_ops_pf");
 
+	/* BUS */
+	bus->set_lan_id = txgbe_set_lan_id_multi_port;
+
 	/* PHY */
 	phy->get_media_type = txgbe_get_media_type_raptor;
 	phy->identify = txgbe_identify_phy;
@@ -849,13 +1140,21 @@ s32 txgbe_init_ops_pf(struct txgbe_hw *hw)
 	/* MAC */
 	mac->init_hw = txgbe_init_hw;
 	mac->start_hw = txgbe_start_hw_raptor;
+	mac->clear_hw_cntrs = txgbe_clear_hw_cntrs;
+	mac->enable_rx_dma = txgbe_enable_rx_dma_raptor;
 	mac->stop_hw = txgbe_stop_hw;
 	mac->reset_hw = txgbe_reset_hw;
 
+	mac->disable_sec_rx_path = txgbe_disable_sec_rx_path;
+	mac->enable_sec_rx_path = txgbe_enable_sec_rx_path;
+	mac->disable_sec_tx_path = txgbe_disable_sec_tx_path;
+	mac->enable_sec_tx_path = txgbe_enable_sec_tx_path;
 	mac->get_device_caps = txgbe_get_device_caps;
 	mac->autoc_read = txgbe_autoc_read;
 	mac->autoc_write = txgbe_autoc_write;
 
+	mac->enable_rx = txgbe_enable_rx;
+	mac->disable_rx = txgbe_disable_rx;
 	/* Link */
 	mac->get_link_capabilities = txgbe_get_link_capabilities_raptor;
 	mac->check_link = txgbe_check_mac_link;
@@ -873,6 +1172,8 @@ s32 txgbe_init_ops_pf(struct txgbe_hw *hw)
 	rom->validate_checksum = txgbe_validate_eeprom_checksum;
 	rom->update_checksum = txgbe_update_eeprom_checksum;
 	rom->calc_checksum = txgbe_calc_eeprom_checksum;
+	mac->max_rx_queues	= TXGBE_RAPTOR_MAX_RX_QUEUES;
+	mac->max_tx_queues	= TXGBE_RAPTOR_MAX_TX_QUEUES;
 
 	return 0;
 }
@@ -1456,7 +1757,63 @@ txgbe_check_flash_load(struct txgbe_hw *hw, u32 check_bit)
 static void
 txgbe_reset_misc(struct txgbe_hw *hw)
 {
-	RTE_SET_USED(hw);
+	int i;
+	u32 value;
+
+	wr32(hw, TXGBE_ISBADDRL, hw->isb_dma & 0x00000000FFFFFFFF);
+	wr32(hw, TXGBE_ISBADDRH, hw->isb_dma >> 32);
+
+	value = rd32_epcs(hw, SR_XS_PCS_CTRL2);
+	if ((value & 0x3) != SR_PCS_CTRL2_TYPE_SEL_X)
+		hw->link_status = TXGBE_LINK_STATUS_NONE;
+
+	/* allow reception of frames larger than 2048 bytes */
+	wr32m(hw, TXGBE_MACRXCFG,
+		TXGBE_MACRXCFG_JUMBO, TXGBE_MACRXCFG_JUMBO);
+
+	wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+		TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
+
+	/* clear counters on read */
+	wr32m(hw, TXGBE_MACCNTCTL,
+		TXGBE_MACCNTCTL_RC, TXGBE_MACCNTCTL_RC);
+
+	wr32m(hw, TXGBE_RXFCCFG,
+		TXGBE_RXFCCFG_FC, TXGBE_RXFCCFG_FC);
+	wr32m(hw, TXGBE_TXFCCFG,
+		TXGBE_TXFCCFG_FC, TXGBE_TXFCCFG_FC);
+
+	wr32m(hw, TXGBE_MACRXFLT,
+		TXGBE_MACRXFLT_PROMISC, TXGBE_MACRXFLT_PROMISC);
+
+	wr32m(hw, TXGBE_RSTSTAT,
+		TXGBE_RSTSTAT_TMRINIT_MASK, TXGBE_RSTSTAT_TMRINIT(30));
+
+	/* errata 4: initialize mng flex tbl and wakeup flex tbl */
+	wr32(hw, TXGBE_MNGFLEXSEL, 0);
+	for (i = 0; i < 16; i++) {
+		wr32(hw, TXGBE_MNGFLEXDWL(i), 0);
+		wr32(hw, TXGBE_MNGFLEXDWH(i), 0);
+		wr32(hw, TXGBE_MNGFLEXMSK(i), 0);
+	}
+	wr32(hw, TXGBE_LANFLEXSEL, 0);
+	for (i = 0; i < 16; i++) {
+		wr32(hw, TXGBE_LANFLEXDWL(i), 0);
+		wr32(hw, TXGBE_LANFLEXDWH(i), 0);
+		wr32(hw, TXGBE_LANFLEXMSK(i), 0);
+	}
+
+	/* set pause frame dst mac addr */
+	wr32(hw, TXGBE_RXPBPFCDMACL, 0xC2000001);
+	wr32(hw, TXGBE_RXPBPFCDMACH, 0x0180);
+
+	/* enable mac transmitter */
+	wr32m(hw, TXGBE_MACTXCFG, TXGBE_MACTXCFG_TE, TXGBE_MACTXCFG_TE);
+
+	for (i = 0; i < 4; i++)
+		wr32m(hw, TXGBE_IVAR(i), 0x80808080, 0);
 }
 
 /**
@@ -1620,6 +1977,36 @@ s32 txgbe_start_hw_raptor(struct txgbe_hw *hw)
 	return err;
 }
 
+/**
+ *  txgbe_enable_rx_dma_raptor - Enable the Rx DMA unit
+ *  @hw: pointer to hardware structure
+ *  @regval: register value to write to RXCTRL
+ *
+ *  Enables the Rx DMA unit
+ **/
+s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval)
+{
+	DEBUGFUNC("txgbe_enable_rx_dma_raptor");
+
+	/*
+	 * Workaround silicon errata when enabling the Rx datapath.
+	 * If traffic is incoming before we enable the Rx unit, it could hang
+	 * the Rx DMA unit.  Therefore, make sure the security engine is
+	 * completely disabled prior to enabling the Rx unit.
+	 */
+
+	hw->mac.disable_sec_rx_path(hw);
+
+	if (regval & TXGBE_PBRXCTL_ENA)
+		txgbe_enable_rx(hw);
+	else
+		txgbe_disable_rx(hw);
+
+	hw->mac.enable_sec_rx_path(hw);
+
+	return 0;
+}
 
 /**
  *  txgbe_verify_lesm_fw_enabled_raptor - Checks LESM FW module state.
diff --git a/drivers/net/txgbe/base/txgbe_hw.h b/drivers/net/txgbe/base/txgbe_hw.h
index a597383b8..86b616d38 100644
--- a/drivers/net/txgbe/base/txgbe_hw.h
+++ b/drivers/net/txgbe/base/txgbe_hw.h
@@ -12,12 +12,19 @@ s32 txgbe_start_hw(struct txgbe_hw *hw);
 s32 txgbe_stop_hw(struct txgbe_hw *hw);
 s32 txgbe_start_hw_gen2(struct txgbe_hw *hw);
 s32 txgbe_start_hw_raptor(struct txgbe_hw *hw);
+s32 txgbe_clear_hw_cntrs(struct txgbe_hw *hw);
+
+void txgbe_set_lan_id_multi_port(struct txgbe_hw *hw);
 
 s32 txgbe_led_on(struct txgbe_hw *hw, u32 index);
 s32 txgbe_led_off(struct txgbe_hw *hw, u32 index);
 
 s32 txgbe_set_rar(struct txgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
 			  u32 enable_addr);
+s32 txgbe_disable_sec_rx_path(struct txgbe_hw *hw);
+s32 txgbe_enable_sec_rx_path(struct txgbe_hw *hw);
+s32 txgbe_disable_sec_tx_path(struct txgbe_hw *hw);
+s32 txgbe_enable_sec_tx_path(struct txgbe_hw *hw);
 
 s32 txgbe_validate_mac_addr(u8 *mac_addr);
 
@@ -30,6 +37,8 @@ void txgbe_clear_tx_pending(struct txgbe_hw *hw);
 
 extern s32 txgbe_reset_pipeline_raptor(struct txgbe_hw *hw);
 
+void txgbe_disable_rx(struct txgbe_hw *hw);
+void txgbe_enable_rx(struct txgbe_hw *hw);
 s32 txgbe_setup_mac_link_multispeed_fiber(struct txgbe_hw *hw,
 					  u32 speed,
 					  bool autoneg_wait_to_complete);
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index f9a18d581..35a8ed3eb 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -509,6 +509,7 @@ struct txgbe_mac_info {
 	bool orig_link_settings_stored;
 	bool autotry_restart;
 	u8 flags;
+	bool set_lben;
 	u32  max_link_up_time;
 };
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 51554844e..c43d5b56f 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -248,6 +248,57 @@ txgbe_disable_intr(struct txgbe_hw *hw)
 	txgbe_flush(hw);
 }
 
+static int
+txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+				  uint16_t queue_id,
+				  uint8_t stat_idx,
+				  uint8_t is_rx)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
+	struct txgbe_stat_mappings *stat_mappings =
+		TXGBE_DEV_STAT_MAPPINGS(eth_dev);
+	uint32_t qsmr_mask = 0;
+	uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
+	uint32_t q_map;
+	uint8_t n, offset;
+
+	if (hw->mac.type != txgbe_mac_raptor)
+		return -ENOSYS;
+
+	PMD_INIT_LOG(DEBUG, "Setting port %d, %s queue_id %d to stat index %d",
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx);
+
+	n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
+	if (n >= TXGBE_NB_STAT_MAPPING) {
+		PMD_INIT_LOG(ERR, "Nb of stat mapping registers exceeded");
+		return -EIO;
+	}
+	offset = (uint8_t)(queue_id % NB_QMAP_FIELDS_PER_QSM_REG);
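+	/* 'n' selects the 32-bit mapping register, 'offset' the field in it */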
+
+	/* Now clear any previous stat_idx set */
+	clearing_mask <<= (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+	if (!is_rx)
+		stat_mappings->tqsm[n] &= ~clearing_mask;
+	else
+		stat_mappings->rqsm[n] &= ~clearing_mask;
+
+	q_map = (uint32_t)stat_idx;
+	q_map &= QMAP_FIELD_RESERVED_BITS_MASK;
+	qsmr_mask = q_map << (QSM_REG_NB_BITS_PER_QMAP_FIELD * offset);
+	if (!is_rx)
+		stat_mappings->tqsm[n] |= qsmr_mask;
+	else
+		stat_mappings->rqsm[n] |= qsmr_mask;
+
+	PMD_INIT_LOG(DEBUG, "Set port %d, %s queue_id %d to stat index %d",
+		     (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+		     queue_id, stat_idx);
+	PMD_INIT_LOG(DEBUG, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
+		     is_rx ? stat_mappings->rqsm[n] : stat_mappings->tqsm[n]);
+	return 0;
+}
+
 static int
 eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 {
@@ -1958,6 +2009,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.xstats_reset               = txgbe_dev_xstats_reset,
 	.xstats_get_names           = txgbe_dev_xstats_get_names,
 	.xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
+	.queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
 	.dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
 	.rx_queue_start	            = txgbe_dev_rx_queue_start,
 	.rx_queue_stop              = txgbe_dev_rx_queue_stop,
-- 
2.18.4




