DPDK patches and discussions
* [PATCH 0/3] net/igc: support PTP timesync
@ 2022-12-20  3:41 Simei Su
  2022-12-20  3:41 ` [PATCH 1/3] net/igc: code refactoring Simei Su
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: Simei Su @ 2022-12-20  3:41 UTC (permalink / raw)
  To: qi.z.zhang, junfeng.guo; +Cc: dev, wenjun1.wu, Simei Su

[PATCH 1/3] Code refactoring.
[PATCH 2/3] Add related definitions for PTP timesync.
[PATCH 3/3] Add IEEE 1588 APIs to support timesync.

Simei Su (3):
  net/igc: code refactoring
  net/igc/base: support PTP timesync
  net/igc: support IEEE 1588 PTP

 drivers/net/igc/base/igc_defines.h |  11 ++
 drivers/net/igc/igc_ethdev.c       | 222 +++++++++++++++++++++++++++++++++++++
 drivers/net/igc/igc_ethdev.h       |   4 +-
 drivers/net/igc/igc_txrx.c         | 166 ++++++++-------------------
 drivers/net/igc/igc_txrx.h         | 116 +++++++++++++++++++
 5 files changed, 397 insertions(+), 122 deletions(-)

-- 
2.9.5



* [PATCH 1/3] net/igc: code refactoring
  2022-12-20  3:41 [PATCH 0/3] net/igc: support PTP timesync Simei Su
@ 2022-12-20  3:41 ` Simei Su
  2023-01-17  2:25   ` Zhang, Qi Z
  2022-12-20  3:41 ` [PATCH 2/3] net/igc/base: support PTP timesync Simei Su
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 11+ messages in thread
From: Simei Su @ 2022-12-20  3:41 UTC (permalink / raw)
  To: qi.z.zhang, junfeng.guo; +Cc: dev, wenjun1.wu, Simei Su

Move the Rx/Tx queue related structures from igc_txrx.c to igc_txrx.h
to make the code cleaner and the queue fields more convenient to use.

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/igc/igc_txrx.c | 118 ---------------------------------------------
 drivers/net/igc/igc_txrx.h | 115 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 115 insertions(+), 118 deletions(-)

diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index ffd219b..c462e91 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -93,124 +93,6 @@
 
 #define IGC_TX_OFFLOAD_NOTSUP_MASK (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
 
-/**
- * Structure associated with each descriptor of the RX ring of a RX queue.
- */
-struct igc_rx_entry {
-	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
-};
-
-/**
- * Structure associated with each RX queue.
- */
-struct igc_rx_queue {
-	struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
-	volatile union igc_adv_rx_desc *rx_ring;
-	/**< RX ring virtual address. */
-	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
-	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
-	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
-	struct igc_rx_entry *sw_ring;   /**< address of RX software ring. */
-	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
-	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
-	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
-	uint16_t            rx_tail;    /**< current value of RDT register. */
-	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
-	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
-	uint16_t            queue_id;   /**< RX queue index. */
-	uint16_t            reg_idx;    /**< RX queue register index. */
-	uint16_t            port_id;    /**< Device port identifier. */
-	uint8_t             pthresh;    /**< Prefetch threshold register. */
-	uint8_t             hthresh;    /**< Host threshold register. */
-	uint8_t             wthresh;    /**< Write-back threshold register. */
-	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
-	uint8_t             drop_en;	/**< If not 0, set SRRCTL.Drop_En. */
-	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
-};
-
-/** Offload features */
-union igc_tx_offload {
-	uint64_t data;
-	struct {
-		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
-		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
-		uint64_t vlan_tci:16;
-		/**< VLAN Tag Control Identifier(CPU order). */
-		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
-		uint64_t tso_segsz:16; /**< TCP TSO segment size. */
-		/* uint64_t unused:8; */
-	};
-};
-
-/*
- * Compare mask for igc_tx_offload.data,
- * should be in sync with igc_tx_offload layout.
- */
-#define TX_MACIP_LEN_CMP_MASK	0x000000000000FFFFULL /**< L2L3 header mask. */
-#define TX_VLAN_CMP_MASK	0x00000000FFFF0000ULL /**< Vlan mask. */
-#define TX_TCP_LEN_CMP_MASK	0x000000FF00000000ULL /**< TCP header mask. */
-#define TX_TSO_MSS_CMP_MASK	0x00FFFF0000000000ULL /**< TSO segsz mask. */
-/** Mac + IP + TCP + Mss mask. */
-#define TX_TSO_CMP_MASK	\
-	(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
-
-/**
- * Structure to check if new context need be built
- */
-struct igc_advctx_info {
-	uint64_t flags;           /**< ol_flags related to context build. */
-	/** tx offload: vlan, tso, l2-l3-l4 lengths. */
-	union igc_tx_offload tx_offload;
-	/** compare mask for tx offload. */
-	union igc_tx_offload tx_offload_mask;
-};
-
-/**
- * Hardware context number
- */
-enum {
-	IGC_CTX_0    = 0, /**< CTX0    */
-	IGC_CTX_1    = 1, /**< CTX1    */
-	IGC_CTX_NUM  = 2, /**< CTX_NUM */
-};
-
-/**
- * Structure associated with each descriptor of the TX ring of a TX queue.
- */
-struct igc_tx_entry {
-	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
-	uint16_t next_id; /**< Index of next descriptor in ring. */
-	uint16_t last_id; /**< Index of last scattered descriptor. */
-};
-
-/**
- * Structure associated with each TX queue.
- */
-struct igc_tx_queue {
-	volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
-	uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
-	struct igc_tx_entry    *sw_ring; /**< virtual address of SW ring. */
-	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
-	uint32_t               txd_type;      /**< Device-specific TXD type */
-	uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
-	uint16_t               tx_tail;  /**< Current value of TDT register. */
-	uint16_t               tx_head;
-	/**< Index of first used TX descriptor. */
-	uint16_t               queue_id; /**< TX queue index. */
-	uint16_t               reg_idx;  /**< TX queue register index. */
-	uint16_t               port_id;  /**< Device port identifier. */
-	uint8_t                pthresh;  /**< Prefetch threshold register. */
-	uint8_t                hthresh;  /**< Host threshold register. */
-	uint8_t                wthresh;  /**< Write-back threshold register. */
-	uint8_t                ctx_curr;
-
-	/**< Start context position for transmit queue. */
-	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
-	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
-};
-
 static inline uint64_t
 rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)
 {
diff --git a/drivers/net/igc/igc_txrx.h b/drivers/net/igc/igc_txrx.h
index 02a0a05..5731761 100644
--- a/drivers/net/igc/igc_txrx.h
+++ b/drivers/net/igc/igc_txrx.h
@@ -11,6 +11,121 @@
 extern "C" {
 #endif
 
+struct igc_rx_entry {
+	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct igc_rx_queue {
+	struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
+	volatile union igc_adv_rx_desc *rx_ring;
+	/**< RX ring virtual address. */
+	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
+	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
+	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
+	struct igc_rx_entry *sw_ring;   /**< address of RX software ring. */
+	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
+	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
+	uint16_t            rx_tail;    /**< current value of RDT register. */
+	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
+	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
+	uint16_t            queue_id;   /**< RX queue index. */
+	uint16_t            reg_idx;    /**< RX queue register index. */
+	uint16_t            port_id;    /**< Device port identifier. */
+	uint8_t             pthresh;    /**< Prefetch threshold register. */
+	uint8_t             hthresh;    /**< Host threshold register. */
+	uint8_t             wthresh;    /**< Write-back threshold register. */
+	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
+	uint8_t             drop_en;    /**< If not 0, set SRRCTL.Drop_En. */
+	uint32_t            flags;      /**< RX flags. */
+	uint64_t            offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
+};
+
+/** Offload features */
+union igc_tx_offload {
+	uint64_t data;
+	struct {
+		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+		uint64_t vlan_tci:16;
+		/**< VLAN Tag Control Identifier(CPU order). */
+		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+		uint64_t tso_segsz:16; /**< TCP TSO segment size. */
+		/* uint64_t unused:8; */
+	};
+};
+
+/**
+ * Compare mask for igc_tx_offload.data,
+ * should be in sync with igc_tx_offload layout.
+ */
+#define TX_MACIP_LEN_CMP_MASK  0x000000000000FFFFULL /**< L2L3 header mask. */
+#define TX_VLAN_CMP_MASK       0x00000000FFFF0000ULL /**< Vlan mask. */
+#define TX_TCP_LEN_CMP_MASK    0x000000FF00000000ULL /**< TCP header mask. */
+#define TX_TSO_MSS_CMP_MASK    0x00FFFF0000000000ULL /**< TSO segsz mask. */
+/** Mac + IP + TCP + Mss mask. */
+#define TX_TSO_CMP_MASK        \
+	(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
+
+/**
+ * Structure to check if new context need be built
+ */
+struct igc_advctx_info {
+	uint64_t flags;           /**< ol_flags related to context build. */
+	/** tx offload: vlan, tso, l2-l3-l4 lengths. */
+	union igc_tx_offload tx_offload;
+	/** compare mask for tx offload. */
+	union igc_tx_offload tx_offload_mask;
+};
+
+/**
+ * Hardware context number
+ */
+enum {
+	IGC_CTX_0    = 0, /**< CTX0    */
+	IGC_CTX_1    = 1, /**< CTX1    */
+	IGC_CTX_NUM  = 2, /**< CTX_NUM */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct igc_tx_entry {
+	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+	uint16_t next_id; /**< Index of next descriptor in ring. */
+	uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct igc_tx_queue {
+	volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
+	uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
+	struct igc_tx_entry    *sw_ring; /**< virtual address of SW ring. */
+	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
+	uint32_t               txd_type;      /**< Device-specific TXD type */
+	uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
+	uint16_t               tx_tail;  /**< Current value of TDT register. */
+	uint16_t               tx_head;
+	/**< Index of first used TX descriptor. */
+	uint16_t               queue_id; /**< TX queue index. */
+	uint16_t               reg_idx;  /**< TX queue register index. */
+	uint16_t               port_id;  /**< Device port identifier. */
+	uint8_t                pthresh;  /**< Prefetch threshold register. */
+	uint8_t                hthresh;  /**< Host threshold register. */
+	uint8_t                wthresh;  /**< Write-back threshold register. */
+	uint8_t                ctx_curr;
+
+	/**< Start context position for transmit queue. */
+	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
+	/**< Hardware context history.*/
+	uint64_t               offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
+};
+
 /*
  * RX/TX function prototypes
  */
-- 
2.9.5



* [PATCH 2/3] net/igc/base: support PTP timesync
  2022-12-20  3:41 [PATCH 0/3] net/igc: support PTP timesync Simei Su
  2022-12-20  3:41 ` [PATCH 1/3] net/igc: code refactoring Simei Su
@ 2022-12-20  3:41 ` Simei Su
  2022-12-20  3:41 ` [PATCH 3/3] net/igc: support IEEE 1588 PTP Simei Su
  2023-01-17 13:26 ` [PATCH v2 0/3] net/igc: support PTP timesync Simei Su
  3 siblings, 0 replies; 11+ messages in thread
From: Simei Su @ 2022-12-20  3:41 UTC (permalink / raw)
  To: qi.z.zhang, junfeng.guo; +Cc: dev, wenjun1.wu, Simei Su

Add the register bit definitions needed to enable PTP timesync.
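
These bits are consumed when timestamping is enabled in the driver; a rough
sketch of the intended usage (mirroring patch 3/3 of this series, with "hw",
"i" and "val" being the usual device handle, Rx queue index and scratch
register value):

	/* Enable Rx timestamping on queue i, selecting timer 0 for both
	 * timestamp slots.
	 */
	val = IGC_READ_REG(hw, IGC_SRRCTL(i));
	val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) |
	       IGC_SRRCTL_TIMESTAMP;
	IGC_WRITE_REG(hw, IGC_SRRCTL(i), val);

	/* Sample Rx/Tx timestamps at the PHY start of packet. */
	IGC_WRITE_REG(hw, IGC_TSYNCRXCTL, IGC_TSYNCRXCTL_ENABLED |
		      IGC_TSYNCRXCTL_TYPE_ALL | IGC_TSYNCRXCTL_RXSYNSIG);
	IGC_WRITE_REG(hw, IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED |
		      IGC_TSYNCTXCTL_TXSYNSIG);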

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/igc/base/igc_defines.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/net/igc/base/igc_defines.h b/drivers/net/igc/base/igc_defines.h
index 61964bc..dd7330a 100644
--- a/drivers/net/igc/base/igc_defines.h
+++ b/drivers/net/igc/base/igc_defines.h
@@ -795,6 +795,17 @@
 
 #define TSYNC_INTERRUPTS	TSINTR_TXTS
 
+/* Split Replication Receive Control */
+#define IGC_SRRCTL_TIMESTAMP           0x40000000
+#define IGC_SRRCTL_TIMER1SEL(timer)    (((timer) & 0x3) << 14)
+#define IGC_SRRCTL_TIMER0SEL(timer)    (((timer) & 0x3) << 17)
+
+/* Sample RX tstamp in PHY sop */
+#define IGC_TSYNCRXCTL_RXSYNSIG         0x00000400
+
+/* Sample TX tstamp in PHY sop */
+#define IGC_TSYNCTXCTL_TXSYNSIG         0x00000020
+
 /* TSAUXC Configuration Bits */
 #define TSAUXC_EN_TT0	(1 << 0)  /* Enable target time 0. */
 #define TSAUXC_EN_TT1	(1 << 1)  /* Enable target time 1. */
-- 
2.9.5



* [PATCH 3/3] net/igc: support IEEE 1588 PTP
  2022-12-20  3:41 [PATCH 0/3] net/igc: support PTP timesync Simei Su
  2022-12-20  3:41 ` [PATCH 1/3] net/igc: code refactoring Simei Su
  2022-12-20  3:41 ` [PATCH 2/3] net/igc/base: support PTP timesync Simei Su
@ 2022-12-20  3:41 ` Simei Su
  2023-01-17 13:26 ` [PATCH v2 0/3] net/igc: support PTP timesync Simei Su
  3 siblings, 0 replies; 11+ messages in thread
From: Simei Su @ 2022-12-20  3:41 UTC (permalink / raw)
  To: qi.z.zhang, junfeng.guo; +Cc: dev, wenjun1.wu, Simei Su

Add igc support for the ethdev timesync APIs to enable/disable and
read/write/adjust IEEE 1588 PTP timestamps.

An example command for running the ptpclient application is shown below:
./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1
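
For reference, a minimal sketch of how an application could drive these
timesync ops through the generic ethdev API (it assumes an already
configured and started port; error checking is omitted, and the function
name and port_id are illustrative, not part of this patch):

	#include <rte_ethdev.h>

	static void
	ptp_clock_demo(uint16_t port_id)
	{
		struct timespec ts;

		/* Enable HW timestamping; the driver also sets the per-queue
		 * RTE_ETH_RX_OFFLOAD_TIMESTAMP flag internally.
		 */
		rte_eth_timesync_enable(port_id);

		/* Read, write and adjust the device clock. */
		rte_eth_timesync_read_time(port_id, &ts);
		rte_eth_timesync_write_time(port_id, &ts);
		rte_eth_timesync_adjust_time(port_id, 1000); /* +1 us */

		/* After a PTP packet has been received/sent, fetch its
		 * timestamp; for Rx, the "flags" argument carries the
		 * queue index in this driver.
		 */
		rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
		rte_eth_timesync_read_tx_timestamp(port_id, &ts);

		rte_eth_timesync_disable(port_id);
	}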

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/igc/igc_ethdev.c | 222 +++++++++++++++++++++++++++++++++++++++++++
 drivers/net/igc/igc_ethdev.h |   4 +-
 drivers/net/igc/igc_txrx.c   |  50 +++++++++-
 drivers/net/igc/igc_txrx.h   |   1 +
 4 files changed, 272 insertions(+), 5 deletions(-)

diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index dcd262f..ef3346b 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -78,6 +78,16 @@
 #define IGC_ALARM_INTERVAL	8000000u
 /* us, about 13.6s some per-queue registers will wrap around back to 0. */
 
+/* Transmit and receive latency (for PTP timestamps) */
+#define IGC_I225_TX_LATENCY_10		240
+#define IGC_I225_TX_LATENCY_100		58
+#define IGC_I225_TX_LATENCY_1000	80
+#define IGC_I225_TX_LATENCY_2500	1325
+#define IGC_I225_RX_LATENCY_10		6450
+#define IGC_I225_RX_LATENCY_100		185
+#define IGC_I225_RX_LATENCY_1000	300
+#define IGC_I225_RX_LATENCY_2500	1485
+
 static const struct rte_eth_desc_lim rx_desc_lim = {
 	.nb_max = IGC_MAX_RXD,
 	.nb_min = IGC_MIN_RXD,
@@ -245,6 +255,18 @@ eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
 static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 		      enum rte_vlan_type vlan_type, uint16_t tpid);
+static int eth_igc_timesync_enable(struct rte_eth_dev *dev);
+static int eth_igc_timesync_disable(struct rte_eth_dev *dev);
+static int eth_igc_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp,
+					  uint32_t flags);
+static int eth_igc_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp);
+static int eth_igc_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int eth_igc_timesync_read_time(struct rte_eth_dev *dev,
+				  struct timespec *timestamp);
+static int eth_igc_timesync_write_time(struct rte_eth_dev *dev,
+				   const struct timespec *timestamp);
 
 static const struct eth_dev_ops eth_igc_ops = {
 	.dev_configure		= eth_igc_configure,
@@ -298,6 +320,13 @@ static const struct eth_dev_ops eth_igc_ops = {
 	.vlan_tpid_set		= eth_igc_vlan_tpid_set,
 	.vlan_strip_queue_set	= eth_igc_vlan_strip_queue_set,
 	.flow_ops_get		= eth_igc_flow_ops_get,
+	.timesync_enable	= eth_igc_timesync_enable,
+	.timesync_disable	= eth_igc_timesync_disable,
+	.timesync_read_rx_timestamp = eth_igc_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp = eth_igc_timesync_read_tx_timestamp,
+	.timesync_adjust_time	= eth_igc_timesync_adjust_time,
+	.timesync_read_time	= eth_igc_timesync_read_time,
+	.timesync_write_time	= eth_igc_timesync_write_time,
 };
 
 /*
@@ -2582,6 +2611,199 @@ eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 }
 
 static int
+eth_igc_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	struct timespec system_time;
+	struct igc_rx_queue *rxq;
+	uint32_t val;
+	uint16_t i;
+
+	IGC_WRITE_REG(hw, IGC_TSAUXC, 0x0);
+
+	clock_gettime(CLOCK_REALTIME, &system_time);
+	IGC_WRITE_REG(hw, IGC_SYSTIML, system_time.tv_nsec);
+	IGC_WRITE_REG(hw, IGC_SYSTIMH, system_time.tv_sec);
+
+	/* Enable timestamping of received PTP packets. */
+	val = IGC_READ_REG(hw, IGC_RXPBS);
+	val |= IGC_RXPBS_CFG_TS_EN;
+	IGC_WRITE_REG(hw, IGC_RXPBS, val);
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		val = IGC_READ_REG(hw, IGC_SRRCTL(i));
+		/* For now, only support retrieving Rx timestamp from timer0. */
+		val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) |
+		       IGC_SRRCTL_TIMESTAMP;
+		IGC_WRITE_REG(hw, IGC_SRRCTL(i), val);
+	}
+
+	val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL |
+	      IGC_TSYNCRXCTL_RXSYNSIG;
+	IGC_WRITE_REG(hw, IGC_TSYNCRXCTL, val);
+
+	/* Enable Timestamping of transmitted PTP packets. */
+	IGC_WRITE_REG(hw, IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED |
+		      IGC_TSYNCTXCTL_TXSYNSIG);
+
+	/* Read TXSTMP registers to discard any timestamp previously stored. */
+	IGC_READ_REG(hw, IGC_TXSTMPL);
+	IGC_READ_REG(hw, IGC_TXSTMPH);
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+	}
+
+	return 0;
+}
+
+static int
+eth_igc_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+	ts->tv_nsec = IGC_READ_REG(hw, IGC_SYSTIML);
+	ts->tv_sec = IGC_READ_REG(hw, IGC_SYSTIMH);
+
+	return 0;
+}
+
+static int
+eth_igc_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+	IGC_WRITE_REG(hw, IGC_SYSTIML, ts->tv_nsec);
+	IGC_WRITE_REG(hw, IGC_SYSTIMH, ts->tv_sec);
+
+	return 0;
+}
+
+static int
+eth_igc_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	uint32_t nsec, sec;
+	uint64_t systime, ns;
+	struct timespec ts;
+
+	nsec = (uint64_t)IGC_READ_REG(hw, IGC_SYSTIML);
+	sec = (uint64_t)IGC_READ_REG(hw, IGC_SYSTIMH);
+	systime = sec * NSEC_PER_SEC + nsec;
+
+	ns = systime + delta;
+	ts = rte_ns_to_timespec(ns);
+
+	IGC_WRITE_REG(hw, IGC_SYSTIML, ts.tv_nsec);
+	IGC_WRITE_REG(hw, IGC_SYSTIMH, ts.tv_sec);
+
+	return 0;
+}
+
+static int
+eth_igc_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp,
+			       uint32_t flags)
+{
+	struct rte_eth_link link;
+	int adjust = 0;
+	struct igc_rx_queue *rxq;
+	uint64_t rx_timestamp;
+
+	/* Get current link speed. */
+	eth_igc_link_update(dev, 1);
+	rte_eth_linkstatus_get(dev, &link);
+
+	switch (link.link_speed) {
+	case SPEED_10:
+		adjust = IGC_I225_RX_LATENCY_10;
+		break;
+	case SPEED_100:
+		adjust = IGC_I225_RX_LATENCY_100;
+		break;
+	case SPEED_1000:
+		adjust = IGC_I225_RX_LATENCY_1000;
+		break;
+	case SPEED_2500:
+		adjust = IGC_I225_RX_LATENCY_2500;
+		break;
+	}
+
+	rxq = dev->data->rx_queues[flags];
+	rx_timestamp = rxq->rx_timestamp - adjust;
+	*timestamp = rte_ns_to_timespec(rx_timestamp);
+
+	return 0;
+}
+
+static int
+eth_igc_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	struct rte_eth_link link;
+	uint32_t val, nsec, sec;
+	uint64_t tx_timestamp;
+	int adjust = 0;
+
+	val = IGC_READ_REG(hw, IGC_TSYNCTXCTL);
+	if (!(val & IGC_TSYNCTXCTL_VALID))
+		return -EINVAL;
+
+	nsec = (uint64_t)IGC_READ_REG(hw, IGC_TXSTMPL);
+	sec = (uint64_t)IGC_READ_REG(hw, IGC_TXSTMPH);
+	tx_timestamp = sec * NSEC_PER_SEC + nsec;
+
+	/* Get current link speed. */
+	eth_igc_link_update(dev, 1);
+	rte_eth_linkstatus_get(dev, &link);
+
+	switch (link.link_speed) {
+	case SPEED_10:
+		adjust = IGC_I225_TX_LATENCY_10;
+		break;
+	case SPEED_100:
+		adjust = IGC_I225_TX_LATENCY_100;
+		break;
+	case SPEED_1000:
+		adjust = IGC_I225_TX_LATENCY_1000;
+		break;
+	case SPEED_2500:
+		adjust = IGC_I225_TX_LATENCY_2500;
+		break;
+	}
+
+	tx_timestamp += adjust;
+	*timestamp = rte_ns_to_timespec(tx_timestamp);
+
+	return 0;
+}
+
+static int
+eth_igc_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	uint32_t val;
+
+	/* Disable timestamping of transmitted PTP packets. */
+	IGC_WRITE_REG(hw, IGC_TSYNCTXCTL, 0);
+
+	/* Disable timestamping of received PTP packets. */
+	IGC_WRITE_REG(hw, IGC_TSYNCRXCTL, 0);
+
+	val = IGC_READ_REG(hw, IGC_RXPBS);
+	val &= ~IGC_RXPBS_CFG_TS_EN;
+	IGC_WRITE_REG(hw, IGC_RXPBS, val);
+
+	val = IGC_READ_REG(hw, IGC_SRRCTL(0));
+	val &= ~IGC_SRRCTL_TIMESTAMP;
+	IGC_WRITE_REG(hw, IGC_SRRCTL(0), val);
+
+	return 0;
+}
+
+static int
 eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h
index f56cad7..237d3c1 100644
--- a/drivers/net/igc/igc_ethdev.h
+++ b/drivers/net/igc/igc_ethdev.h
@@ -7,6 +7,7 @@
 
 #include <rte_ethdev.h>
 #include <rte_flow.h>
+#include <rte_time.h>
 
 #include "base/igc_osdep.h"
 #include "base/igc_hw.h"
@@ -75,7 +76,8 @@ extern "C" {
 	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM  | \
 	RTE_ETH_RX_OFFLOAD_KEEP_CRC    | \
 	RTE_ETH_RX_OFFLOAD_SCATTER     | \
-	RTE_ETH_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_RSS_HASH    | \
+	RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 
 #define IGC_TX_OFFLOAD_ALL	(    \
 	RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index c462e91..0236c7f 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -81,7 +81,8 @@
 		RTE_MBUF_F_TX_IP_CKSUM |	\
 		RTE_MBUF_F_TX_L4_MASK |	\
 		RTE_MBUF_F_TX_TCP_SEG |	\
-		RTE_MBUF_F_TX_UDP_SEG)
+		RTE_MBUF_F_TX_UDP_SEG | \
+		RTE_MBUF_F_TX_IEEE1588_TMST)
 
 #define IGC_TX_OFFLOAD_SEG	(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)
 
@@ -93,6 +94,8 @@
 
 #define IGC_TX_OFFLOAD_NOTSUP_MASK (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
 
+#define IGC_TS_HDR_LEN 16
+
 static inline uint64_t
 rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)
 {
@@ -222,6 +225,9 @@ rx_desc_get_pkt_info(struct igc_rx_queue *rxq, struct rte_mbuf *rxm,
 
 	pkt_flags |= rx_desc_statuserr_to_pkt_flags(staterr);
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+
 	rxm->ol_flags = pkt_flags;
 	pkt_info = rte_le_to_cpu_16(rxd->wb.lower.lo_dword.hs_rss.pkt_info);
 	rxm->packet_type = rx_desc_pkt_info_to_pkt_type(pkt_info);
@@ -328,8 +334,15 @@ igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		rxdp->read.hdr_addr = 0;
-		rxdp->read.pkt_addr =
+
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+			rxdp->read.pkt_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)) -
+			IGC_TS_HDR_LEN;
+		else
+			rxdp->read.pkt_addr =
 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+
 		rxm->next = NULL;
 
 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
@@ -340,6 +353,14 @@ igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		rx_desc_get_pkt_info(rxq, rxm, &rxd, staterr);
 
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			uint32_t *ts = rte_pktmbuf_mtod_offset(rxm,
+					uint32_t *, -IGC_TS_HDR_LEN);
+			rxq->rx_timestamp = (uint64_t)ts[3] * NSEC_PER_SEC +
+					ts[2];
+			rxm->timesync = rxq->queue_id;
+		}
+
 		/*
 		 * Store the mbuf address into the next entry of the array
 		 * of returned packets.
@@ -472,8 +493,15 @@ igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		rxdp->read.hdr_addr = 0;
-		rxdp->read.pkt_addr =
+
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+			rxdp->read.pkt_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)) -
+				IGC_TS_HDR_LEN;
+		else
+			rxdp->read.pkt_addr =
 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+
 		rxm->next = NULL;
 
 		/*
@@ -537,6 +565,14 @@ igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 		rx_desc_get_pkt_info(rxq, first_seg, &rxd, staterr);
 
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			uint32_t *ts = rte_pktmbuf_mtod_offset(first_seg,
+					uint32_t *, -IGC_TS_HDR_LEN);
+			rxq->rx_timestamp = (uint64_t)ts[3] * NSEC_PER_SEC +
+					ts[2];
+			rxm->timesync = rxq->queue_id;
+		}
+
 		/*
 		 * Store the mbuf address into the next entry of the array
 		 * of returned packets.
@@ -682,7 +718,10 @@ igc_alloc_rx_queue_mbufs(struct igc_rx_queue *rxq)
 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 		rxd = &rxq->rx_ring[i];
 		rxd->read.hdr_addr = 0;
-		rxd->read.pkt_addr = dma_addr;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+			rxd->read.pkt_addr = dma_addr - IGC_TS_HDR_LEN;
+		else
+			rxd->read.pkt_addr = dma_addr;
 		rxe[i].mbuf = mbuf;
 	}
 
@@ -985,6 +1024,9 @@ igc_rx_init(struct rte_eth_dev *dev)
 		rxq = dev->data->rx_queues[i];
 		rxq->flags = 0;
 
+		if (offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+			rxq->offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
 		/* Allocate buffers for descriptor rings and set up queue */
 		ret = igc_alloc_rx_queue_mbufs(rxq);
 		if (ret)
diff --git a/drivers/net/igc/igc_txrx.h b/drivers/net/igc/igc_txrx.h
index 5731761..e7272f8 100644
--- a/drivers/net/igc/igc_txrx.h
+++ b/drivers/net/igc/igc_txrx.h
@@ -42,6 +42,7 @@ struct igc_rx_queue {
 	uint8_t             drop_en;    /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
 	uint64_t            offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
+	uint64_t            rx_timestamp;
 };
 
 /** Offload features */
-- 
2.9.5



* RE: [PATCH 1/3] net/igc: code refactoring
  2022-12-20  3:41 ` [PATCH 1/3] net/igc: code refactoring Simei Su
@ 2023-01-17  2:25   ` Zhang, Qi Z
  2023-01-17  4:00     ` Su, Simei
  0 siblings, 1 reply; 11+ messages in thread
From: Zhang, Qi Z @ 2023-01-17  2:25 UTC (permalink / raw)
  To: Su, Simei, Guo, Junfeng; +Cc: dev, Wu, Wenjun1



> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Tuesday, December 20, 2022 11:41 AM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Guo, Junfeng
> <junfeng.guo@intel.com>
> Cc: dev@dpdk.org; Wu, Wenjun1 <wenjun1.wu@intel.com>; Su, Simei
> <simei.su@intel.com>
> Subject: [PATCH 1/3] net/igc: code refactoring
> 
> Move related structures for Rx/Tx queue from igc_txrx.c to igc_txrx.h to
> make code cleaner and variables used more conveniently.

Not sure if this is necessary.
If a structure is only used internally, keeping it internal should be OK.
Otherwise, we need to give a reason why it is exposed.

> 
> Signed-off-by: Simei Su <simei.su@intel.com>
> ---
>  drivers/net/igc/igc_txrx.c | 118 ---------------------------------------------
>  drivers/net/igc/igc_txrx.h | 115
> +++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 115 insertions(+), 118 deletions(-)
> 
> diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c index
> ffd219b..c462e91 100644
> --- a/drivers/net/igc/igc_txrx.c
> +++ b/drivers/net/igc/igc_txrx.c
> @@ -93,124 +93,6 @@
> 
>  #define IGC_TX_OFFLOAD_NOTSUP_MASK
> (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
> 
> -/**
> - * Structure associated with each descriptor of the RX ring of a RX queue.
> - */
> -struct igc_rx_entry {
> -	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
> -};
> -
> -/**
> - * Structure associated with each RX queue.
> - */
> -struct igc_rx_queue {
> -	struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring.
> */
> -	volatile union igc_adv_rx_desc *rx_ring;
> -	/**< RX ring virtual address. */
> -	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
> -	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
> -	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
> -	struct igc_rx_entry *sw_ring;   /**< address of RX software ring. */
> -	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet.
> */
> -	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet.
> */
> -	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
> -	uint16_t            rx_tail;    /**< current value of RDT register. */
> -	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
> -	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
> -	uint16_t            queue_id;   /**< RX queue index. */
> -	uint16_t            reg_idx;    /**< RX queue register index. */
> -	uint16_t            port_id;    /**< Device port identifier. */
> -	uint8_t             pthresh;    /**< Prefetch threshold register. */
> -	uint8_t             hthresh;    /**< Host threshold register. */
> -	uint8_t             wthresh;    /**< Write-back threshold register. */
> -	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
> -	uint8_t             drop_en;	/**< If not 0, set SRRCTL.Drop_En. */
> -	uint32_t            flags;      /**< RX flags. */
> -	uint64_t	    offloads;   /**< offloads of
> RTE_ETH_RX_OFFLOAD_* */
> -};
> -
> -/** Offload features */
> -union igc_tx_offload {
> -	uint64_t data;
> -	struct {
> -		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> -		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
> -		uint64_t vlan_tci:16;
> -		/**< VLAN Tag Control Identifier(CPU order). */
> -		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
> -		uint64_t tso_segsz:16; /**< TCP TSO segment size. */
> -		/* uint64_t unused:8; */
> -	};
> -};
> -
> -/*
> - * Compare mask for igc_tx_offload.data,
> - * should be in sync with igc_tx_offload layout.
> - */
> -#define TX_MACIP_LEN_CMP_MASK	0x000000000000FFFFULL /**< L2L3
> header mask. */
> -#define TX_VLAN_CMP_MASK	0x00000000FFFF0000ULL /**< Vlan
> mask. */
> -#define TX_TCP_LEN_CMP_MASK	0x000000FF00000000ULL /**< TCP
> header mask. */
> -#define TX_TSO_MSS_CMP_MASK	0x00FFFF0000000000ULL /**< TSO
> segsz mask. */
> -/** Mac + IP + TCP + Mss mask. */
> -#define TX_TSO_CMP_MASK	\
> -	(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK |
> TX_TSO_MSS_CMP_MASK)
> -
> -/**
> - * Structure to check if new context need be built
> - */
> -struct igc_advctx_info {
> -	uint64_t flags;           /**< ol_flags related to context build. */
> -	/** tx offload: vlan, tso, l2-l3-l4 lengths. */
> -	union igc_tx_offload tx_offload;
> -	/** compare mask for tx offload. */
> -	union igc_tx_offload tx_offload_mask;
> -};
> -
> -/**
> - * Hardware context number
> - */
> -enum {
> -	IGC_CTX_0    = 0, /**< CTX0    */
> -	IGC_CTX_1    = 1, /**< CTX1    */
> -	IGC_CTX_NUM  = 2, /**< CTX_NUM */
> -};
> -
> -/**
> - * Structure associated with each descriptor of the TX ring of a TX queue.
> - */
> -struct igc_tx_entry {
> -	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
> -	uint16_t next_id; /**< Index of next descriptor in ring. */
> -	uint16_t last_id; /**< Index of last scattered descriptor. */
> -};
> -
> -/**
> - * Structure associated with each TX queue.
> - */
> -struct igc_tx_queue {
> -	volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
> -	uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
> -	struct igc_tx_entry    *sw_ring; /**< virtual address of SW ring. */
> -	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
> -	uint32_t               txd_type;      /**< Device-specific TXD type */
> -	uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
> -	uint16_t               tx_tail;  /**< Current value of TDT register. */
> -	uint16_t               tx_head;
> -	/**< Index of first used TX descriptor. */
> -	uint16_t               queue_id; /**< TX queue index. */
> -	uint16_t               reg_idx;  /**< TX queue register index. */
> -	uint16_t               port_id;  /**< Device port identifier. */
> -	uint8_t                pthresh;  /**< Prefetch threshold register. */
> -	uint8_t                hthresh;  /**< Host threshold register. */
> -	uint8_t                wthresh;  /**< Write-back threshold register. */
> -	uint8_t                ctx_curr;
> -
> -	/**< Start context position for transmit queue. */
> -	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
> -	/**< Hardware context history.*/
> -	uint64_t	       offloads; /**< offloads of
> RTE_ETH_TX_OFFLOAD_* */
> -};
> -
>  static inline uint64_t
>  rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)  { diff --git
> a/drivers/net/igc/igc_txrx.h b/drivers/net/igc/igc_txrx.h index
> 02a0a05..5731761 100644
> --- a/drivers/net/igc/igc_txrx.h
> +++ b/drivers/net/igc/igc_txrx.h
> @@ -11,6 +11,121 @@
>  extern "C" {
>  #endif
> 
> +struct igc_rx_entry {
> +	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor.
> */ };
> +
> +/**
> + * Structure associated with each RX queue.
> + */
> +struct igc_rx_queue {
> +	struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring.
> */
> +	volatile union igc_adv_rx_desc *rx_ring;
> +	/**< RX ring virtual address. */
> +	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
> +	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
> +	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
> +	struct igc_rx_entry *sw_ring;   /**< address of RX software ring. */
> +	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet.
> */
> +	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet.
> */
> +	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
> +	uint16_t            rx_tail;    /**< current value of RDT register. */
> +	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
> +	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
> +	uint16_t            queue_id;   /**< RX queue index. */
> +	uint16_t            reg_idx;    /**< RX queue register index. */
> +	uint16_t            port_id;    /**< Device port identifier. */
> +	uint8_t             pthresh;    /**< Prefetch threshold register. */
> +	uint8_t             hthresh;    /**< Host threshold register. */
> +	uint8_t             wthresh;    /**< Write-back threshold register. */
> +	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
> +	uint8_t             drop_en;    /**< If not 0, set SRRCTL.Drop_En. */
> +	uint32_t            flags;      /**< RX flags. */
> +	uint64_t            offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_*
> */
> +};
> +
> +/** Offload features */
> +union igc_tx_offload {
> +	uint64_t data;
> +	struct {
> +		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> +		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
> +		uint64_t vlan_tci:16;
> +		/**< VLAN Tag Control Identifier(CPU order). */
> +		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
> +		uint64_t tso_segsz:16; /**< TCP TSO segment size. */
> +		/* uint64_t unused:8; */
> +	};
> +};
> +
> +/**
> + * Compare mask for igc_tx_offload.data,
> + * should be in sync with igc_tx_offload layout.
> + */
> +#define TX_MACIP_LEN_CMP_MASK  0x000000000000FFFFULL /**< L2L3
> header mask. */
> +#define TX_VLAN_CMP_MASK       0x00000000FFFF0000ULL /**< Vlan mask.
> */
> +#define TX_TCP_LEN_CMP_MASK    0x000000FF00000000ULL /**< TCP
> header mask. */
> +#define TX_TSO_MSS_CMP_MASK    0x00FFFF0000000000ULL /**< TSO
> segsz mask. */
> +/** Mac + IP + TCP + Mss mask. */
> +#define TX_TSO_CMP_MASK        \
> +	(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK |
> TX_TSO_MSS_CMP_MASK)
> +
> +/**
> + * Structure to check if new context need be built  */ struct
> +igc_advctx_info {
> +	uint64_t flags;           /**< ol_flags related to context build. */
> +	/** tx offload: vlan, tso, l2-l3-l4 lengths. */
> +	union igc_tx_offload tx_offload;
> +	/** compare mask for tx offload. */
> +	union igc_tx_offload tx_offload_mask;
> +};
> +
> +/**
> + * Hardware context number
> + */
> +enum {
> +	IGC_CTX_0    = 0, /**< CTX0    */
> +	IGC_CTX_1    = 1, /**< CTX1    */
> +	IGC_CTX_NUM  = 2, /**< CTX_NUM */
> +};
> +
> +/**
> + * Structure associated with each descriptor of the TX ring of a TX queue.
> + */
> +struct igc_tx_entry {
> +	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
> +	uint16_t next_id; /**< Index of next descriptor in ring. */
> +	uint16_t last_id; /**< Index of last scattered descriptor. */ };
> +
> +/**
> + * Structure associated with each TX queue.
> + */
> +struct igc_tx_queue {
> +	volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
> +	uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
> +	struct igc_tx_entry    *sw_ring; /**< virtual address of SW ring. */
> +	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
> +	uint32_t               txd_type;      /**< Device-specific TXD type */
> +	uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
> +	uint16_t               tx_tail;  /**< Current value of TDT register. */
> +	uint16_t               tx_head;
> +	/**< Index of first used TX descriptor. */
> +	uint16_t               queue_id; /**< TX queue index. */
> +	uint16_t               reg_idx;  /**< TX queue register index. */
> +	uint16_t               port_id;  /**< Device port identifier. */
> +	uint8_t                pthresh;  /**< Prefetch threshold register. */
> +	uint8_t                hthresh;  /**< Host threshold register. */
> +	uint8_t                wthresh;  /**< Write-back threshold register. */
> +	uint8_t                ctx_curr;
> +
> +	/**< Start context position for transmit queue. */
> +	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
> +	/**< Hardware context history.*/
> +	uint64_t               offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_*
> */
> +};
> +
>  /*
>   * RX/TX function prototypes
>   */
> --
> 2.9.5



* RE: [PATCH 1/3] net/igc: code refactoring
  2023-01-17  2:25   ` Zhang, Qi Z
@ 2023-01-17  4:00     ` Su, Simei
  0 siblings, 0 replies; 11+ messages in thread
From: Su, Simei @ 2023-01-17  4:00 UTC (permalink / raw)
  To: Zhang, Qi Z, Guo, Junfeng; +Cc: dev, Wu, Wenjun1

Hi Qi,

> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: Tuesday, January 17, 2023 10:25 AM
> To: Su, Simei <simei.su@intel.com>; Guo, Junfeng <junfeng.guo@intel.com>
> Cc: dev@dpdk.org; Wu, Wenjun1 <wenjun1.wu@intel.com>
> Subject: RE: [PATCH 1/3] net/igc: code refactoring
> 
> 
> 
> > -----Original Message-----
> > From: Su, Simei <simei.su@intel.com>
> > Sent: Tuesday, December 20, 2022 11:41 AM
> > To: Zhang, Qi Z <qi.z.zhang@intel.com>; Guo, Junfeng
> > <junfeng.guo@intel.com>
> > Cc: dev@dpdk.org; Wu, Wenjun1 <wenjun1.wu@intel.com>; Su, Simei
> > <simei.su@intel.com>
> > Subject: [PATCH 1/3] net/igc: code refactoring
> >
> > Move related structures for Rx/Tx queue from igc_txrx.c to igc_txrx.h
> > to make code cleaner and variables used more conveniently.
> 
> Not sure if this is necessary.
> If a structure is only used internally, keeping it internal should be OK.
> Otherwise, we need to give a reason why it is exposed.

OK. I will rework the commit log to give a detailed reason in v2.

Thanks,
Simei

> 
> >
> > Signed-off-by: Simei Su <simei.su@intel.com>
> > ---
> >  drivers/net/igc/igc_txrx.c | 118
> > ---------------------------------------------
> >  drivers/net/igc/igc_txrx.h | 115
> > +++++++++++++++++++++++++++++++++++++++++++
> >  2 files changed, 115 insertions(+), 118 deletions(-)
> >
> > diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
> > index
> > ffd219b..c462e91 100644
> > --- a/drivers/net/igc/igc_txrx.c
> > +++ b/drivers/net/igc/igc_txrx.c
> > @@ -93,124 +93,6 @@
> >
> >  #define IGC_TX_OFFLOAD_NOTSUP_MASK
> > (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
> >
> > -/**
> > - * Structure associated with each descriptor of the RX ring of a RX queue.
> > - */
> > -struct igc_rx_entry {
> > -	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
> > -};
> > -
> > -/**
> > - * Structure associated with each RX queue.
> > - */
> > -struct igc_rx_queue {
> > -	struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring.
> > */
> > -	volatile union igc_adv_rx_desc *rx_ring;
> > -	/**< RX ring virtual address. */
> > -	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
> > -	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
> > -	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
> > -	struct igc_rx_entry *sw_ring;   /**< address of RX software ring. */
> > -	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet.
> > */
> > -	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet.
> > */
> > -	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
> > -	uint16_t            rx_tail;    /**< current value of RDT register. */
> > -	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
> > -	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
> > -	uint16_t            queue_id;   /**< RX queue index. */
> > -	uint16_t            reg_idx;    /**< RX queue register index. */
> > -	uint16_t            port_id;    /**< Device port identifier. */
> > -	uint8_t             pthresh;    /**< Prefetch threshold register. */
> > -	uint8_t             hthresh;    /**< Host threshold register. */
> > -	uint8_t             wthresh;    /**< Write-back threshold register. */
> > -	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
> > -	uint8_t             drop_en;	/**< If not 0, set SRRCTL.Drop_En. */
> > -	uint32_t            flags;      /**< RX flags. */
> > -	uint64_t	    offloads;   /**< offloads of
> > RTE_ETH_RX_OFFLOAD_* */
> > -};
> > -
> > -/** Offload features */
> > -union igc_tx_offload {
> > -	uint64_t data;
> > -	struct {
> > -		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> > -		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
> > -		uint64_t vlan_tci:16;
> > -		/**< VLAN Tag Control Identifier(CPU order). */
> > -		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
> > -		uint64_t tso_segsz:16; /**< TCP TSO segment size. */
> > -		/* uint64_t unused:8; */
> > -	};
> > -};
> > -
> > -/*
> > - * Compare mask for igc_tx_offload.data,
> > - * should be in sync with igc_tx_offload layout.
> > - */
> > -#define TX_MACIP_LEN_CMP_MASK	0x000000000000FFFFULL /**< L2L3
> > header mask. */
> > -#define TX_VLAN_CMP_MASK	0x00000000FFFF0000ULL /**< Vlan
> > mask. */
> > -#define TX_TCP_LEN_CMP_MASK	0x000000FF00000000ULL /**< TCP
> > header mask. */
> > -#define TX_TSO_MSS_CMP_MASK	0x00FFFF0000000000ULL /**< TSO
> > segsz mask. */
> > -/** Mac + IP + TCP + Mss mask. */
> > -#define TX_TSO_CMP_MASK	\
> > -	(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK |
> > TX_TSO_MSS_CMP_MASK)
> > -
> > -/**
> > - * Structure to check if new context need be built
> > - */
> > -struct igc_advctx_info {
> > -	uint64_t flags;           /**< ol_flags related to context build. */
> > -	/** tx offload: vlan, tso, l2-l3-l4 lengths. */
> > -	union igc_tx_offload tx_offload;
> > -	/** compare mask for tx offload. */
> > -	union igc_tx_offload tx_offload_mask;
> > -};
> > -
> > -/**
> > - * Hardware context number
> > - */
> > -enum {
> > -	IGC_CTX_0    = 0, /**< CTX0    */
> > -	IGC_CTX_1    = 1, /**< CTX1    */
> > -	IGC_CTX_NUM  = 2, /**< CTX_NUM */
> > -};
> > -
> > -/**
> > - * Structure associated with each descriptor of the TX ring of a TX queue.
> > - */
> > -struct igc_tx_entry {
> > -	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
> > -	uint16_t next_id; /**< Index of next descriptor in ring. */
> > -	uint16_t last_id; /**< Index of last scattered descriptor. */
> > -};
> > -
> > -/**
> > - * Structure associated with each TX queue.
> > - */
> > -struct igc_tx_queue {
> > -	volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
> > -	uint64_t               tx_ring_phys_addr; /**< TX ring DMA address.
> */
> > -	struct igc_tx_entry    *sw_ring; /**< virtual address of SW ring. */
> > -	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
> > -	uint32_t               txd_type;      /**< Device-specific TXD type
> */
> > -	uint16_t               nb_tx_desc;    /**< number of TX descriptors.
> */
> > -	uint16_t               tx_tail;  /**< Current value of TDT register. */
> > -	uint16_t               tx_head;
> > -	/**< Index of first used TX descriptor. */
> > -	uint16_t               queue_id; /**< TX queue index. */
> > -	uint16_t               reg_idx;  /**< TX queue register index. */
> > -	uint16_t               port_id;  /**< Device port identifier. */
> > -	uint8_t                pthresh;  /**< Prefetch threshold register. */
> > -	uint8_t                hthresh;  /**< Host threshold register. */
> > -	uint8_t                wthresh;  /**< Write-back threshold register.
> */
> > -	uint8_t                ctx_curr;
> > -
> > -	/**< Start context position for transmit queue. */
> > -	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
> > -	/**< Hardware context history.*/
> > -	uint64_t	       offloads; /**< offloads of
> > RTE_ETH_TX_OFFLOAD_* */
> > -};
> > -
> >  static inline uint64_t
> >  rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)  { diff --git
> > a/drivers/net/igc/igc_txrx.h b/drivers/net/igc/igc_txrx.h index
> > 02a0a05..5731761 100644
> > --- a/drivers/net/igc/igc_txrx.h
> > +++ b/drivers/net/igc/igc_txrx.h
> > @@ -11,6 +11,121 @@
> >  extern "C" {
> >  #endif
> >
> > +struct igc_rx_entry {
> > +	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor.
> > */ };
> > +
> > +/**
> > + * Structure associated with each RX queue.
> > + */
> > +struct igc_rx_queue {
> > +	struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring.
> > */
> > +	volatile union igc_adv_rx_desc *rx_ring;
> > +	/**< RX ring virtual address. */
> > +	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
> > +	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
> > +	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
> > +	struct igc_rx_entry *sw_ring;   /**< address of RX software ring. */
> > +	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet.
> > */
> > +	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet.
> > */
> > +	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
> > +	uint16_t            rx_tail;    /**< current value of RDT register. */
> > +	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
> > +	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
> > +	uint16_t            queue_id;   /**< RX queue index. */
> > +	uint16_t            reg_idx;    /**< RX queue register index. */
> > +	uint16_t            port_id;    /**< Device port identifier. */
> > +	uint8_t             pthresh;    /**< Prefetch threshold register. */
> > +	uint8_t             hthresh;    /**< Host threshold register. */
> > +	uint8_t             wthresh;    /**< Write-back threshold register. */
> > +	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
> > +	uint8_t             drop_en;    /**< If not 0, set SRRCTL.Drop_En. */
> > +	uint32_t            flags;      /**< RX flags. */
> > +	uint64_t            offloads;   /**< offloads of
> RTE_ETH_RX_OFFLOAD_*
> > */
> > +};
> > +
> > +/** Offload features */
> > +union igc_tx_offload {
> > +	uint64_t data;
> > +	struct {
> > +		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> > +		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
> > +		uint64_t vlan_tci:16;
> > +		/**< VLAN Tag Control Identifier(CPU order). */
> > +		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
> > +		uint64_t tso_segsz:16; /**< TCP TSO segment size. */
> > +		/* uint64_t unused:8; */
> > +	};
> > +};
> > +
> > +/**
> > + * Compare mask for igc_tx_offload.data,
> > + * should be in sync with igc_tx_offload layout.
> > + */
> > +#define TX_MACIP_LEN_CMP_MASK  0x000000000000FFFFULL /**< L2L3
> > header mask. */
> > +#define TX_VLAN_CMP_MASK       0x00000000FFFF0000ULL /**< Vlan
> mask.
> > */
> > +#define TX_TCP_LEN_CMP_MASK    0x000000FF00000000ULL /**< TCP
> > header mask. */
> > +#define TX_TSO_MSS_CMP_MASK    0x00FFFF0000000000ULL /**< TSO
> > segsz mask. */
> > +/** Mac + IP + TCP + Mss mask. */
> > +#define TX_TSO_CMP_MASK        \
> > +	(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK |
> > TX_TSO_MSS_CMP_MASK)
> > +
> > +/**
> > + * Structure to check if new context need be built  */ struct
> > +igc_advctx_info {
> > +	uint64_t flags;           /**< ol_flags related to context build. */
> > +	/** tx offload: vlan, tso, l2-l3-l4 lengths. */
> > +	union igc_tx_offload tx_offload;
> > +	/** compare mask for tx offload. */
> > +	union igc_tx_offload tx_offload_mask; };
> > +
> > +/**
> > + * Hardware context number
> > + */
> > +enum {
> > +	IGC_CTX_0    = 0, /**< CTX0    */
> > +	IGC_CTX_1    = 1, /**< CTX1    */
> > +	IGC_CTX_NUM  = 2, /**< CTX_NUM */
> > +};
> > +
> > +/**
> > + * Structure associated with each descriptor of the TX ring of a TX queue.
> > + */
> > +struct igc_tx_entry {
> > +	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
> > +	uint16_t next_id; /**< Index of next descriptor in ring. */
> > +	uint16_t last_id; /**< Index of last scattered descriptor. */ };
> > +
> > +/**
> > + * Structure associated with each TX queue.
> > + */
> > +struct igc_tx_queue {
> > +	volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
> > +	uint64_t               tx_ring_phys_addr; /**< TX ring DMA address.
> */
> > +	struct igc_tx_entry    *sw_ring; /**< virtual address of SW ring. */
> > +	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
> > +	uint32_t               txd_type;      /**< Device-specific TXD type
> */
> > +	uint16_t               nb_tx_desc;    /**< number of TX descriptors.
> */
> > +	uint16_t               tx_tail;  /**< Current value of TDT register. */
> > +	uint16_t               tx_head;
> > +	/**< Index of first used TX descriptor. */
> > +	uint16_t               queue_id; /**< TX queue index. */
> > +	uint16_t               reg_idx;  /**< TX queue register index. */
> > +	uint16_t               port_id;  /**< Device port identifier. */
> > +	uint8_t                pthresh;  /**< Prefetch threshold register. */
> > +	uint8_t                hthresh;  /**< Host threshold register. */
> > +	uint8_t                wthresh;  /**< Write-back threshold register.
> */
> > +	uint8_t                ctx_curr;
> > +
> > +	/**< Start context position for transmit queue. */
> > +	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
> > +	/**< Hardware context history.*/
> > +	uint64_t               offloads; /**< offloads of
> RTE_ETH_TX_OFFLOAD_*
> > */
> > +};
> > +
> >  /*
> >   * RX/TX function prototypes
> >   */
> > --
> > 2.9.5



* [PATCH v2 0/3] net/igc: support PTP timesync
  2022-12-20  3:41 [PATCH 0/3] net/igc: support PTP timesync Simei Su
                   ` (2 preceding siblings ...)
  2022-12-20  3:41 ` [PATCH 3/3] net/igc: support IEEE 1588 PTP Simei Su
@ 2023-01-17 13:26 ` Simei Su
  2023-01-17 13:26   ` [PATCH v2 1/3] net/igc: code refactoring Simei Su
                     ` (3 more replies)
  3 siblings, 4 replies; 11+ messages in thread
From: Simei Su @ 2023-01-17 13:26 UTC (permalink / raw)
  To: qi.z.zhang, junfeng.guo; +Cc: dev, wenjun1.wu, Simei Su

[PATCH v2 1/3] Code refactoring.
[PATCH v2 2/3] Add related definitions for PTP timesync.
[PATCH v2 3/3] Add IEEE 1588 APIs to support timesync.

v2:
* Refine commit log.
* Update the doc/guides/nics/features/igc.ini to add "Timesync" feature.
* Add release notes.

Simei Su (3):
  net/igc: code refactoring
  net/igc/base: support PTP timesync
  net/igc: support IEEE 1588 PTP

 doc/guides/nics/features/igc.ini       |   1 +
 doc/guides/rel_notes/release_23_03.rst |   3 +
 drivers/net/igc/base/igc_defines.h     |  11 ++
 drivers/net/igc/igc_ethdev.c           | 222 +++++++++++++++++++++++++++++++++
 drivers/net/igc/igc_ethdev.h           |   4 +-
 drivers/net/igc/igc_txrx.c             | 166 +++++++-----------------
 drivers/net/igc/igc_txrx.h             | 116 +++++++++++++++++
 7 files changed, 401 insertions(+), 122 deletions(-)

-- 
2.9.5



* [PATCH v2 1/3] net/igc: code refactoring
  2023-01-17 13:26 ` [PATCH v2 0/3] net/igc: support PTP timesync Simei Su
@ 2023-01-17 13:26   ` Simei Su
  2023-01-17 13:26   ` [PATCH v2 2/3] net/igc/base: support PTP timesync Simei Su
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 11+ messages in thread
From: Simei Su @ 2023-01-17 13:26 UTC (permalink / raw)
  To: qi.z.zhang, junfeng.guo; +Cc: dev, wenjun1.wu, Simei Su

This patch moves some structures from igc_txrx.c to igc_txrx.h for the
timesync enabling feature. For example, fields of the "igc_rx_queue"
structure can then be accessed conveniently from both igc_ethdev.c and
igc_txrx.c. This is also consistent with the coding style of other PMDs.
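
As an illustration of why the structures need to be visible outside
igc_txrx.c, the timesync patch later in this series touches the Rx queue
fields from igc_ethdev.c roughly as below (a sketch based on patch 3/3,
not code added by this patch):

	/* In eth_igc_timesync_enable() (igc_ethdev.c): flag the timestamp
	 * offload on every Rx queue, which needs struct igc_rx_queue to be
	 * visible via igc_txrx.h.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct igc_rx_queue *rxq = dev->data->rx_queues[i];

		rxq->offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
	}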

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/igc/igc_txrx.c | 118 ---------------------------------------------
 drivers/net/igc/igc_txrx.h | 115 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 115 insertions(+), 118 deletions(-)

diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index ffd219b..c462e91 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -93,124 +93,6 @@
 
 #define IGC_TX_OFFLOAD_NOTSUP_MASK (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
 
-/**
- * Structure associated with each descriptor of the RX ring of a RX queue.
- */
-struct igc_rx_entry {
-	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
-};
-
-/**
- * Structure associated with each RX queue.
- */
-struct igc_rx_queue {
-	struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
-	volatile union igc_adv_rx_desc *rx_ring;
-	/**< RX ring virtual address. */
-	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
-	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
-	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
-	struct igc_rx_entry *sw_ring;   /**< address of RX software ring. */
-	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
-	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
-	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
-	uint16_t            rx_tail;    /**< current value of RDT register. */
-	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
-	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
-	uint16_t            queue_id;   /**< RX queue index. */
-	uint16_t            reg_idx;    /**< RX queue register index. */
-	uint16_t            port_id;    /**< Device port identifier. */
-	uint8_t             pthresh;    /**< Prefetch threshold register. */
-	uint8_t             hthresh;    /**< Host threshold register. */
-	uint8_t             wthresh;    /**< Write-back threshold register. */
-	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
-	uint8_t             drop_en;	/**< If not 0, set SRRCTL.Drop_En. */
-	uint32_t            flags;      /**< RX flags. */
-	uint64_t	    offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
-};
-
-/** Offload features */
-union igc_tx_offload {
-	uint64_t data;
-	struct {
-		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
-		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
-		uint64_t vlan_tci:16;
-		/**< VLAN Tag Control Identifier(CPU order). */
-		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
-		uint64_t tso_segsz:16; /**< TCP TSO segment size. */
-		/* uint64_t unused:8; */
-	};
-};
-
-/*
- * Compare mask for igc_tx_offload.data,
- * should be in sync with igc_tx_offload layout.
- */
-#define TX_MACIP_LEN_CMP_MASK	0x000000000000FFFFULL /**< L2L3 header mask. */
-#define TX_VLAN_CMP_MASK	0x00000000FFFF0000ULL /**< Vlan mask. */
-#define TX_TCP_LEN_CMP_MASK	0x000000FF00000000ULL /**< TCP header mask. */
-#define TX_TSO_MSS_CMP_MASK	0x00FFFF0000000000ULL /**< TSO segsz mask. */
-/** Mac + IP + TCP + Mss mask. */
-#define TX_TSO_CMP_MASK	\
-	(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
-
-/**
- * Structure to check if new context need be built
- */
-struct igc_advctx_info {
-	uint64_t flags;           /**< ol_flags related to context build. */
-	/** tx offload: vlan, tso, l2-l3-l4 lengths. */
-	union igc_tx_offload tx_offload;
-	/** compare mask for tx offload. */
-	union igc_tx_offload tx_offload_mask;
-};
-
-/**
- * Hardware context number
- */
-enum {
-	IGC_CTX_0    = 0, /**< CTX0    */
-	IGC_CTX_1    = 1, /**< CTX1    */
-	IGC_CTX_NUM  = 2, /**< CTX_NUM */
-};
-
-/**
- * Structure associated with each descriptor of the TX ring of a TX queue.
- */
-struct igc_tx_entry {
-	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
-	uint16_t next_id; /**< Index of next descriptor in ring. */
-	uint16_t last_id; /**< Index of last scattered descriptor. */
-};
-
-/**
- * Structure associated with each TX queue.
- */
-struct igc_tx_queue {
-	volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
-	uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
-	struct igc_tx_entry    *sw_ring; /**< virtual address of SW ring. */
-	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
-	uint32_t               txd_type;      /**< Device-specific TXD type */
-	uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
-	uint16_t               tx_tail;  /**< Current value of TDT register. */
-	uint16_t               tx_head;
-	/**< Index of first used TX descriptor. */
-	uint16_t               queue_id; /**< TX queue index. */
-	uint16_t               reg_idx;  /**< TX queue register index. */
-	uint16_t               port_id;  /**< Device port identifier. */
-	uint8_t                pthresh;  /**< Prefetch threshold register. */
-	uint8_t                hthresh;  /**< Host threshold register. */
-	uint8_t                wthresh;  /**< Write-back threshold register. */
-	uint8_t                ctx_curr;
-
-	/**< Start context position for transmit queue. */
-	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
-	/**< Hardware context history.*/
-	uint64_t	       offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
-};
-
 static inline uint64_t
 rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)
 {
diff --git a/drivers/net/igc/igc_txrx.h b/drivers/net/igc/igc_txrx.h
index 02a0a05..5731761 100644
--- a/drivers/net/igc/igc_txrx.h
+++ b/drivers/net/igc/igc_txrx.h
@@ -11,6 +11,121 @@
 extern "C" {
 #endif
 
+struct igc_rx_entry {
+	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct igc_rx_queue {
+	struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
+	volatile union igc_adv_rx_desc *rx_ring;
+	/**< RX ring virtual address. */
+	uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
+	volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
+	volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
+	struct igc_rx_entry *sw_ring;   /**< address of RX software ring. */
+	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
+	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
+	uint16_t            rx_tail;    /**< current value of RDT register. */
+	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
+	uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
+	uint16_t            queue_id;   /**< RX queue index. */
+	uint16_t            reg_idx;    /**< RX queue register index. */
+	uint16_t            port_id;    /**< Device port identifier. */
+	uint8_t             pthresh;    /**< Prefetch threshold register. */
+	uint8_t             hthresh;    /**< Host threshold register. */
+	uint8_t             wthresh;    /**< Write-back threshold register. */
+	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
+	uint8_t             drop_en;    /**< If not 0, set SRRCTL.Drop_En. */
+	uint32_t            flags;      /**< RX flags. */
+	uint64_t            offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
+};
+
+/** Offload features */
+union igc_tx_offload {
+	uint64_t data;
+	struct {
+		uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+		uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
+		uint64_t vlan_tci:16;
+		/**< VLAN Tag Control Identifier(CPU order). */
+		uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+		uint64_t tso_segsz:16; /**< TCP TSO segment size. */
+		/* uint64_t unused:8; */
+	};
+};
+
+/**
+ * Compare mask for igc_tx_offload.data,
+ * should be in sync with igc_tx_offload layout.
+ */
+#define TX_MACIP_LEN_CMP_MASK  0x000000000000FFFFULL /**< L2L3 header mask. */
+#define TX_VLAN_CMP_MASK       0x00000000FFFF0000ULL /**< Vlan mask. */
+#define TX_TCP_LEN_CMP_MASK    0x000000FF00000000ULL /**< TCP header mask. */
+#define TX_TSO_MSS_CMP_MASK    0x00FFFF0000000000ULL /**< TSO segsz mask. */
+/** Mac + IP + TCP + Mss mask. */
+#define TX_TSO_CMP_MASK        \
+	(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
+
+/**
+ * Structure to check if new context need be built
+ */
+struct igc_advctx_info {
+	uint64_t flags;           /**< ol_flags related to context build. */
+	/** tx offload: vlan, tso, l2-l3-l4 lengths. */
+	union igc_tx_offload tx_offload;
+	/** compare mask for tx offload. */
+	union igc_tx_offload tx_offload_mask;
+};
+
+/**
+ * Hardware context number
+ */
+enum {
+	IGC_CTX_0    = 0, /**< CTX0    */
+	IGC_CTX_1    = 1, /**< CTX1    */
+	IGC_CTX_NUM  = 2, /**< CTX_NUM */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct igc_tx_entry {
+	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+	uint16_t next_id; /**< Index of next descriptor in ring. */
+	uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct igc_tx_queue {
+	volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
+	uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
+	struct igc_tx_entry    *sw_ring; /**< virtual address of SW ring. */
+	volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
+	uint32_t               txd_type;      /**< Device-specific TXD type */
+	uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
+	uint16_t               tx_tail;  /**< Current value of TDT register. */
+	uint16_t               tx_head;
+	/**< Index of first used TX descriptor. */
+	uint16_t               queue_id; /**< TX queue index. */
+	uint16_t               reg_idx;  /**< TX queue register index. */
+	uint16_t               port_id;  /**< Device port identifier. */
+	uint8_t                pthresh;  /**< Prefetch threshold register. */
+	uint8_t                hthresh;  /**< Host threshold register. */
+	uint8_t                wthresh;  /**< Write-back threshold register. */
+	uint8_t                ctx_curr;
+
+	/**< Start context position for transmit queue. */
+	struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
+	/**< Hardware context history.*/
+	uint64_t               offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
+};
+
 /*
  * RX/TX function prototypes
  */
-- 
2.9.5


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH v2 2/3] net/igc/base: support PTP timesync
  2023-01-17 13:26 ` [PATCH v2 0/3] net/igc: support PTP timesync Simei Su
  2023-01-17 13:26   ` [PATCH v2 1/3] net/igc: code refactoring Simei Su
@ 2023-01-17 13:26   ` Simei Su
  2023-01-17 13:26   ` [PATCH v2 3/3] net/igc: support IEEE 1588 PTP Simei Su
  2023-01-29 13:05   ` [PATCH v2 0/3] net/igc: support PTP timesync Zhang, Qi Z
  3 siblings, 0 replies; 11+ messages in thread
From: Simei Su @ 2023-01-17 13:26 UTC (permalink / raw)
  To: qi.z.zhang, junfeng.guo; +Cc: dev, wenjun1.wu, Simei Su

Add the register bit definitions needed to enable PTP timesync: the
SRRCTL timestamp and timer-select fields, and the PHY start-of-packet
sampling bits in TSYNCRXCTL/TSYNCTXCTL.
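
For context, a rough sketch of how these definitions are consumed,
mirroring eth_igc_timesync_enable() in the next patch of this series
("hw" and "queue_idx" are assumed to be supplied by the caller, and the
base register accessors are assumed to come in via the driver headers):

#include "igc_ethdev.h"

/* Sketch only: select timer 0 for both Rx timestamp timers, enable
 * per-queue timestamping in SRRCTL, and sample Rx/Tx timestamps at the
 * PHY start of packet. */
static void
example_program_timestamp_bits(struct igc_hw *hw, uint16_t queue_idx)
{
	uint32_t val;

	val = IGC_READ_REG(hw, IGC_SRRCTL(queue_idx));
	val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) |
	       IGC_SRRCTL_TIMESTAMP;
	IGC_WRITE_REG(hw, IGC_SRRCTL(queue_idx), val);

	IGC_WRITE_REG(hw, IGC_TSYNCRXCTL, IGC_TSYNCRXCTL_ENABLED |
		      IGC_TSYNCRXCTL_TYPE_ALL | IGC_TSYNCRXCTL_RXSYNSIG);
	IGC_WRITE_REG(hw, IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED |
		      IGC_TSYNCTXCTL_TXSYNSIG);
}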

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/igc/base/igc_defines.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/drivers/net/igc/base/igc_defines.h b/drivers/net/igc/base/igc_defines.h
index 61964bc..dd7330a 100644
--- a/drivers/net/igc/base/igc_defines.h
+++ b/drivers/net/igc/base/igc_defines.h
@@ -795,6 +795,17 @@
 
 #define TSYNC_INTERRUPTS	TSINTR_TXTS
 
+/* Split Replication Receive Control */
+#define IGC_SRRCTL_TIMESTAMP           0x40000000
+#define IGC_SRRCTL_TIMER1SEL(timer)    (((timer) & 0x3) << 14)
+#define IGC_SRRCTL_TIMER0SEL(timer)    (((timer) & 0x3) << 17)
+
+/* Sample RX tstamp in PHY sop */
+#define IGC_TSYNCRXCTL_RXSYNSIG         0x00000400
+
+/* Sample TX tstamp in PHY sop */
+#define IGC_TSYNCTXCTL_TXSYNSIG         0x00000020
+
 /* TSAUXC Configuration Bits */
 #define TSAUXC_EN_TT0	(1 << 0)  /* Enable target time 0. */
 #define TSAUXC_EN_TT1	(1 << 1)  /* Enable target time 1. */
-- 
2.9.5


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH v2 3/3] net/igc: support IEEE 1588 PTP
  2023-01-17 13:26 ` [PATCH v2 0/3] net/igc: support PTP timesync Simei Su
  2023-01-17 13:26   ` [PATCH v2 1/3] net/igc: code refactoring Simei Su
  2023-01-17 13:26   ` [PATCH v2 2/3] net/igc/base: support PTP timesync Simei Su
@ 2023-01-17 13:26   ` Simei Su
  2023-01-29 13:05   ` [PATCH v2 0/3] net/igc: support PTP timesync Zhang, Qi Z
  3 siblings, 0 replies; 11+ messages in thread
From: Simei Su @ 2023-01-17 13:26 UTC (permalink / raw)
  To: qi.z.zhang, junfeng.guo; +Cc: dev, wenjun1.wu, Simei Su

Implement the ethdev timesync ops in the igc driver so applications can
enable/disable IEEE 1588 PTP and read, write, and adjust the device
time and Rx/Tx timestamps.

An example command for running the ptpclient sample application:
./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1
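
For reference, a minimal sketch of how an application might exercise
the ops added here through the generic ethdev timesync calls (the port
id, the 10 us polling delay, and the error handling are illustrative
only; the port is assumed to be configured and started):

#include <rte_ethdev.h>
#include <rte_cycles.h>

/* Illustrative round trip through the timesync ops implemented below. */
static int
example_ptp_roundtrip(uint16_t port_id)
{
	struct timespec ts;
	int ret;

	ret = rte_eth_timesync_enable(port_id);
	if (ret != 0)
		return ret;

	/* Read back the device clock (SYSTIML/SYSTIMH on igc). */
	ret = rte_eth_timesync_read_time(port_id, &ts);
	if (ret != 0)
		return ret;

	/* Nudge the device clock forward by 1 microsecond. */
	ret = rte_eth_timesync_adjust_time(port_id, 1000);
	if (ret != 0)
		return ret;

	/* After sending a packet with RTE_MBUF_F_TX_IEEE1588_TMST set,
	 * poll for the latched Tx timestamp; the igc op returns -EINVAL
	 * until the hardware reports a valid sample. */
	while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) != 0)
		rte_delay_us(10);

	return rte_eth_timesync_disable(port_id);
}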

Signed-off-by: Simei Su <simei.su@intel.com>
---
 doc/guides/nics/features/igc.ini       |   1 +
 doc/guides/rel_notes/release_23_03.rst |   3 +
 drivers/net/igc/igc_ethdev.c           | 222 +++++++++++++++++++++++++++++++++
 drivers/net/igc/igc_ethdev.h           |   4 +-
 drivers/net/igc/igc_txrx.c             |  50 +++++++-
 drivers/net/igc/igc_txrx.h             |   1 +
 6 files changed, 276 insertions(+), 5 deletions(-)

diff --git a/doc/guides/nics/features/igc.ini b/doc/guides/nics/features/igc.ini
index b5deea3..25a997c 100644
--- a/doc/guides/nics/features/igc.ini
+++ b/doc/guides/nics/features/igc.ini
@@ -33,6 +33,7 @@ VLAN filter          = Y
 VLAN offload         = Y
 Linux                = Y
 x86-64               = Y
+Timesync             = Y
 
 [rte_flow items]
 eth                  = P
diff --git a/doc/guides/rel_notes/release_23_03.rst b/doc/guides/rel_notes/release_23_03.rst
index b8c5b68..6e086a2 100644
--- a/doc/guides/rel_notes/release_23_03.rst
+++ b/doc/guides/rel_notes/release_23_03.rst
@@ -55,6 +55,9 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel igc driver.**
+
+  * Added timesync API support.
 
 Removed Items
 -------------
diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index dcd262f..ef3346b 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -78,6 +78,16 @@
 #define IGC_ALARM_INTERVAL	8000000u
 /* us, about 13.6s some per-queue registers will wrap around back to 0. */
 
+/* Transmit and receive latency (for PTP timestamps) */
+#define IGC_I225_TX_LATENCY_10		240
+#define IGC_I225_TX_LATENCY_100		58
+#define IGC_I225_TX_LATENCY_1000	80
+#define IGC_I225_TX_LATENCY_2500	1325
+#define IGC_I225_RX_LATENCY_10		6450
+#define IGC_I225_RX_LATENCY_100		185
+#define IGC_I225_RX_LATENCY_1000	300
+#define IGC_I225_RX_LATENCY_2500	1485
+
 static const struct rte_eth_desc_lim rx_desc_lim = {
 	.nb_max = IGC_MAX_RXD,
 	.nb_min = IGC_MIN_RXD,
@@ -245,6 +255,18 @@ eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
 static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 		      enum rte_vlan_type vlan_type, uint16_t tpid);
+static int eth_igc_timesync_enable(struct rte_eth_dev *dev);
+static int eth_igc_timesync_disable(struct rte_eth_dev *dev);
+static int eth_igc_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp,
+					  uint32_t flags);
+static int eth_igc_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp);
+static int eth_igc_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int eth_igc_timesync_read_time(struct rte_eth_dev *dev,
+				  struct timespec *timestamp);
+static int eth_igc_timesync_write_time(struct rte_eth_dev *dev,
+				   const struct timespec *timestamp);
 
 static const struct eth_dev_ops eth_igc_ops = {
 	.dev_configure		= eth_igc_configure,
@@ -298,6 +320,13 @@ static const struct eth_dev_ops eth_igc_ops = {
 	.vlan_tpid_set		= eth_igc_vlan_tpid_set,
 	.vlan_strip_queue_set	= eth_igc_vlan_strip_queue_set,
 	.flow_ops_get		= eth_igc_flow_ops_get,
+	.timesync_enable	= eth_igc_timesync_enable,
+	.timesync_disable	= eth_igc_timesync_disable,
+	.timesync_read_rx_timestamp = eth_igc_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp = eth_igc_timesync_read_tx_timestamp,
+	.timesync_adjust_time	= eth_igc_timesync_adjust_time,
+	.timesync_read_time	= eth_igc_timesync_read_time,
+	.timesync_write_time	= eth_igc_timesync_write_time,
 };
 
 /*
@@ -2582,6 +2611,199 @@ eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 }
 
 static int
+eth_igc_timesync_enable(struct rte_eth_dev *dev)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	struct timespec system_time;
+	struct igc_rx_queue *rxq;
+	uint32_t val;
+	uint16_t i;
+
+	IGC_WRITE_REG(hw, IGC_TSAUXC, 0x0);
+
+	clock_gettime(CLOCK_REALTIME, &system_time);
+	IGC_WRITE_REG(hw, IGC_SYSTIML, system_time.tv_nsec);
+	IGC_WRITE_REG(hw, IGC_SYSTIMH, system_time.tv_sec);
+
+	/* Enable timestamping of received PTP packets. */
+	val = IGC_READ_REG(hw, IGC_RXPBS);
+	val |= IGC_RXPBS_CFG_TS_EN;
+	IGC_WRITE_REG(hw, IGC_RXPBS, val);
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		val = IGC_READ_REG(hw, IGC_SRRCTL(i));
+		/* For now, only support retrieving Rx timestamp from timer0. */
+		val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) |
+		       IGC_SRRCTL_TIMESTAMP;
+		IGC_WRITE_REG(hw, IGC_SRRCTL(i), val);
+	}
+
+	val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL |
+	      IGC_TSYNCRXCTL_RXSYNSIG;
+	IGC_WRITE_REG(hw, IGC_TSYNCRXCTL, val);
+
+	/* Enable Timestamping of transmitted PTP packets. */
+	IGC_WRITE_REG(hw, IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED |
+		      IGC_TSYNCTXCTL_TXSYNSIG);
+
+	/* Read TXSTMP registers to discard any timestamp previously stored. */
+	IGC_READ_REG(hw, IGC_TXSTMPL);
+	IGC_READ_REG(hw, IGC_TXSTMPH);
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		rxq->offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+	}
+
+	return 0;
+}
+
+static int
+eth_igc_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+	ts->tv_nsec = IGC_READ_REG(hw, IGC_SYSTIML);
+	ts->tv_sec = IGC_READ_REG(hw, IGC_SYSTIMH);
+
+	return 0;
+}
+
+static int
+eth_igc_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+
+	IGC_WRITE_REG(hw, IGC_SYSTIML, ts->tv_nsec);
+	IGC_WRITE_REG(hw, IGC_SYSTIMH, ts->tv_sec);
+
+	return 0;
+}
+
+static int
+eth_igc_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	uint32_t nsec, sec;
+	uint64_t systime, ns;
+	struct timespec ts;
+
+	nsec = (uint64_t)IGC_READ_REG(hw, IGC_SYSTIML);
+	sec = (uint64_t)IGC_READ_REG(hw, IGC_SYSTIMH);
+	systime = sec * NSEC_PER_SEC + nsec;
+
+	ns = systime + delta;
+	ts = rte_ns_to_timespec(ns);
+
+	IGC_WRITE_REG(hw, IGC_SYSTIML, ts.tv_nsec);
+	IGC_WRITE_REG(hw, IGC_SYSTIMH, ts.tv_sec);
+
+	return 0;
+}
+
+static int
+eth_igc_timesync_read_rx_timestamp(__rte_unused struct rte_eth_dev *dev,
+			       struct timespec *timestamp,
+			       uint32_t flags)
+{
+	struct rte_eth_link link;
+	int adjust = 0;
+	struct igc_rx_queue *rxq;
+	uint64_t rx_timestamp;
+
+	/* Get current link speed. */
+	eth_igc_link_update(dev, 1);
+	rte_eth_linkstatus_get(dev, &link);
+
+	switch (link.link_speed) {
+	case SPEED_10:
+		adjust = IGC_I225_RX_LATENCY_10;
+		break;
+	case SPEED_100:
+		adjust = IGC_I225_RX_LATENCY_100;
+		break;
+	case SPEED_1000:
+		adjust = IGC_I225_RX_LATENCY_1000;
+		break;
+	case SPEED_2500:
+		adjust = IGC_I225_RX_LATENCY_2500;
+		break;
+	}
+
+	rxq = dev->data->rx_queues[flags];
+	rx_timestamp = rxq->rx_timestamp - adjust;
+	*timestamp = rte_ns_to_timespec(rx_timestamp);
+
+	return 0;
+}
+
+static int
+eth_igc_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+			       struct timespec *timestamp)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	struct rte_eth_link link;
+	uint32_t val, nsec, sec;
+	uint64_t tx_timestamp;
+	int adjust = 0;
+
+	val = IGC_READ_REG(hw, IGC_TSYNCTXCTL);
+	if (!(val & IGC_TSYNCTXCTL_VALID))
+		return -EINVAL;
+
+	nsec = (uint64_t)IGC_READ_REG(hw, IGC_TXSTMPL);
+	sec = (uint64_t)IGC_READ_REG(hw, IGC_TXSTMPH);
+	tx_timestamp = sec * NSEC_PER_SEC + nsec;
+
+	/* Get current link speed. */
+	eth_igc_link_update(dev, 1);
+	rte_eth_linkstatus_get(dev, &link);
+
+	switch (link.link_speed) {
+	case SPEED_10:
+		adjust = IGC_I225_TX_LATENCY_10;
+		break;
+	case SPEED_100:
+		adjust = IGC_I225_TX_LATENCY_100;
+		break;
+	case SPEED_1000:
+		adjust = IGC_I225_TX_LATENCY_1000;
+		break;
+	case SPEED_2500:
+		adjust = IGC_I225_TX_LATENCY_2500;
+		break;
+	}
+
+	tx_timestamp += adjust;
+	*timestamp = rte_ns_to_timespec(tx_timestamp);
+
+	return 0;
+}
+
+static int
+eth_igc_timesync_disable(struct rte_eth_dev *dev)
+{
+	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+	uint32_t val;
+
+	/* Disable timestamping of transmitted PTP packets. */
+	IGC_WRITE_REG(hw, IGC_TSYNCTXCTL, 0);
+
+	/* Disable timestamping of received PTP packets. */
+	IGC_WRITE_REG(hw, IGC_TSYNCRXCTL, 0);
+
+	val = IGC_READ_REG(hw, IGC_RXPBS);
+	val &= IGC_RXPBS_CFG_TS_EN;
+	IGC_WRITE_REG(hw, IGC_RXPBS, val);
+
+	val = IGC_READ_REG(hw, IGC_SRRCTL(0));
+	val &= ~IGC_SRRCTL_TIMESTAMP;
+	IGC_WRITE_REG(hw, IGC_SRRCTL(0), val);
+
+	return 0;
+}
+
+static int
 eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/igc/igc_ethdev.h b/drivers/net/igc/igc_ethdev.h
index f56cad7..237d3c1 100644
--- a/drivers/net/igc/igc_ethdev.h
+++ b/drivers/net/igc/igc_ethdev.h
@@ -7,6 +7,7 @@
 
 #include <rte_ethdev.h>
 #include <rte_flow.h>
+#include <rte_time.h>
 
 #include "base/igc_osdep.h"
 #include "base/igc_hw.h"
@@ -75,7 +76,8 @@ extern "C" {
 	RTE_ETH_RX_OFFLOAD_SCTP_CKSUM  | \
 	RTE_ETH_RX_OFFLOAD_KEEP_CRC    | \
 	RTE_ETH_RX_OFFLOAD_SCATTER     | \
-	RTE_ETH_RX_OFFLOAD_RSS_HASH)
+	RTE_ETH_RX_OFFLOAD_RSS_HASH    | \
+	RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 
 #define IGC_TX_OFFLOAD_ALL	(    \
 	RTE_ETH_TX_OFFLOAD_VLAN_INSERT | \
diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index c462e91..0236c7f 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -81,7 +81,8 @@
 		RTE_MBUF_F_TX_IP_CKSUM |	\
 		RTE_MBUF_F_TX_L4_MASK |	\
 		RTE_MBUF_F_TX_TCP_SEG |	\
-		RTE_MBUF_F_TX_UDP_SEG)
+		RTE_MBUF_F_TX_UDP_SEG | \
+		RTE_MBUF_F_TX_IEEE1588_TMST)
 
 #define IGC_TX_OFFLOAD_SEG	(RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)
 
@@ -93,6 +94,8 @@
 
 #define IGC_TX_OFFLOAD_NOTSUP_MASK (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
 
+#define IGC_TS_HDR_LEN 16
+
 static inline uint64_t
 rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)
 {
@@ -222,6 +225,9 @@ rx_desc_get_pkt_info(struct igc_rx_queue *rxq, struct rte_mbuf *rxm,
 
 	pkt_flags |= rx_desc_statuserr_to_pkt_flags(staterr);
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		pkt_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+
 	rxm->ol_flags = pkt_flags;
 	pkt_info = rte_le_to_cpu_16(rxd->wb.lower.lo_dword.hs_rss.pkt_info);
 	rxm->packet_type = rx_desc_pkt_info_to_pkt_type(pkt_info);
@@ -328,8 +334,15 @@ igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		rxdp->read.hdr_addr = 0;
-		rxdp->read.pkt_addr =
+
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+			rxdp->read.pkt_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)) -
+			IGC_TS_HDR_LEN;
+		else
+			rxdp->read.pkt_addr =
 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+
 		rxm->next = NULL;
 
 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
@@ -340,6 +353,14 @@ igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		rx_desc_get_pkt_info(rxq, rxm, &rxd, staterr);
 
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			uint32_t *ts = rte_pktmbuf_mtod_offset(rxm,
+					uint32_t *, -IGC_TS_HDR_LEN);
+			rxq->rx_timestamp = (uint64_t)ts[3] * NSEC_PER_SEC +
+					ts[2];
+			rxm->timesync = rxq->queue_id;
+		}
+
 		/*
 		 * Store the mbuf address into the next entry of the array
 		 * of returned packets.
@@ -472,8 +493,15 @@ igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm = rxe->mbuf;
 		rxe->mbuf = nmb;
 		rxdp->read.hdr_addr = 0;
-		rxdp->read.pkt_addr =
+
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+			rxdp->read.pkt_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb)) -
+				IGC_TS_HDR_LEN;
+		else
+			rxdp->read.pkt_addr =
 			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+
 		rxm->next = NULL;
 
 		/*
@@ -537,6 +565,14 @@ igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 		rx_desc_get_pkt_info(rxq, first_seg, &rxd, staterr);
 
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+			uint32_t *ts = rte_pktmbuf_mtod_offset(first_seg,
+					uint32_t *, -IGC_TS_HDR_LEN);
+			rxq->rx_timestamp = (uint64_t)ts[3] * NSEC_PER_SEC +
+					ts[2];
+			rxm->timesync = rxq->queue_id;
+		}
+
 		/*
 		 * Store the mbuf address into the next entry of the array
 		 * of returned packets.
@@ -682,7 +718,10 @@ igc_alloc_rx_queue_mbufs(struct igc_rx_queue *rxq)
 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
 		rxd = &rxq->rx_ring[i];
 		rxd->read.hdr_addr = 0;
-		rxd->read.pkt_addr = dma_addr;
+		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+			rxd->read.pkt_addr = dma_addr - IGC_TS_HDR_LEN;
+		else
+			rxd->read.pkt_addr = dma_addr;
 		rxe[i].mbuf = mbuf;
 	}
 
@@ -985,6 +1024,9 @@ igc_rx_init(struct rte_eth_dev *dev)
 		rxq = dev->data->rx_queues[i];
 		rxq->flags = 0;
 
+		if (offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+			rxq->offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
 		/* Allocate buffers for descriptor rings and set up queue */
 		ret = igc_alloc_rx_queue_mbufs(rxq);
 		if (ret)
diff --git a/drivers/net/igc/igc_txrx.h b/drivers/net/igc/igc_txrx.h
index 5731761..e7272f8 100644
--- a/drivers/net/igc/igc_txrx.h
+++ b/drivers/net/igc/igc_txrx.h
@@ -42,6 +42,7 @@ struct igc_rx_queue {
 	uint8_t             drop_en;    /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
 	uint64_t            offloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
+	uint64_t            rx_timestamp;
 };
 
 /** Offload features */
-- 
2.9.5


^ permalink raw reply	[flat|nested] 11+ messages in thread

* RE: [PATCH v2 0/3] net/igc: support PTP timesync
  2023-01-17 13:26 ` [PATCH v2 0/3] net/igc: support PTP timesync Simei Su
                     ` (2 preceding siblings ...)
  2023-01-17 13:26   ` [PATCH v2 3/3] net/igc: support IEEE 1588 PTP Simei Su
@ 2023-01-29 13:05   ` Zhang, Qi Z
  3 siblings, 0 replies; 11+ messages in thread
From: Zhang, Qi Z @ 2023-01-29 13:05 UTC (permalink / raw)
  To: Su, Simei, Guo, Junfeng; +Cc: dev, Wu, Wenjun1



> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Tuesday, January 17, 2023 9:26 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Guo, Junfeng
> <junfeng.guo@intel.com>
> Cc: dev@dpdk.org; Wu, Wenjun1 <wenjun1.wu@intel.com>; Su, Simei
> <simei.su@intel.com>
> Subject: [PATCH v2 0/3] net/igc: support PTP timesync
> 
> [PATCH v2 1/3] code refactoring.
> [PATCH v2 2/3] add related definitions for ptp timesync.
> [PATCH v2 3/3] add IEEE1588 API to support timesync.
> 
> v2:
> * Refine commit log.
> * Update the doc/guides/nics/features/igc.ini to add "Timesync" feature.
> * Add release notes.
> 
> Simei Su (3):
>   net/igc: code refactoring
>   net/igc/base: support PTP timesync
>   net/igc: support IEEE 1588 PTP
> 
>  doc/guides/nics/features/igc.ini       |   1 +
>  doc/guides/rel_notes/release_23_03.rst |   3 +
>  drivers/net/igc/base/igc_defines.h     |  11 ++
>  drivers/net/igc/igc_ethdev.c           | 222
> +++++++++++++++++++++++++++++++++
>  drivers/net/igc/igc_ethdev.h           |   4 +-
>  drivers/net/igc/igc_txrx.c             | 166 +++++++-----------------
>  drivers/net/igc/igc_txrx.h             | 116 +++++++++++++++++
>  7 files changed, 401 insertions(+), 122 deletions(-)
> 
> --
> 2.9.5

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Refined the title and commit log of PATCH 2/3 as below:

net/igc/base: expose timesync registers

Add definitions for timesync related registers.

Applied to dpdk-next-net-intel.

Thanks
Qi


^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2023-01-29 13:05 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-12-20  3:41 [PATCH 0/3] net/igc: support PTP timesync Simei Su
2022-12-20  3:41 ` [PATCH 1/3] net/igc: code refactoring Simei Su
2023-01-17  2:25   ` Zhang, Qi Z
2023-01-17  4:00     ` Su, Simei
2022-12-20  3:41 ` [PATCH 2/3] net/igc/base: support PTP timesync Simei Su
2022-12-20  3:41 ` [PATCH 3/3] net/igc: support IEEE 1588 PTP Simei Su
2023-01-17 13:26 ` [PATCH v2 0/3] net/igc: support PTP timesync Simei Su
2023-01-17 13:26   ` [PATCH v2 1/3] net/igc: code refactoring Simei Su
2023-01-17 13:26   ` [PATCH v2 2/3] net/igc/base: support PTP timesync Simei Su
2023-01-17 13:26   ` [PATCH v2 3/3] net/igc: support IEEE 1588 PTP Simei Su
2023-01-29 13:05   ` [PATCH v2 0/3] net/igc: support PTP timesync Zhang, Qi Z
