DPDK patches and discussions
* [PATCH v1 0/3] net/iavf: support Rx timestamp on flex descriptor
@ 2022-04-08  2:13 Simei Su
  2022-04-08  2:13 ` [PATCH v1 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
                   ` (3 more replies)
  0 siblings, 4 replies; 14+ messages in thread
From: Simei Su @ 2022-04-08  2:13 UTC (permalink / raw)
  To: qi.z.zhang, qiming.yang; +Cc: dev, wenjun1.wu, Simei Su

[PATCH v1 1/3] add related ops and structure for Rx timestamp in virtual channel.
[PATCH v1 2/3] add support for Rx timestamp on flex descriptor in driver.
[PATCH v1 3/3] improve performance with Rx timestamp enabled.

Simei Su (2):
  common/iavf: support Rx timestamp in virtual channel
  net/iavf: enable Rx timestamp on Flex Descriptor

Wenjun Wu (1):
  net/iavf: improve performance of Rx timestamp offload

 drivers/common/iavf/virtchnl.h          | 62 ++++++++++++++++++++++--
 drivers/net/iavf/iavf.h                 |  6 +++
 drivers/net/iavf/iavf_ethdev.c          | 34 ++++++++++++++
 drivers/net/iavf/iavf_rxtx.c            | 72 ++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h            | 21 +++++++++
 drivers/net/iavf/iavf_rxtx_vec_common.h |  3 ++
 drivers/net/iavf/iavf_vchnl.c           | 83 ++++++++++++++++++++++++++++-----
 7 files changed, 267 insertions(+), 14 deletions(-)

-- 
2.9.5



* [PATCH v1 1/3] common/iavf: support Rx timestamp in virtual channel
  2022-04-08  2:13 [PATCH v1 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
@ 2022-04-08  2:13 ` Simei Su
  2022-04-08  2:13 ` [PATCH v1 2/3] net/iavf: enable Rx timestamp on Flex Descriptor Simei Su
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 14+ messages in thread
From: Simei Su @ 2022-04-08  2:13 UTC (permalink / raw)
  To: qi.z.zhang, qiming.yang; +Cc: dev, wenjun1.wu, Simei Su

Add new ops and structures to support Rx timestamp on flex
descriptor for the VF.

"VIRTCHNL_OP_1588_PTP_GET_CAPS" ops is sent by the VF to request PTP
capablilities and responded by the PF with capabilities enabled for
that VF.

"VIRTCHNL_OP_1588_PTP_GET_TIME" ops is sent by the VF to request
the current time of the PHC. The PF will respond by reading the
device time and reporting it back to the VF.
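
For illustration only, not part of the patch: a minimal sketch of how a
VF driver could use the two new messages, assuming a hypothetical
vf_send_vc_msg() helper standing in for the driver's virtchnl/admin-queue
command path. Only the ops and structures come from this patch; the
helper, the context pointer and the function names are illustrative.

/* Sketch only -- vf_send_vc_msg() is an assumed helper, not a real API.
 * Requires <stdint.h> and the virtchnl.h additions shown in the diff.
 */
int vf_send_vc_msg(void *vf, int op, void *in, size_t in_len,
		   void *out, size_t out_len);

static int vf_query_ptp(void *vf, uint64_t *phc_time_out)
{
	struct virtchnl_ptp_caps caps = {
		/* capabilities the VF would like enabled */
		.caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
			VIRTCHNL_1588_PTP_CAP_READ_PHC,
	};
	struct virtchnl_phc_time phc = { 0 };
	int err;

	/* PF answers with the subset of capabilities enabled for this VF */
	err = vf_send_vc_msg(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS,
			     &caps, sizeof(caps), &caps, sizeof(caps));
	if (err)
		return err;

	if (!(caps.caps & VIRTCHNL_1588_PTP_CAP_READ_PHC))
		return 0;

	/* PF reads the device clock and reports it back in phc.time */
	err = vf_send_vc_msg(vf, VIRTCHNL_OP_1588_PTP_GET_TIME,
			     &phc, sizeof(phc), &phc, sizeof(phc));
	if (err)
		return err;

	*phc_time_out = phc.time;
	return 0;
}

The driver-side users of these messages appear in patch 2/3 of this
series as iavf_get_ptp_cap() and iavf_get_phc_time().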

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/common/iavf/virtchnl.h | 62 ++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 59 insertions(+), 3 deletions(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 3e44eca..d3a99e9 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -159,6 +159,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
 	VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,
 	VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,
+	VIRTCHNL_OP_1588_PTP_GET_CAPS = 60,
+	VIRTCHNL_OP_1588_PTP_GET_TIME = 61,
 	VIRTCHNL_OP_GET_QOS_CAPS = 66,
 	VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,
 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
@@ -274,6 +276,10 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2";
 	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
 		return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2";
+	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+		return "VIRTCHNL_OP_1588_PTP_GET_CAPS";
+	case VIRTCHNL_OP_1588_PTP_GET_TIME:
+		return "VIRTCHNL_OP_1588_PTP_GET_TIME";
 	case VIRTCHNL_OP_MAX:
 		return "VIRTCHNL_OP_MAX";
 	default:
@@ -409,8 +415,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF		BIT(28)
 #define VIRTCHNL_VF_OFFLOAD_QOS		BIT(29)
 #define VIRTCHNL_VF_CAP_DCF			BIT(30)
-	/* BIT(31) is reserved */
-
+#define VIRTCHNL_VF_CAP_PTP			BIT(31)
 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
 			       VIRTCHNL_VF_OFFLOAD_VLAN | \
 			       VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -496,6 +501,18 @@ enum virtchnl_rx_desc_id_bitmasks {
 	/* 22 through 63 are reserved */
 };
 
+/* virtchnl_rxq_info_flags
+ *
+ * Definition of bits in the flags field of the virtchnl_rxq_info structure.
+ */
+enum virtchnl_rxq_info_flags {
+	/* If the VIRTCHNL_PTP_RX_TSTAMP bit of the flag field is set, this is
+	 * a request to enable Rx timestamp. Other flag bits are currently
+	 * reserved and they may be extended in the future.
+	 */
+	VIRTCHNL_PTP_RX_TSTAMP = BIT(0),
+};
+
 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
  * VF sends this message to set up parameters for one RX queue.
  * External data buffer contains one instance of virtchnl_rxq_info.
@@ -524,7 +541,8 @@ struct virtchnl_rxq_info {
 	 * with VIRTCHNL_RXDID_1_32B_BASE.
 	 */
 	u8 rxdid;
-	u8 pad1[2];
+	u8 flags; /* see virtchnl_rxq_info_flags */
+	u8 pad1;
 	u64 dma_ring_addr;
 
 	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
@@ -1978,6 +1996,38 @@ struct virtchnl_queue_vector_maps {
 
 VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
 
+#define VIRTCHNL_1588_PTP_CAP_RX_TSTAMP		BIT(1)
+#define VIRTCHNL_1588_PTP_CAP_READ_PHC          BIT(2)
+
+struct virtchnl_phc_regs {
+	u32 clock_hi;
+	u32 clock_lo;
+	u8 pcie_region;
+	u8 rsvd[15];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_phc_regs);
+
+struct virtchnl_ptp_caps {
+	struct virtchnl_phc_regs phc_regs;
+	u32 caps;
+	s32 max_adj;
+	u8 tx_tstamp_idx;
+	u8 n_ext_ts;
+	u8 n_per_out;
+	u8 n_pins;
+	u8 tx_tstamp_format;
+	u8 rsvd[11];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_ptp_caps);
+
+struct virtchnl_phc_time {
+	uint64_t time;
+	uint8_t rsvd[8];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_time);
 
 /* Since VF messages are limited by u16 size, precalculate the maximum possible
  * values of nested elements in virtchnl structures that virtual channel can
@@ -2271,6 +2321,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
 		valid_len = sizeof(struct virtchnl_vlan_setting);
 		break;
+	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+		valid_len = sizeof(struct virtchnl_ptp_caps);
+		break;
+	case VIRTCHNL_OP_1588_PTP_GET_TIME:
+		valid_len = sizeof(struct virtchnl_phc_time);
+		break;
 	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
 	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
 		valid_len = sizeof(struct virtchnl_del_ena_dis_queues);
-- 
2.9.5



* [PATCH v1 2/3] net/iavf: enable Rx timestamp on Flex Descriptor
  2022-04-08  2:13 [PATCH v1 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
  2022-04-08  2:13 ` [PATCH v1 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
@ 2022-04-08  2:13 ` Simei Su
  2022-04-08  2:13 ` [PATCH v1 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
  2022-04-24  7:08 ` [PATCH v2 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
  3 siblings, 0 replies; 14+ messages in thread
From: Simei Su @ 2022-04-08  2:13 UTC (permalink / raw)
  To: qi.z.zhang, qiming.yang; +Cc: dev, wenjun1.wu, Simei Su

Dump the Rx timestamp value from the flex descriptor into the dynamic
mbuf field. This feature is turned on by the dev config
"enable-rx-timestamp". Currently, it is only supported on the scalar
path.
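
As an illustration of how an application consumes this offload (not part
of the patch): request RTE_ETH_RX_OFFLOAD_TIMESTAMP at configure time and
read the registered dynamic mbuf field after rte_eth_rx_burst(). This
sketch uses only the generic rte_mbuf_dyn API; error handling is trimmed.

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>
#include <rte_mbuf_dyn.h>

static int timestamp_off;       /* dynamic field offset */
static uint64_t timestamp_flag; /* dynamic flag mask */

/* Configure a port with the Rx timestamp offload and register the field */
static int setup_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = { 0 };

	conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
	if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf) < 0)
		return -1;
	return rte_mbuf_dyn_rx_timestamp_register(&timestamp_off,
						  &timestamp_flag);
}

/* Read the Rx timestamp written by the PMD, if present on this mbuf */
static void print_rx_timestamp(struct rte_mbuf *m)
{
	if (m->ol_flags & timestamp_flag) {
		rte_mbuf_timestamp_t ts = *RTE_MBUF_DYNFIELD(m,
			timestamp_off, rte_mbuf_timestamp_t *);
		printf("rx timestamp: %" PRIu64 "\n", (uint64_t)ts);
	}
}

In testpmd the same offload corresponds to the "enable-rx-timestamp"
option referenced above.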

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/net/iavf/iavf.h                 |  5 ++
 drivers/net/iavf/iavf_ethdev.c          | 26 +++++++++++
 drivers/net/iavf/iavf_rxtx.c            | 58 +++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h            | 22 +++++++++
 drivers/net/iavf/iavf_rxtx_vec_common.h |  3 ++
 drivers/net/iavf/iavf_vchnl.c           | 83 ++++++++++++++++++++++++++++-----
 6 files changed, 186 insertions(+), 11 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e..2838b5e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -257,6 +257,8 @@ struct iavf_info {
 	struct iavf_tm_conf tm_conf;
 
 	struct rte_eth_dev *eth_dev;
+
+	uint32_t ptp_caps;
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -300,6 +302,7 @@ struct iavf_adapter {
 	bool stopped;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
+	uint64_t phc_time;
 };
 
 /* IAVF_DEV_PRIVATE_TO */
@@ -460,4 +463,6 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 		uint8_t *msg, size_t msg_len,
 		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
+int iavf_get_ptp_cap(struct iavf_adapter *adapter);
+int iavf_get_phc_time(struct iavf_adapter *adapter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac..704c174 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -35,6 +35,9 @@
 /* devargs */
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
 
+uint64_t iavf_timestamp_dynflag;
+int iavf_timestamp_dynfield_offset = -1;
+
 static const char * const iavf_valid_args[] = {
 	IAVF_PROTO_XTR_ARG,
 	NULL
@@ -685,6 +688,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 	struct rte_eth_dev_data *dev_data = dev->data;
 	uint16_t buf_size, max_pkt_len;
 	uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
+	enum iavf_status err;
 
 	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
 
@@ -703,6 +707,18 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 		return -EINVAL;
 	}
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		/* Register mbuf field and flag for Rx timestamp */
+		err = rte_mbuf_dyn_rx_timestamp_register(
+			&iavf_timestamp_dynfield_offset,
+			&iavf_timestamp_dynflag);
+		if (err) {
+			PMD_DRV_LOG(ERR,
+				"Cannot register mbuf field/flag for timestamp");
+			return -EINVAL;
+		}
+	}
+
 	rxq->max_pkt_len = max_pkt_len;
 	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    rxq->max_pkt_len > buf_size) {
@@ -945,6 +961,13 @@ iavf_dev_start(struct rte_eth_dev *dev)
 			return -1;
 		}
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP) {
+		if (iavf_get_ptp_cap(adapter)) {
+			PMD_INIT_LOG(ERR, "Failed to get ptp capability");
+			return -1;
+		}
+	}
+
 	if (iavf_init_queues(dev) != 0) {
 		PMD_DRV_LOG(ERR, "failed to do Queue init");
 		return -1;
@@ -1087,6 +1110,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
 	if (iavf_ipsec_crypto_supported(adapter)) {
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 764218a..ab5b3de 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1429,6 +1429,11 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 	rx_id = rxq->rx_tail;
 	rx_ring = rxq->rx_ring;
 	ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+	struct iavf_adapter *ad = rxq->vsi->adapter;
+	uint64_t ts_ns;
+
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		rxq->hw_register_set = 1;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
@@ -1491,6 +1496,21 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 				&rxq->stats.ipsec_crypto);
 		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+
+		if (iavf_timestamp_dynflag > 0) {
+			if (rxq->hw_register_set)
+				iavf_get_phc_time(ad);
+
+			rxq->hw_register_set = 0;
+			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+			*RTE_MBUF_DYNFIELD(rxm,
+				iavf_timestamp_dynfield_offset,
+				rte_mbuf_timestamp_t *) = ts_ns;
+			rxm->ol_flags |= iavf_timestamp_dynflag;
+		}
+
 		rxm->ol_flags |= pkt_flags;
 
 		rx_pkts[nb_rx++] = rxm;
@@ -1519,11 +1539,16 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t rx_stat_err0;
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
+	struct iavf_adapter *ad = rxq->vsi->adapter;
+	uint64_t ts_ns;
 
 	volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
 	volatile union iavf_rx_flex_desc *rxdp;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		rxq->hw_register_set = 1;
+
 	while (nb_rx < nb_pkts) {
 		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
 		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
@@ -1636,6 +1661,20 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
+		if (iavf_timestamp_dynflag > 0) {
+			if (rxq->hw_register_set)
+				iavf_get_phc_time(ad);
+
+			rxq->hw_register_set = 0;
+			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+			*RTE_MBUF_DYNFIELD(first_seg,
+				iavf_timestamp_dynfield_offset,
+				rte_mbuf_timestamp_t *) = ts_ns;
+			first_seg->ol_flags |= iavf_timestamp_dynflag;
+		}
+
 		first_seg->ol_flags |= pkt_flags;
 
 		/* Prefetch data of first segment, if configured to do so. */
@@ -1828,6 +1867,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 	int32_t i, j, nb_rx = 0;
 	uint64_t pkt_flags;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+	struct iavf_adapter *ad = rxq->vsi->adapter;
+	uint64_t ts_ns;
 
 	rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -1838,6 +1879,9 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 	if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
 		return 0;
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		rxq->hw_register_set = 1;
+
 	/* Scan LOOK_AHEAD descriptors at a time to determine which
 	 * descriptors reference packets that are ready to be received.
 	 */
@@ -1896,6 +1940,20 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
 
+			if (iavf_timestamp_dynflag > 0) {
+				if (rxq->hw_register_set)
+					iavf_get_phc_time(ad);
+
+				rxq->hw_register_set = 0;
+				ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+
+				*RTE_MBUF_DYNFIELD(mb,
+					iavf_timestamp_dynfield_offset,
+					rte_mbuf_timestamp_t *) = ts_ns;
+				mb->ol_flags |= iavf_timestamp_dynflag;
+			}
+
 			mb->ol_flags |= pkt_flags;
 		}
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebb..37453c4 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -72,6 +72,9 @@
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
 
+extern uint64_t iavf_timestamp_dynflag;
+extern int iavf_timestamp_dynfield_offset;
+
 /**
  * Rx Flex Descriptors
  * These descriptors are used instead of the legacy version descriptors
@@ -219,6 +222,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
+	uint32_t hw_register_set;
 };
 
 struct iavf_tx_entry {
@@ -778,6 +782,24 @@ void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
 	}
 }
 
+static inline
+uint64_t iavf_tstamp_convert_32b_64b(uint64_t time, uint32_t in_timestamp)
+{
+	const uint64_t mask = 0xFFFFFFFF;
+	uint32_t delta;
+	uint64_t ns;
+
+	delta = (in_timestamp - (uint32_t)(time & mask));
+	if (delta > (mask / 2)) {
+		delta = ((uint32_t)(time & mask) - in_timestamp);
+		ns = time - delta;
+	} else {
+		ns = time + delta;
+	}
+
+	return ns;
+}
+
 #ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
 #define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
 	iavf_dump_rx_descriptor(rxq, desc, rx_id)
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 1fd37b7..a59cb2c 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -231,6 +231,9 @@ iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq)
 	if (rxq->proto_xtr != IAVF_PROTO_XTR_NONE)
 		return -1;
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		return -1;
+
 	if (rxq->offloads & IAVF_RX_VECTOR_OFFLOAD)
 		return IAVF_VECTOR_OFFLOAD_PATH;
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 169e1f2..0d8a8dd 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -502,7 +502,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
 		VIRTCHNL_VF_OFFLOAD_QOS |
-		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO |
+		VIRTCHNL_VF_CAP_PTP;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -1047,16 +1048,21 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 		vc_qp->rxq.crc_disable = rxq[i]->crc_len != 0 ? 1 : 0;
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 		if (vf->vf_res->vf_cap_flags &
-		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
-		    vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
-			vc_qp->rxq.rxdid = rxq[i]->rxdid;
-			PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
-				    vc_qp->rxq.rxdid, i);
-		} else {
-			PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
-				    "request default RXDID[%d] in Queue[%d]",
-				    rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
-			vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
+		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+			if (vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
+				vc_qp->rxq.rxdid = rxq[i]->rxdid;
+				PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
+					    vc_qp->rxq.rxdid, i);
+			} else {
+				PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
+					    "request default RXDID[%d] in Queue[%d]",
+					    rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
+				vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
+			}
+
+			if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP &&
+			    vf->ptp_caps & VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)
+				vc_qp->rxq.flags |= VIRTCHNL_PTP_RX_TSTAMP;
 		}
 #else
 		if (vf->vf_res->vf_cap_flags &
@@ -1805,3 +1811,58 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 
 	return 0;
 }
+
+int
+iavf_get_ptp_cap(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_ptp_caps ptp_caps;
+	struct iavf_cmd_info args;
+	int err;
+
+	ptp_caps.caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
+			VIRTCHNL_1588_PTP_CAP_READ_PHC;
+
+	args.ops = VIRTCHNL_OP_1588_PTP_GET_CAPS;
+	args.in_args = (uint8_t *)&ptp_caps;
+	args.in_args_size = sizeof(ptp_caps);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_1588_PTP_GET_CAPS");
+		return err;
+	}
+
+	vf->ptp_caps = ((struct virtchnl_ptp_caps *)args.out_buffer)->caps;
+
+	return 0;
+}
+
+int
+iavf_get_phc_time(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_phc_time phc_time;
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_1588_PTP_GET_TIME;
+	args.in_args = (uint8_t *)&phc_time;
+	args.in_args_size = sizeof(phc_time);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
+		return err;
+	}
+
+	adapter->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
+
+	return 0;
+}
-- 
2.9.5



* [PATCH v1 3/3] net/iavf: improve performance of Rx timestamp offload
  2022-04-08  2:13 [PATCH v1 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
  2022-04-08  2:13 ` [PATCH v1 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
  2022-04-08  2:13 ` [PATCH v1 2/3] net/iavf: enable Rx timestamp on Flex Descriptor Simei Su
@ 2022-04-08  2:13 ` Simei Su
  2022-04-24  7:08 ` [PATCH v2 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
  3 siblings, 0 replies; 14+ messages in thread
From: Simei Su @ 2022-04-08  2:13 UTC (permalink / raw)
  To: qi.z.zhang, qiming.yang; +Cc: dev, wenjun1.wu

From: Wenjun Wu <wenjun1.wu@intel.com>

In this patch, we use CPU ticks instead of the HW register to
determine whether the low 32-bit timestamp has rolled over. This
avoids reading the register value frequently and improves receive
performance.
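
To make the mechanism concrete, a hedged sketch of the two pieces the
diff below combines (names are illustrative only): a millisecond
software clock derived from CPU ticks that decides when the cached PHC
value is stale, and the 32-bit to 64-bit extension that assumes the
descriptor timestamp lies within half the 32-bit range of the cached
time.

#include <stdint.h>

/* Milliseconds derived from CPU ticks; in the driver this is
 * rte_get_timer_cycles() / (rte_get_timer_hz() / 1000).
 */
static inline uint64_t sw_time_ms(uint64_t cycles, uint64_t hz)
{
	return cycles / (hz / 1000);
}

/* Extend a 32-bit hardware timestamp with the cached 64-bit PHC time:
 * if the forward delta exceeds half the 32-bit range, assume the
 * counter actually sits behind the cached value.
 */
static inline uint64_t tstamp_32b_to_64b(uint64_t cached, uint32_t ts32)
{
	uint32_t low = (uint32_t)cached;
	uint32_t delta = ts32 - low;

	if (delta > UINT32_MAX / 2)
		return cached - (uint32_t)(low - ts32);
	return cached + delta;
}

With this, the PHC is re-read over virtchnl only when the software
clock has advanced by more than a few milliseconds (the "> 4" check in
the diff) since the last update, instead of once per receive burst.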

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  1 +
 drivers/net/iavf/iavf_ethdev.c |  8 +++++++
 drivers/net/iavf/iavf_rxtx.c   | 50 +++++++++++++++++++++++++++---------------
 drivers/net/iavf/iavf_rxtx.h   |  1 -
 4 files changed, 41 insertions(+), 19 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 2838b5e..ad5c0d4 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -303,6 +303,7 @@ struct iavf_adapter {
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 	uint64_t phc_time;
+	uint64_t hw_time_update;
 };
 
 /* IAVF_DEV_PRIVATE_TO */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 704c174..ffdc368 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1014,6 +1014,14 @@ iavf_dev_start(struct rte_eth_dev *dev)
 		goto err_mac;
 	}
 
+	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		if (iavf_get_phc_time(adapter)) {
+			PMD_DRV_LOG(ERR, "get physical time failed");
+			goto err_mac;
+		}
+		adapter->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+	}
+
 	return 0;
 
 err_mac:
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index ab5b3de..02ee279 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1432,8 +1432,14 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 	struct iavf_adapter *ad = rxq->vsi->adapter;
 	uint64_t ts_ns;
 
-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-		rxq->hw_register_set = 1;
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		if (sw_cur_time - ad->hw_time_update > 4) {
+			if (iavf_get_phc_time(ad))
+				PMD_DRV_LOG(ERR, "get physical time failed");
+			ad->hw_time_update = sw_cur_time;
+		}
+	}
 
 	while (nb_rx < nb_pkts) {
 		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
@@ -1498,13 +1504,12 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
 		if (iavf_timestamp_dynflag > 0) {
-			if (rxq->hw_register_set)
-				iavf_get_phc_time(ad);
-
-			rxq->hw_register_set = 0;
 			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 
+			ad->phc_time = ts_ns;
+			ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
 			*RTE_MBUF_DYNFIELD(rxm,
 				iavf_timestamp_dynfield_offset,
 				rte_mbuf_timestamp_t *) = ts_ns;
@@ -1546,8 +1551,14 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 	volatile union iavf_rx_flex_desc *rxdp;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-		rxq->hw_register_set = 1;
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		if (sw_cur_time - ad->hw_time_update > 4) {
+			if (iavf_get_phc_time(ad))
+				PMD_DRV_LOG(ERR, "get physical time failed");
+			ad->hw_time_update = sw_cur_time;
+		}
+	}
 
 	while (nb_rx < nb_pkts) {
 		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
@@ -1662,13 +1673,12 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
 		if (iavf_timestamp_dynflag > 0) {
-			if (rxq->hw_register_set)
-				iavf_get_phc_time(ad);
-
-			rxq->hw_register_set = 0;
 			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 
+			ad->phc_time = ts_ns;
+			ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
 			*RTE_MBUF_DYNFIELD(first_seg,
 				iavf_timestamp_dynfield_offset,
 				rte_mbuf_timestamp_t *) = ts_ns;
@@ -1879,8 +1889,14 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 	if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
 		return 0;
 
-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-		rxq->hw_register_set = 1;
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		if (sw_cur_time - ad->hw_time_update > 4) {
+			if (iavf_get_phc_time(ad))
+				PMD_DRV_LOG(ERR, "get physical time failed");
+			ad->hw_time_update = sw_cur_time;
+		}
+	}
 
 	/* Scan LOOK_AHEAD descriptors at a time to determine which
 	 * descriptors reference packets that are ready to be received.
@@ -1941,12 +1957,10 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
 
 			if (iavf_timestamp_dynflag > 0) {
-				if (rxq->hw_register_set)
-					iavf_get_phc_time(ad);
-
-				rxq->hw_register_set = 0;
 				ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
 					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+				ad->phc_time = ts_ns;
+				ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
 
 				*RTE_MBUF_DYNFIELD(mb,
 					iavf_timestamp_dynfield_offset,
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 37453c4..642b9a7 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -222,7 +222,6 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
-	uint32_t hw_register_set;
 };
 
 struct iavf_tx_entry {
-- 
2.9.5



* [PATCH v2 0/3] net/iavf: support Rx timestamp on flex descriptor
  2022-04-08  2:13 [PATCH v1 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
                   ` (2 preceding siblings ...)
  2022-04-08  2:13 ` [PATCH v1 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
@ 2022-04-24  7:08 ` Simei Su
  2022-04-24  7:08   ` [PATCH v2 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
                     ` (3 more replies)
  3 siblings, 4 replies; 14+ messages in thread
From: Simei Su @ 2022-04-24  7:08 UTC (permalink / raw)
  To: qi.z.zhang, qiming.yang; +Cc: dev, wenjun1.wu, Simei Su

[PATCH v2 1/3] add related ops and structure for Rx timestamp in virtual channel.
[PATCH v2 2/3] add support for Rx timestamp on flex descriptor in driver.
[PATCH v2 3/3] improve performance with Rx timestamp enabled.

v2:
* Add release notes and doc update. 

Simei Su (3):
  common/iavf: support Rx timestamp in virtual channel
  net/iavf: enable Rx timestamp on Flex Descriptor
  net/iavf: improve performance of Rx timestamp offload

 doc/guides/nics/features/iavf.ini       |  1 +
 doc/guides/rel_notes/release_22_07.rst  |  1 +
 drivers/common/iavf/virtchnl.h          | 62 +++++++++++++++++++++++-
 drivers/net/iavf/iavf.h                 |  6 +++
 drivers/net/iavf/iavf_ethdev.c          | 35 ++++++++++++++
 drivers/net/iavf/iavf_rxtx.c            | 73 +++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h            | 21 +++++++++
 drivers/net/iavf/iavf_rxtx_vec_common.h |  3 ++
 drivers/net/iavf/iavf_vchnl.c           | 83 ++++++++++++++++++++++++++++-----
 9 files changed, 272 insertions(+), 13 deletions(-)

-- 
2.9.5



* [PATCH v2 1/3] common/iavf: support Rx timestamp in virtual channel
  2022-04-24  7:08 ` [PATCH v2 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
@ 2022-04-24  7:08   ` Simei Su
  2022-04-24  7:08   ` [PATCH v2 2/3] net/iavf: enable Rx timestamp on Flex Descriptor Simei Su
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 14+ messages in thread
From: Simei Su @ 2022-04-24  7:08 UTC (permalink / raw)
  To: qi.z.zhang, qiming.yang; +Cc: dev, wenjun1.wu, Simei Su

Add new ops and structures to support Rx timestamp on flex
descriptor for the VF.

"VIRTCHNL_OP_1588_PTP_GET_CAPS" ops is sent by the VF to request PTP
capabilities and responded by the PF with capabilities enabled for
that VF.

"VIRTCHNL_OP_1588_PTP_GET_TIME" ops is sent by the VF to request
the current time of the PHC. The PF will respond by reading the
device time and reporting it back to the VF.

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/common/iavf/virtchnl.h | 62 ++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 60 insertions(+), 2 deletions(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 249ae6e..2d49f95 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -159,6 +159,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
 	VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,
 	VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,
+	VIRTCHNL_OP_1588_PTP_GET_CAPS = 60,
+	VIRTCHNL_OP_1588_PTP_GET_TIME = 61,
 	VIRTCHNL_OP_GET_QOS_CAPS = 66,
 	VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,
 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
@@ -276,6 +278,10 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2";
 	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
 		return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2";
+	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+		return "VIRTCHNL_OP_1588_PTP_GET_CAPS";
+	case VIRTCHNL_OP_1588_PTP_GET_TIME:
+		return "VIRTCHNL_OP_1588_PTP_GET_TIME";
 	case VIRTCHNL_OP_MAX:
 		return "VIRTCHNL_OP_MAX";
 	default:
@@ -411,7 +417,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF		BIT(28)
 #define VIRTCHNL_VF_OFFLOAD_QOS		BIT(29)
 #define VIRTCHNL_VF_CAP_DCF			BIT(30)
-	/* BIT(31) is reserved */
+#define VIRTCHNL_VF_CAP_PTP			BIT(31)
 
 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
 			       VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -498,6 +504,18 @@ enum virtchnl_rx_desc_id_bitmasks {
 	/* 22 through 63 are reserved */
 };
 
+/* virtchnl_rxq_info_flags
+ *
+ * Definition of bits in the flags field of the virtchnl_rxq_info structure.
+ */
+enum virtchnl_rxq_info_flags {
+	/* If the VIRTCHNL_PTP_RX_TSTAMP bit of the flag field is set, this is
+	 * a request to enable Rx timestamp. Other flag bits are currently
+	 * reserved and they may be extended in the future.
+	 */
+	VIRTCHNL_PTP_RX_TSTAMP = BIT(0),
+};
+
 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
  * VF sends this message to set up parameters for one RX queue.
  * External data buffer contains one instance of virtchnl_rxq_info.
@@ -526,7 +544,8 @@ struct virtchnl_rxq_info {
 	 * with VIRTCHNL_RXDID_1_32B_BASE.
 	 */
 	u8 rxdid;
-	u8 pad1[2];
+	u8 flags; /* see virtchnl_rxq_info_flags */
+	u8 pad1;
 	u64 dma_ring_addr;
 
 	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
@@ -2004,6 +2023,39 @@ struct virtchnl_quanta_cfg {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
 
+#define VIRTCHNL_1588_PTP_CAP_RX_TSTAMP		BIT(1)
+#define VIRTCHNL_1588_PTP_CAP_READ_PHC		BIT(2)
+
+struct virtchnl_phc_regs {
+	u32 clock_hi;
+	u32 clock_lo;
+	u8 pcie_region;
+	u8 rsvd[15];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_phc_regs);
+
+struct virtchnl_ptp_caps {
+	struct virtchnl_phc_regs phc_regs;
+	u32 caps;
+	s32 max_adj;
+	u8 tx_tstamp_idx;
+	u8 n_ext_ts;
+	u8 n_per_out;
+	u8 n_pins;
+	u8 tx_tstamp_format;
+	u8 rsvd[11];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_ptp_caps);
+
+struct virtchnl_phc_time {
+	u64 time;
+	u8 rsvd[8];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_time);
+
 /* Since VF messages are limited by u16 size, precalculate the maximum possible
  * values of nested elements in virtchnl structures that virtual channel can
  * possibly handle in a single message.
@@ -2321,6 +2373,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
 		valid_len = sizeof(struct virtchnl_vlan_setting);
 		break;
+	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+		valid_len = sizeof(struct virtchnl_ptp_caps);
+		break;
+	case VIRTCHNL_OP_1588_PTP_GET_TIME:
+		valid_len = sizeof(struct virtchnl_phc_time);
+		break;
 	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
 	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
 		valid_len = sizeof(struct virtchnl_del_ena_dis_queues);
-- 
2.9.5



* [PATCH v2 2/3] net/iavf: enable Rx timestamp on Flex Descriptor
  2022-04-24  7:08 ` [PATCH v2 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
  2022-04-24  7:08   ` [PATCH v2 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
@ 2022-04-24  7:08   ` Simei Su
  2022-04-24  7:08   ` [PATCH v2 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
  2022-04-28  8:13   ` [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
  3 siblings, 0 replies; 14+ messages in thread
From: Simei Su @ 2022-04-24  7:08 UTC (permalink / raw)
  To: qi.z.zhang, qiming.yang; +Cc: dev, wenjun1.wu, Simei Su

Dump the Rx timestamp value from the flex descriptor into the dynamic
mbuf field. This feature is turned on by the dev config
"enable-rx-timestamp". Currently, it is only supported on the scalar
path.

Signed-off-by: Simei Su <simei.su@intel.com>
---
 doc/guides/nics/features/iavf.ini       |  1 +
 doc/guides/rel_notes/release_22_07.rst  |  1 +
 drivers/net/iavf/iavf.h                 |  5 ++
 drivers/net/iavf/iavf_ethdev.c          | 26 +++++++++++
 drivers/net/iavf/iavf_rxtx.c            | 58 +++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h            | 22 +++++++++
 drivers/net/iavf/iavf_rxtx_vec_common.h |  3 ++
 drivers/net/iavf/iavf_vchnl.c           | 83 ++++++++++++++++++++++++++++-----
 8 files changed, 188 insertions(+), 11 deletions(-)

diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini
index 01f5142..5a0d9d8 100644
--- a/doc/guides/nics/features/iavf.ini
+++ b/doc/guides/nics/features/iavf.ini
@@ -24,6 +24,7 @@ CRC offload          = Y
 VLAN offload         = Y
 L3 checksum offload  = P
 L4 checksum offload  = P
+Timestamp offload    = P
 Packet type parsing  = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst
index f1b4057..567f23d 100644
--- a/doc/guides/rel_notes/release_22_07.rst
+++ b/doc/guides/rel_notes/release_22_07.rst
@@ -59,6 +59,7 @@ New Features
 
   * Added Tx QoS queue rate limitation support.
   * Added quanta size configuration support.
+  * Added ``DEV_RX_OFFLOAD_TIMESTAMP`` support.
 
 Removed Items
 -------------
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index c0a4a47..3255c93 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -268,6 +268,8 @@ struct iavf_info {
 	struct iavf_tm_conf tm_conf;
 
 	struct rte_eth_dev *eth_dev;
+
+	uint32_t ptp_caps;
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -312,6 +314,7 @@ struct iavf_adapter {
 	bool stopped;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
+	uint64_t phc_time;
 };
 
 /* IAVF_DEV_PRIVATE_TO */
@@ -476,4 +479,6 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 		uint8_t *msg, size_t msg_len,
 		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
+int iavf_get_ptp_cap(struct iavf_adapter *adapter);
+int iavf_get_phc_time(struct iavf_adapter *adapter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 7d093bd..89e4240 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -36,6 +36,9 @@
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
 #define IAVF_QUANTA_SIZE_ARG       "quanta_size"
 
+uint64_t iavf_timestamp_dynflag;
+int iavf_timestamp_dynfield_offset = -1;
+
 static const char * const iavf_valid_args[] = {
 	IAVF_PROTO_XTR_ARG,
 	IAVF_QUANTA_SIZE_ARG,
@@ -687,6 +690,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 	struct rte_eth_dev_data *dev_data = dev->data;
 	uint16_t buf_size, max_pkt_len;
 	uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
+	enum iavf_status err;
 
 	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
 
@@ -705,6 +709,18 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 		return -EINVAL;
 	}
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		/* Register mbuf field and flag for Rx timestamp */
+		err = rte_mbuf_dyn_rx_timestamp_register(
+			&iavf_timestamp_dynfield_offset,
+			&iavf_timestamp_dynflag);
+		if (err) {
+			PMD_DRV_LOG(ERR,
+				    "Cannot register mbuf field/flag for timestamp");
+			return -EINVAL;
+		}
+	}
+
 	rxq->max_pkt_len = max_pkt_len;
 	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    rxq->max_pkt_len > buf_size) {
@@ -947,6 +963,13 @@ iavf_dev_start(struct rte_eth_dev *dev)
 			return -1;
 		}
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP) {
+		if (iavf_get_ptp_cap(adapter)) {
+			PMD_INIT_LOG(ERR, "Failed to get ptp capability");
+			return -1;
+		}
+	}
+
 	if (iavf_init_queues(dev) != 0) {
 		PMD_DRV_LOG(ERR, "failed to do Queue init");
 		return -1;
@@ -1092,6 +1115,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
 	if (iavf_ipsec_crypto_supported(adapter)) {
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index c21f818..2d3bafd 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1422,6 +1422,11 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
 	const uint32_t *ptype_tbl;
+	struct iavf_adapter *ad = rxq->vsi->adapter;
+	uint64_t ts_ns;
+
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		rxq->hw_register_set = 1;
 
 	nb_rx = 0;
 	nb_hold = 0;
@@ -1491,6 +1496,21 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 				&rxq->stats.ipsec_crypto);
 		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+
+		if (iavf_timestamp_dynflag > 0) {
+			if (rxq->hw_register_set)
+				iavf_get_phc_time(ad);
+
+			rxq->hw_register_set = 0;
+			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+			*RTE_MBUF_DYNFIELD(rxm,
+				iavf_timestamp_dynfield_offset,
+				rte_mbuf_timestamp_t *) = ts_ns;
+			rxm->ol_flags |= iavf_timestamp_dynflag;
+		}
+
 		rxm->ol_flags |= pkt_flags;
 
 		rx_pkts[nb_rx++] = rxm;
@@ -1519,11 +1539,16 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t rx_stat_err0;
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
+	struct iavf_adapter *ad = rxq->vsi->adapter;
+	uint64_t ts_ns;
 
 	volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
 	volatile union iavf_rx_flex_desc *rxdp;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		rxq->hw_register_set = 1;
+
 	while (nb_rx < nb_pkts) {
 		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
 		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
@@ -1636,6 +1661,20 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
+		if (iavf_timestamp_dynflag > 0) {
+			if (rxq->hw_register_set)
+				iavf_get_phc_time(ad);
+
+			rxq->hw_register_set = 0;
+			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+			*RTE_MBUF_DYNFIELD(first_seg,
+				iavf_timestamp_dynfield_offset,
+				rte_mbuf_timestamp_t *) = ts_ns;
+			first_seg->ol_flags |= iavf_timestamp_dynflag;
+		}
+
 		first_seg->ol_flags |= pkt_flags;
 
 		/* Prefetch data of first segment, if configured to do so. */
@@ -1831,6 +1870,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 	int32_t nb_staged = 0;
 	uint64_t pkt_flags;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+	struct iavf_adapter *ad = rxq->vsi->adapter;
+	uint64_t ts_ns;
 
 	rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -1841,6 +1882,9 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 	if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
 		return 0;
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		rxq->hw_register_set = 1;
+
 	/* Scan LOOK_AHEAD descriptors at a time to determine which
 	 * descriptors reference packets that are ready to be received.
 	 */
@@ -1897,6 +1941,20 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
 
+			if (iavf_timestamp_dynflag > 0) {
+				if (rxq->hw_register_set)
+					iavf_get_phc_time(ad);
+
+				rxq->hw_register_set = 0;
+				ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+
+				*RTE_MBUF_DYNFIELD(mb,
+					iavf_timestamp_dynfield_offset,
+					rte_mbuf_timestamp_t *) = ts_ns;
+				mb->ol_flags |= iavf_timestamp_dynflag;
+			}
+
 			mb->ol_flags |= pkt_flags;
 
 			/* Put up to nb_pkts directly into buffers */
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebb..37453c4 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -72,6 +72,9 @@
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
 
+extern uint64_t iavf_timestamp_dynflag;
+extern int iavf_timestamp_dynfield_offset;
+
 /**
  * Rx Flex Descriptors
  * These descriptors are used instead of the legacy version descriptors
@@ -219,6 +222,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
+	uint32_t hw_register_set;
 };
 
 struct iavf_tx_entry {
@@ -778,6 +782,24 @@ void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
 	}
 }
 
+static inline
+uint64_t iavf_tstamp_convert_32b_64b(uint64_t time, uint32_t in_timestamp)
+{
+	const uint64_t mask = 0xFFFFFFFF;
+	uint32_t delta;
+	uint64_t ns;
+
+	delta = (in_timestamp - (uint32_t)(time & mask));
+	if (delta > (mask / 2)) {
+		delta = ((uint32_t)(time & mask) - in_timestamp);
+		ns = time - delta;
+	} else {
+		ns = time + delta;
+	}
+
+	return ns;
+}
+
 #ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
 #define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
 	iavf_dump_rx_descriptor(rxq, desc, rx_id)
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 1fd37b7..a59cb2c 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -231,6 +231,9 @@ iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq)
 	if (rxq->proto_xtr != IAVF_PROTO_XTR_NONE)
 		return -1;
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		return -1;
+
 	if (rxq->offloads & IAVF_RX_VECTOR_OFFLOAD)
 		return IAVF_VECTOR_OFFLOAD_PATH;
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index f9452d1..b654433 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -502,7 +502,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
 		VIRTCHNL_VF_OFFLOAD_QOS |
-		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO |
+		VIRTCHNL_VF_CAP_PTP;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -1047,16 +1048,21 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 		vc_qp->rxq.crc_disable = rxq[i]->crc_len != 0 ? 1 : 0;
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 		if (vf->vf_res->vf_cap_flags &
-		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
-		    vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
-			vc_qp->rxq.rxdid = rxq[i]->rxdid;
-			PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
-				    vc_qp->rxq.rxdid, i);
-		} else {
-			PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
-				    "request default RXDID[%d] in Queue[%d]",
-				    rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
-			vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
+		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+			if (vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
+				vc_qp->rxq.rxdid = rxq[i]->rxdid;
+				PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
+					    vc_qp->rxq.rxdid, i);
+			} else {
+				PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
+					    "request default RXDID[%d] in Queue[%d]",
+					    rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
+				vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
+			}
+
+			if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP &&
+			    vf->ptp_caps & VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)
+				vc_qp->rxq.flags |= VIRTCHNL_PTP_RX_TSTAMP;
 		}
 #else
 		if (vf->vf_res->vf_cap_flags &
@@ -1859,3 +1865,58 @@ iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 nu
 
 	return 0;
 }
+
+int
+iavf_get_ptp_cap(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_ptp_caps ptp_caps;
+	struct iavf_cmd_info args;
+	int err;
+
+	ptp_caps.caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
+			VIRTCHNL_1588_PTP_CAP_READ_PHC;
+
+	args.ops = VIRTCHNL_OP_1588_PTP_GET_CAPS;
+	args.in_args = (uint8_t *)&ptp_caps;
+	args.in_args_size = sizeof(ptp_caps);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_1588_PTP_GET_CAPS");
+		return err;
+	}
+
+	vf->ptp_caps = ((struct virtchnl_ptp_caps *)args.out_buffer)->caps;
+
+	return 0;
+}
+
+int
+iavf_get_phc_time(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_phc_time phc_time;
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_1588_PTP_GET_TIME;
+	args.in_args = (uint8_t *)&phc_time;
+	args.in_args_size = sizeof(phc_time);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
+		return err;
+	}
+
+	adapter->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
+
+	return 0;
+}
-- 
2.9.5



* [PATCH v2 3/3] net/iavf: improve performance of Rx timestamp offload
  2022-04-24  7:08 ` [PATCH v2 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
  2022-04-24  7:08   ` [PATCH v2 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
  2022-04-24  7:08   ` [PATCH v2 2/3] net/iavf: enable Rx timestamp on Flex Descriptor Simei Su
@ 2022-04-24  7:08   ` Simei Su
  2022-04-28  8:13   ` [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
  3 siblings, 0 replies; 14+ messages in thread
From: Simei Su @ 2022-04-24  7:08 UTC (permalink / raw)
  To: qi.z.zhang, qiming.yang; +Cc: dev, wenjun1.wu, Simei Su

In this patch, we use CPU ticks instead of the HW register to
determine whether the low 32-bit timestamp has rolled over. This
avoids reading the register value frequently and improves receive
performance.

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
 drivers/net/iavf/iavf.h        |  1 +
 drivers/net/iavf/iavf_ethdev.c |  9 ++++++++
 drivers/net/iavf/iavf_rxtx.c   | 51 +++++++++++++++++++++++++++---------------
 drivers/net/iavf/iavf_rxtx.h   |  1 -
 4 files changed, 43 insertions(+), 19 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 3255c93..dd83567 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -315,6 +315,7 @@ struct iavf_adapter {
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 	uint64_t phc_time;
+	uint64_t hw_time_update;
 };
 
 /* IAVF_DEV_PRIVATE_TO */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 89e4240..d1a2b53 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1019,6 +1019,15 @@ iavf_dev_start(struct rte_eth_dev *dev)
 		goto err_mac;
 	}
 
+	if (dev->data->dev_conf.rxmode.offloads &
+	    RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		if (iavf_get_phc_time(adapter)) {
+			PMD_DRV_LOG(ERR, "get physical time failed");
+			goto err_mac;
+		}
+		adapter->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+	}
+
 	return 0;
 
 err_mac:
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 2d3bafd..bb67a9a 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1425,8 +1425,14 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 	struct iavf_adapter *ad = rxq->vsi->adapter;
 	uint64_t ts_ns;
 
-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-		rxq->hw_register_set = 1;
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		if (sw_cur_time - ad->hw_time_update > 4) {
+			if (iavf_get_phc_time(ad))
+				PMD_DRV_LOG(ERR, "get physical time failed");
+			ad->hw_time_update = sw_cur_time;
+		}
+	}
 
 	nb_rx = 0;
 	nb_hold = 0;
@@ -1498,13 +1504,12 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
 		if (iavf_timestamp_dynflag > 0) {
-			if (rxq->hw_register_set)
-				iavf_get_phc_time(ad);
-
-			rxq->hw_register_set = 0;
 			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 
+			ad->phc_time = ts_ns;
+			ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
 			*RTE_MBUF_DYNFIELD(rxm,
 				iavf_timestamp_dynfield_offset,
 				rte_mbuf_timestamp_t *) = ts_ns;
@@ -1546,8 +1551,14 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 	volatile union iavf_rx_flex_desc *rxdp;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-		rxq->hw_register_set = 1;
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		if (sw_cur_time - ad->hw_time_update > 4) {
+			if (iavf_get_phc_time(ad))
+				PMD_DRV_LOG(ERR, "get physical time failed");
+			ad->hw_time_update = sw_cur_time;
+		}
+	}
 
 	while (nb_rx < nb_pkts) {
 		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
@@ -1662,13 +1673,12 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
 		if (iavf_timestamp_dynflag > 0) {
-			if (rxq->hw_register_set)
-				iavf_get_phc_time(ad);
-
-			rxq->hw_register_set = 0;
 			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 
+			ad->phc_time = ts_ns;
+			ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
 			*RTE_MBUF_DYNFIELD(first_seg,
 				iavf_timestamp_dynfield_offset,
 				rte_mbuf_timestamp_t *) = ts_ns;
@@ -1882,8 +1892,14 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 	if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
 		return 0;
 
-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-		rxq->hw_register_set = 1;
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		if (sw_cur_time - ad->hw_time_update > 4) {
+			if (iavf_get_phc_time(ad))
+				PMD_DRV_LOG(ERR, "get physical time failed");
+			ad->hw_time_update = sw_cur_time;
+		}
+	}
 
 	/* Scan LOOK_AHEAD descriptors at a time to determine which
 	 * descriptors reference packets that are ready to be received.
@@ -1942,13 +1958,12 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
 
 			if (iavf_timestamp_dynflag > 0) {
-				if (rxq->hw_register_set)
-					iavf_get_phc_time(ad);
-
-				rxq->hw_register_set = 0;
 				ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
 					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
 
+				ad->phc_time = ts_ns;
+				ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
 				*RTE_MBUF_DYNFIELD(mb,
 					iavf_timestamp_dynfield_offset,
 					rte_mbuf_timestamp_t *) = ts_ns;
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 37453c4..642b9a7 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -222,7 +222,6 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
-	uint32_t hw_register_set;
 };
 
 struct iavf_tx_entry {
-- 
2.9.5



* [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor
  2022-04-24  7:08 ` [PATCH v2 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
                     ` (2 preceding siblings ...)
  2022-04-24  7:08   ` [PATCH v2 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
@ 2022-04-28  8:13   ` Simei Su
  2022-04-28  8:13     ` [PATCH v3 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
                       ` (3 more replies)
  3 siblings, 4 replies; 14+ messages in thread
From: Simei Su @ 2022-04-28  8:13 UTC (permalink / raw)
  To: qi.z.zhang, qiming.yang; +Cc: dev, wenjun1.wu, Simei Su

[PATCH v3 1/3] add related ops and structure for Rx timestamp in virtual channel.
[PATCH v3 2/3] add support for Rx timestamp on flex descriptor in driver.
[PATCH v3 3/3] improve performance with Rx timestamp enabled.

v3:
* Rebase code.
* Fix compile warning.

v2:
* Add release notes and doc update.

Simei Su (3):
  common/iavf: support Rx timestamp in virtual channel
  net/iavf: enable Rx timestamp on Flex Descriptor
  net/iavf: improve performance of Rx timestamp offload

 doc/guides/nics/features/iavf.ini       |  1 +
 doc/guides/rel_notes/release_22_07.rst  |  2 +-
 drivers/common/iavf/virtchnl.h          | 62 +++++++++++++++++++++++-
 drivers/net/iavf/iavf.h                 |  6 +++
 drivers/net/iavf/iavf_ethdev.c          | 35 ++++++++++++++
 drivers/net/iavf/iavf_rxtx.c            | 74 +++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h            | 21 +++++++++
 drivers/net/iavf/iavf_rxtx_vec_common.h |  3 ++
 drivers/net/iavf/iavf_vchnl.c           | 83 ++++++++++++++++++++++++++++-----
 9 files changed, 273 insertions(+), 14 deletions(-)

-- 
2.9.5



* [PATCH v3 1/3] common/iavf: support Rx timestamp in virtual channel
  2022-04-28  8:13   ` [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
@ 2022-04-28  8:13     ` Simei Su
  2022-04-28  8:13     ` [PATCH v3 2/3] net/iavf: enable Rx timestamp on Flex Descriptor Simei Su
                       ` (2 subsequent siblings)
  3 siblings, 0 replies; 14+ messages in thread
From: Simei Su @ 2022-04-28  8:13 UTC (permalink / raw)
  To: qi.z.zhang, qiming.yang; +Cc: dev, wenjun1.wu, Simei Su

Add new ops and structures to support Rx timestamp on flex
descriptor for the VF.

"VIRTCHNL_OP_1588_PTP_GET_CAPS" ops is sent by the VF to request PTP
capabilities and responded by the PF with capabilities enabled for
that VF.

"VIRTCHNL_OP_1588_PTP_GET_TIME" ops is sent by the VF to request
the current time of the PHC. The PF will respond by reading the
device time and reporting it back to the VF.

Signed-off-by: Simei Su <simei.su@intel.com>
---
 drivers/common/iavf/virtchnl.h | 62 ++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 60 insertions(+), 2 deletions(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 249ae6e..2d49f95 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -159,6 +159,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
 	VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,
 	VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,
+	VIRTCHNL_OP_1588_PTP_GET_CAPS = 60,
+	VIRTCHNL_OP_1588_PTP_GET_TIME = 61,
 	VIRTCHNL_OP_GET_QOS_CAPS = 66,
 	VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,
 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
@@ -276,6 +278,10 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2";
 	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
 		return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2";
+	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+		return "VIRTCHNL_OP_1588_PTP_GET_CAPS";
+	case VIRTCHNL_OP_1588_PTP_GET_TIME:
+		return "VIRTCHNL_OP_1588_PTP_GET_TIME";
 	case VIRTCHNL_OP_MAX:
 		return "VIRTCHNL_OP_MAX";
 	default:
@@ -411,7 +417,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF		BIT(28)
 #define VIRTCHNL_VF_OFFLOAD_QOS		BIT(29)
 #define VIRTCHNL_VF_CAP_DCF			BIT(30)
-	/* BIT(31) is reserved */
+#define VIRTCHNL_VF_CAP_PTP			BIT(31)
 
 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
 			       VIRTCHNL_VF_OFFLOAD_VLAN | \
@@ -498,6 +504,18 @@ enum virtchnl_rx_desc_id_bitmasks {
 	/* 22 through 63 are reserved */
 };
 
+/* virtchnl_rxq_info_flags
+ *
+ * Definition of bits in the flags field of the virtchnl_rxq_info structure.
+ */
+enum virtchnl_rxq_info_flags {
+	/* If the VIRTCHNL_PTP_RX_TSTAMP bit of the flag field is set, this is
+	 * a request to enable Rx timestamp. Other flag bits are currently
+	 * reserved and they may be extended in the future.
+	 */
+	VIRTCHNL_PTP_RX_TSTAMP = BIT(0),
+};
+
 /* VIRTCHNL_OP_CONFIG_RX_QUEUE
  * VF sends this message to set up parameters for one RX queue.
  * External data buffer contains one instance of virtchnl_rxq_info.
@@ -526,7 +544,8 @@ struct virtchnl_rxq_info {
 	 * with VIRTCHNL_RXDID_1_32B_BASE.
 	 */
 	u8 rxdid;
-	u8 pad1[2];
+	u8 flags; /* see virtchnl_rxq_info_flags */
+	u8 pad1;
 	u64 dma_ring_addr;
 
 	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
@@ -2004,6 +2023,39 @@ struct virtchnl_quanta_cfg {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
 
+#define VIRTCHNL_1588_PTP_CAP_RX_TSTAMP		BIT(1)
+#define VIRTCHNL_1588_PTP_CAP_READ_PHC		BIT(2)
+
+struct virtchnl_phc_regs {
+	u32 clock_hi;
+	u32 clock_lo;
+	u8 pcie_region;
+	u8 rsvd[15];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_phc_regs);
+
+struct virtchnl_ptp_caps {
+	struct virtchnl_phc_regs phc_regs;
+	u32 caps;
+	s32 max_adj;
+	u8 tx_tstamp_idx;
+	u8 n_ext_ts;
+	u8 n_per_out;
+	u8 n_pins;
+	u8 tx_tstamp_format;
+	u8 rsvd[11];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_ptp_caps);
+
+struct virtchnl_phc_time {
+	u64 time;
+	u8 rsvd[8];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_time);
+
 /* Since VF messages are limited by u16 size, precalculate the maximum possible
  * values of nested elements in virtchnl structures that virtual channel can
  * possibly handle in a single message.
@@ -2321,6 +2373,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
 		valid_len = sizeof(struct virtchnl_vlan_setting);
 		break;
+	case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+		valid_len = sizeof(struct virtchnl_ptp_caps);
+		break;
+	case VIRTCHNL_OP_1588_PTP_GET_TIME:
+		valid_len = sizeof(struct virtchnl_phc_time);
+		break;
 	case VIRTCHNL_OP_ENABLE_QUEUES_V2:
 	case VIRTCHNL_OP_DISABLE_QUEUES_V2:
 		valid_len = sizeof(struct virtchnl_del_ena_dis_queues);
-- 
2.9.5


^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH v3 2/3] net/iavf: enable Rx timestamp on Flex Descriptor
  2022-04-28  8:13   ` [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
  2022-04-28  8:13     ` [PATCH v3 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
@ 2022-04-28  8:13     ` Simei Su
  2022-04-28  8:13     ` [PATCH v3 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
  2022-04-29  2:56     ` [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor Zhang, Qi Z
  3 siblings, 0 replies; 14+ messages in thread
From: Simei Su @ 2022-04-28  8:13 UTC (permalink / raw)
  To: qi.z.zhang, qiming.yang; +Cc: dev, wenjun1.wu, Simei Su

Dump the Rx timestamp value from the flex descriptor into the dynamic
mbuf field. This feature is enabled by the dev config option
"enable-rx-timestamp". Currently, it is only supported on the scalar
path.

Signed-off-by: Simei Su <simei.su@intel.com>
---
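For reference, a minimal application-side sketch (not part of the
patch, error handling omitted) of how the timestamp written by this
patch can be consumed, using the same rte_mbuf_dyn_rx_timestamp_register()
and RTE_MBUF_DYNFIELD() pair the driver uses below:

#include <stdio.h>
#include <inttypes.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static int ts_offset = -1;
static uint64_t ts_flag;

/* Call once, after RTE_ETH_RX_OFFLOAD_TIMESTAMP has been enabled in
 * rxmode.offloads for the port.
 */
static int app_rx_timestamp_init(void)
{
	return rte_mbuf_dyn_rx_timestamp_register(&ts_offset, &ts_flag);
}

/* Per received packet: the dynflag tells whether the dynfield holds
 * a valid timestamp for this mbuf.
 */
static void app_rx_timestamp_print(struct rte_mbuf *m)
{
	if (m->ol_flags & ts_flag)
		printf("rx timestamp: %" PRIu64 "\n",
		       *RTE_MBUF_DYNFIELD(m, ts_offset,
					  rte_mbuf_timestamp_t *));
}
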
 doc/guides/nics/features/iavf.ini       |  1 +
 doc/guides/rel_notes/release_22_07.rst  |  2 +-
 drivers/net/iavf/iavf.h                 |  5 ++
 drivers/net/iavf/iavf_ethdev.c          | 26 +++++++++++
 drivers/net/iavf/iavf_rxtx.c            | 59 +++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h            | 22 +++++++++
 drivers/net/iavf/iavf_rxtx_vec_common.h |  3 ++
 drivers/net/iavf/iavf_vchnl.c           | 83 ++++++++++++++++++++++++++++-----
 8 files changed, 189 insertions(+), 12 deletions(-)

diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini
index 01f5142..5a0d9d8 100644
--- a/doc/guides/nics/features/iavf.ini
+++ b/doc/guides/nics/features/iavf.ini
@@ -24,6 +24,7 @@ CRC offload          = Y
 VLAN offload         = Y
 L3 checksum offload  = P
 L4 checksum offload  = P
+Timestamp offload    = P
 Packet type parsing  = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
diff --git a/doc/guides/rel_notes/release_22_07.rst b/doc/guides/rel_notes/release_22_07.rst
index 90123bb..567f23d 100644
--- a/doc/guides/rel_notes/release_22_07.rst
+++ b/doc/guides/rel_notes/release_22_07.rst
@@ -59,7 +59,7 @@ New Features
 
   * Added Tx QoS queue rate limitation support.
   * Added quanta size configuration support.
-
+  * Added ``DEV_RX_OFFLOAD_TIMESTAMP`` support.
 
 Removed Items
 -------------
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index c0a4a47..3255c93 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -268,6 +268,8 @@ struct iavf_info {
 	struct iavf_tm_conf tm_conf;
 
 	struct rte_eth_dev *eth_dev;
+
+	uint32_t ptp_caps;
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -312,6 +314,7 @@ struct iavf_adapter {
 	bool stopped;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
+	uint64_t phc_time;
 };
 
 /* IAVF_DEV_PRIVATE_TO */
@@ -476,4 +479,6 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 		uint8_t *msg, size_t msg_len,
 		uint8_t *resp_msg, size_t resp_msg_len);
 extern const struct rte_tm_ops iavf_tm_ops;
+int iavf_get_ptp_cap(struct iavf_adapter *adapter);
+int iavf_get_phc_time(struct iavf_adapter *adapter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 7d093bd..89e4240 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -36,6 +36,9 @@
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
 #define IAVF_QUANTA_SIZE_ARG       "quanta_size"
 
+uint64_t iavf_timestamp_dynflag;
+int iavf_timestamp_dynfield_offset = -1;
+
 static const char * const iavf_valid_args[] = {
 	IAVF_PROTO_XTR_ARG,
 	IAVF_QUANTA_SIZE_ARG,
@@ -687,6 +690,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 	struct rte_eth_dev_data *dev_data = dev->data;
 	uint16_t buf_size, max_pkt_len;
 	uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
+	enum iavf_status err;
 
 	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
 
@@ -705,6 +709,18 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
 		return -EINVAL;
 	}
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		/* Register mbuf field and flag for Rx timestamp */
+		err = rte_mbuf_dyn_rx_timestamp_register(
+			&iavf_timestamp_dynfield_offset,
+			&iavf_timestamp_dynflag);
+		if (err) {
+			PMD_DRV_LOG(ERR,
+				    "Cannot register mbuf field/flag for timestamp");
+			return -EINVAL;
+		}
+	}
+
 	rxq->max_pkt_len = max_pkt_len;
 	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
 	    rxq->max_pkt_len > buf_size) {
@@ -947,6 +963,13 @@ iavf_dev_start(struct rte_eth_dev *dev)
 			return -1;
 		}
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP) {
+		if (iavf_get_ptp_cap(adapter)) {
+			PMD_INIT_LOG(ERR, "Failed to get ptp capability");
+			return -1;
+		}
+	}
+
 	if (iavf_init_queues(dev) != 0) {
 		PMD_DRV_LOG(ERR, "failed to do Queue init");
 		return -1;
@@ -1092,6 +1115,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
+		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
 	if (iavf_ipsec_crypto_supported(adapter)) {
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index c21f818..4c731e7 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1430,6 +1430,12 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 	rx_ring = rxq->rx_ring;
 	ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
+	struct iavf_adapter *ad = rxq->vsi->adapter;
+	uint64_t ts_ns;
+
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		rxq->hw_register_set = 1;
+
 	while (nb_rx < nb_pkts) {
 		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
 		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
@@ -1491,6 +1497,21 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 				&rxq->stats.ipsec_crypto);
 		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+
+		if (iavf_timestamp_dynflag > 0) {
+			if (rxq->hw_register_set)
+				iavf_get_phc_time(ad);
+
+			rxq->hw_register_set = 0;
+			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+			*RTE_MBUF_DYNFIELD(rxm,
+				iavf_timestamp_dynfield_offset,
+				rte_mbuf_timestamp_t *) = ts_ns;
+			rxm->ol_flags |= iavf_timestamp_dynflag;
+		}
+
 		rxm->ol_flags |= pkt_flags;
 
 		rx_pkts[nb_rx++] = rxm;
@@ -1519,11 +1540,16 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t rx_stat_err0;
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
+	struct iavf_adapter *ad = rxq->vsi->adapter;
+	uint64_t ts_ns;
 
 	volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
 	volatile union iavf_rx_flex_desc *rxdp;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		rxq->hw_register_set = 1;
+
 	while (nb_rx < nb_pkts) {
 		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
 		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
@@ -1636,6 +1662,20 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
+		if (iavf_timestamp_dynflag > 0) {
+			if (rxq->hw_register_set)
+				iavf_get_phc_time(ad);
+
+			rxq->hw_register_set = 0;
+			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+			*RTE_MBUF_DYNFIELD(first_seg,
+				iavf_timestamp_dynfield_offset,
+				rte_mbuf_timestamp_t *) = ts_ns;
+			first_seg->ol_flags |= iavf_timestamp_dynflag;
+		}
+
 		first_seg->ol_flags |= pkt_flags;
 
 		/* Prefetch data of first segment, if configured to do so. */
@@ -1831,6 +1871,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 	int32_t nb_staged = 0;
 	uint64_t pkt_flags;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+	struct iavf_adapter *ad = rxq->vsi->adapter;
+	uint64_t ts_ns;
 
 	rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -1841,6 +1883,9 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 	if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
 		return 0;
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		rxq->hw_register_set = 1;
+
 	/* Scan LOOK_AHEAD descriptors at a time to determine which
 	 * descriptors reference packets that are ready to be received.
 	 */
@@ -1897,6 +1942,20 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
 
+			if (iavf_timestamp_dynflag > 0) {
+				if (rxq->hw_register_set)
+					iavf_get_phc_time(ad);
+
+				rxq->hw_register_set = 0;
+				ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+
+				*RTE_MBUF_DYNFIELD(mb,
+					iavf_timestamp_dynfield_offset,
+					rte_mbuf_timestamp_t *) = ts_ns;
+				mb->ol_flags |= iavf_timestamp_dynflag;
+			}
+
 			mb->ol_flags |= pkt_flags;
 
 			/* Put up to nb_pkts directly into buffers */
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebb..37453c4 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -72,6 +72,9 @@
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
 
+extern uint64_t iavf_timestamp_dynflag;
+extern int iavf_timestamp_dynfield_offset;
+
 /**
  * Rx Flex Descriptors
  * These descriptors are used instead of the legacy version descriptors
@@ -219,6 +222,7 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
+	uint32_t hw_register_set;
 };
 
 struct iavf_tx_entry {
@@ -778,6 +782,24 @@ void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
 	}
 }
 
+static inline
+uint64_t iavf_tstamp_convert_32b_64b(uint64_t time, uint32_t in_timestamp)
+{
+	const uint64_t mask = 0xFFFFFFFF;
+	uint32_t delta;
+	uint64_t ns;
+
+	delta = (in_timestamp - (uint32_t)(time & mask));
+	if (delta > (mask / 2)) {
+		delta = ((uint32_t)(time & mask) - in_timestamp);
+		ns = time - delta;
+	} else {
+		ns = time + delta;
+	}
+
+	return ns;
+}
+
 #ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
 #define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
 	iavf_dump_rx_descriptor(rxq, desc, rx_id)
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 1fd37b7..a59cb2c 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -231,6 +231,9 @@ iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq)
 	if (rxq->proto_xtr != IAVF_PROTO_XTR_NONE)
 		return -1;
 
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		return -1;
+
 	if (rxq->offloads & IAVF_RX_VECTOR_OFFLOAD)
 		return IAVF_VECTOR_OFFLOAD_PATH;
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index f9452d1..b654433 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -502,7 +502,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
 		VIRTCHNL_VF_LARGE_NUM_QPAIRS |
 		VIRTCHNL_VF_OFFLOAD_QOS |
-		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
+		VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO |
+		VIRTCHNL_VF_CAP_PTP;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -1047,16 +1048,21 @@ iavf_configure_queues(struct iavf_adapter *adapter,
 		vc_qp->rxq.crc_disable = rxq[i]->crc_len != 0 ? 1 : 0;
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 		if (vf->vf_res->vf_cap_flags &
-		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
-		    vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
-			vc_qp->rxq.rxdid = rxq[i]->rxdid;
-			PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
-				    vc_qp->rxq.rxdid, i);
-		} else {
-			PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
-				    "request default RXDID[%d] in Queue[%d]",
-				    rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
-			vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
+		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+			if (vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
+				vc_qp->rxq.rxdid = rxq[i]->rxdid;
+				PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
+					    vc_qp->rxq.rxdid, i);
+			} else {
+				PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
+					    "request default RXDID[%d] in Queue[%d]",
+					    rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
+				vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
+			}
+
+			if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP &&
+			    vf->ptp_caps & VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)
+				vc_qp->rxq.flags |= VIRTCHNL_PTP_RX_TSTAMP;
 		}
 #else
 		if (vf->vf_res->vf_cap_flags &
@@ -1859,3 +1865,58 @@ iavf_set_vf_quanta_size(struct iavf_adapter *adapter, u16 start_queue_id, u16 nu
 
 	return 0;
 }
+
+int
+iavf_get_ptp_cap(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_ptp_caps ptp_caps;
+	struct iavf_cmd_info args;
+	int err;
+
+	ptp_caps.caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
+			VIRTCHNL_1588_PTP_CAP_READ_PHC;
+
+	args.ops = VIRTCHNL_OP_1588_PTP_GET_CAPS;
+	args.in_args = (uint8_t *)&ptp_caps;
+	args.in_args_size = sizeof(ptp_caps);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_1588_PTP_GET_CAPS");
+		return err;
+	}
+
+	vf->ptp_caps = ((struct virtchnl_ptp_caps *)args.out_buffer)->caps;
+
+	return 0;
+}
+
+int
+iavf_get_phc_time(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_phc_time phc_time;
+	struct iavf_cmd_info args;
+	int err;
+
+	args.ops = VIRTCHNL_OP_1588_PTP_GET_TIME;
+	args.in_args = (uint8_t *)&phc_time;
+	args.in_args_size = sizeof(phc_time);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
+		return err;
+	}
+
+	adapter->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
+
+	return 0;
+}
-- 
2.9.5


^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH v3 3/3] net/iavf: improve performance of Rx timestamp offload
  2022-04-28  8:13   ` [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
  2022-04-28  8:13     ` [PATCH v3 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
  2022-04-28  8:13     ` [PATCH v3 2/3] net/iavf: enable Rx timestamp on Flex Descriptor Simei Su
@ 2022-04-28  8:13     ` Simei Su
  2022-05-10 16:00       ` Thomas Monjalon
  2022-04-29  2:56     ` [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor Zhang, Qi Z
  3 siblings, 1 reply; 14+ messages in thread
From: Simei Su @ 2022-04-28  8:13 UTC (permalink / raw)
  To: qi.z.zhang, qiming.yang; +Cc: dev, wenjun1.wu, Simei Su

In this patch, we use CPU ticks instead of the HW register to
determine whether the low 32-bit timestamp has rolled over. This
avoids reading the register value frequently and improves receive
performance.

Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>
---
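In other words, a sketch of the policy only (inside the driver, with
the per-burst and per-descriptor pieces pulled into hypothetical
helpers; the real change is open-coded in the Rx paths below, and the
4 ms refresh threshold is the value this patch uses):

/* Per Rx burst: only ask the PF for the PHC time when the cached
 * value is older than a few milliseconds, instead of on every burst.
 */
static inline void
rx_burst_refresh_phc(struct iavf_adapter *ad)
{
	uint64_t now_ms = rte_get_timer_cycles() /
			  (rte_get_timer_hz() / 1000);

	if (now_ms - ad->hw_time_update > 4) {
		if (iavf_get_phc_time(ad))
			PMD_DRV_LOG(ERR, "get physical time failed");
		ad->hw_time_update = now_ms;
	}
}

/* Per descriptor: extend the 32-bit hardware value against the cached
 * time with iavf_tstamp_convert_32b_64b() from patch 2/3, then let the
 * result become the new cache so the reference keeps tracking the
 * received packets themselves.
 */
static inline uint64_t
rx_desc_timestamp(struct iavf_adapter *ad, uint32_t ts_high)
{
	uint64_t ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time, ts_high);

	ad->phc_time = ts_ns;
	ad->hw_time_update = rte_get_timer_cycles() /
			     (rte_get_timer_hz() / 1000);
	return ts_ns;
}
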
 drivers/net/iavf/iavf.h        |  1 +
 drivers/net/iavf/iavf_ethdev.c |  9 ++++++++
 drivers/net/iavf/iavf_rxtx.c   | 51 +++++++++++++++++++++++++++---------------
 drivers/net/iavf/iavf_rxtx.h   |  1 -
 4 files changed, 43 insertions(+), 19 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 3255c93..dd83567 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -315,6 +315,7 @@ struct iavf_adapter {
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 	uint64_t phc_time;
+	uint64_t hw_time_update;
 };
 
 /* IAVF_DEV_PRIVATE_TO */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 89e4240..d1a2b53 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1019,6 +1019,15 @@ iavf_dev_start(struct rte_eth_dev *dev)
 		goto err_mac;
 	}
 
+	if (dev->data->dev_conf.rxmode.offloads &
+	    RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		if (iavf_get_phc_time(adapter)) {
+			PMD_DRV_LOG(ERR, "get physical time failed");
+			goto err_mac;
+		}
+		adapter->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+	}
+
 	return 0;
 
 err_mac:
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 4c731e7..345f6ae 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1433,8 +1433,14 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 	struct iavf_adapter *ad = rxq->vsi->adapter;
 	uint64_t ts_ns;
 
-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-		rxq->hw_register_set = 1;
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		if (sw_cur_time - ad->hw_time_update > 4) {
+			if (iavf_get_phc_time(ad))
+				PMD_DRV_LOG(ERR, "get physical time failed");
+			ad->hw_time_update = sw_cur_time;
+		}
+	}
 
 	while (nb_rx < nb_pkts) {
 		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
@@ -1499,13 +1505,12 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
 		if (iavf_timestamp_dynflag > 0) {
-			if (rxq->hw_register_set)
-				iavf_get_phc_time(ad);
-
-			rxq->hw_register_set = 0;
 			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 
+			ad->phc_time = ts_ns;
+			ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
 			*RTE_MBUF_DYNFIELD(rxm,
 				iavf_timestamp_dynfield_offset,
 				rte_mbuf_timestamp_t *) = ts_ns;
@@ -1547,8 +1552,14 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 	volatile union iavf_rx_flex_desc *rxdp;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-		rxq->hw_register_set = 1;
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		if (sw_cur_time - ad->hw_time_update > 4) {
+			if (iavf_get_phc_time(ad))
+				PMD_DRV_LOG(ERR, "get physical time failed");
+			ad->hw_time_update = sw_cur_time;
+		}
+	}
 
 	while (nb_rx < nb_pkts) {
 		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
@@ -1663,13 +1674,12 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
 
 		if (iavf_timestamp_dynflag > 0) {
-			if (rxq->hw_register_set)
-				iavf_get_phc_time(ad);
-
-			rxq->hw_register_set = 0;
 			ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
 				rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
 
+			ad->phc_time = ts_ns;
+			ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
 			*RTE_MBUF_DYNFIELD(first_seg,
 				iavf_timestamp_dynfield_offset,
 				rte_mbuf_timestamp_t *) = ts_ns;
@@ -1883,8 +1893,14 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 	if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
 		return 0;
 
-	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
-		rxq->hw_register_set = 1;
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+		if (sw_cur_time - ad->hw_time_update > 4) {
+			if (iavf_get_phc_time(ad))
+				PMD_DRV_LOG(ERR, "get physical time failed");
+			ad->hw_time_update = sw_cur_time;
+		}
+	}
 
 	/* Scan LOOK_AHEAD descriptors at a time to determine which
 	 * descriptors reference packets that are ready to be received.
@@ -1943,13 +1959,12 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
 
 			if (iavf_timestamp_dynflag > 0) {
-				if (rxq->hw_register_set)
-					iavf_get_phc_time(ad);
-
-				rxq->hw_register_set = 0;
 				ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
 					rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
 
+				ad->phc_time = ts_ns;
+				ad->hw_time_update = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
+
 				*RTE_MBUF_DYNFIELD(mb,
 					iavf_timestamp_dynfield_offset,
 					rte_mbuf_timestamp_t *) = ts_ns;
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 37453c4..642b9a7 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -222,7 +222,6 @@ struct iavf_rx_queue {
 		/* flexible descriptor metadata extraction offload flag */
 	struct iavf_rx_queue_stats stats;
 	uint64_t offloads;
-	uint32_t hw_register_set;
 };
 
 struct iavf_tx_entry {
-- 
2.9.5


^ permalink raw reply	[flat|nested] 14+ messages in thread

* RE: [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor
  2022-04-28  8:13   ` [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
                       ` (2 preceding siblings ...)
  2022-04-28  8:13     ` [PATCH v3 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
@ 2022-04-29  2:56     ` Zhang, Qi Z
  3 siblings, 0 replies; 14+ messages in thread
From: Zhang, Qi Z @ 2022-04-29  2:56 UTC (permalink / raw)
  To: Su, Simei, Yang, Qiming; +Cc: dev, Wu, Wenjun1



> -----Original Message-----
> From: Su, Simei <simei.su@intel.com>
> Sent: Thursday, April 28, 2022 4:14 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org; Wu, Wenjun1 <wenjun1.wu@intel.com>; Su, Simei
> <simei.su@intel.com>
> Subject: [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor
> 
> [PATCH v3 1/3] add related ops and structure for Rx timestamp in virtual
> channel.
> [PATCH v3 2/3] add support for Rx timestamp on flex descriptor in driver.
> [PATCH v3 3/3] improve performance with Rx timestamp enabled.
> 
> v3:
> * Rebase code.
> * Fix compile warning.
> 
> v2:
> * Add release notes and doc update.
> 
> Simei Su (3):
>   common/iavf: support Rx timestamp in virtual channel
>   net/iavf: enable Rx timestamp on Flex Descriptor
>   net/iavf: improve performance of Rx timestamp offload
> 
>  doc/guides/nics/features/iavf.ini       |  1 +
>  doc/guides/rel_notes/release_22_07.rst  |  2 +-
>  drivers/common/iavf/virtchnl.h          | 62 +++++++++++++++++++++++-
>  drivers/net/iavf/iavf.h                 |  6 +++
>  drivers/net/iavf/iavf_ethdev.c          | 35 ++++++++++++++
>  drivers/net/iavf/iavf_rxtx.c            | 74 +++++++++++++++++++++++++++++
>  drivers/net/iavf/iavf_rxtx.h            | 21 +++++++++
>  drivers/net/iavf/iavf_rxtx_vec_common.h |  3 ++
>  drivers/net/iavf/iavf_vchnl.c           | 83 ++++++++++++++++++++++++++++-----
>  9 files changed, 273 insertions(+), 14 deletions(-)
> 
> --
> 2.9.5

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH v3 3/3] net/iavf: improve performance of Rx timestamp offload
  2022-04-28  8:13     ` [PATCH v3 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
@ 2022-05-10 16:00       ` Thomas Monjalon
  0 siblings, 0 replies; 14+ messages in thread
From: Thomas Monjalon @ 2022-05-10 16:00 UTC (permalink / raw)
  To: qi.z.zhang, wenjun1.wu, Simei Su; +Cc: qiming.yang, dev, Simei Su

28/04/2022 10:13, Simei Su:
> In this patch, We use CPU ticks instead of HW register
> to determine whether low 32 bits timestamp has turned
> over. It can avoid requesting register value frequently
> and improve receiving performance.
> 
> Signed-off-by: Wenjun Wu <wenjun1.wu@intel.com>

It seems you've lost original authorship.
The patch is implicitly assigned to Simei Su.
I'll restore it to Wenjun Wu.




^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2022-05-10 16:00 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-04-08  2:13 [PATCH v1 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
2022-04-08  2:13 ` [PATCH v1 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
2022-04-08  2:13 ` [PATCH v1 2/3] net/iavf: enable Rx timestamp on Flex Descriptor Simei Su
2022-04-08  2:13 ` [PATCH v1 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
2022-04-24  7:08 ` [PATCH v2 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
2022-04-24  7:08   ` [PATCH v2 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
2022-04-24  7:08   ` [PATCH v2 2/3] net/iavf: enable Rx timestamp on Flex Descriptor Simei Su
2022-04-24  7:08   ` [PATCH v2 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
2022-04-28  8:13   ` [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
2022-04-28  8:13     ` [PATCH v3 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
2022-04-28  8:13     ` [PATCH v3 2/3] net/iavf: enable Rx timestamp on Flex Descriptor Simei Su
2022-04-28  8:13     ` [PATCH v3 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
2022-05-10 16:00       ` Thomas Monjalon
2022-04-29  2:56     ` [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor Zhang, Qi Z
