automatic DPDK test reports
* [dpdk-test-report] |WARNING| pw99850 [PATCH] [v6] net/ice: support IEEE 1588 PTP
@ 2021-10-06  0:14 dpdklab
From: dpdklab @ 2021-10-06  0:14 UTC
  To: test-report; +Cc: dpdk-test-reports

Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/99850

_apply patch failure_

Submitter: Simei Su <simei.su@intel.com>
Date: Tuesday, September 28, 2021 06:27:53
Applied on: CommitID:2700326085033fd13339a8de31f58a95d1ee9c3f
Apply patch set 99850 failed:

Checking patch doc/guides/nics/features/ice.ini...
Hunk #1 succeeded at 45 (offset 2 lines).
Checking patch doc/guides/rel_notes/release_21_11.rst...
error: while searching for:
  * Added 1PPS out support by a devargs.
  * Added IPv4 and L4(TCP/UDP/SCTP) checksum hash support in RSS flow.
  * Added DEV_RX_OFFLOAD_TIMESTAMP support.

* **Updated Marvell cnxk ethdev driver.**


error: patch failed: doc/guides/rel_notes/release_21_11.rst:82
Checking patch drivers/net/ice/ice_ethdev.c...
error: while searching for:
#include "base/ice_flow.h"
#include "base/ice_dcb.h"
#include "base/ice_common.h"

#include "rte_pmd_ice.h"
#include "ice_ethdev.h"

error: patch failed: drivers/net/ice/ice_ethdev.c:18
error: while searching for:
#define ICE_ONE_PPS_OUT_ARG       "pps_out"
#define ICE_RX_LOW_LATENCY_ARG    "rx_low_latency"

uint64_t ice_timestamp_dynflag;
int ice_timestamp_dynfield_offset = -1;


error: patch failed: drivers/net/ice/ice_ethdev.c:32
error: while searching for:
	NULL
};

#define NSEC_PER_SEC      1000000000
#define PPS_OUT_DELAY_NS  1

static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {

error: patch failed: drivers/net/ice/ice_ethdev.c:45
error: while searching for:
			struct rte_eth_udp_tunnel *udp_tunnel);
static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			struct rte_eth_udp_tunnel *udp_tunnel);

static const struct rte_pci_id pci_id_ice_map[] = {
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },

error: patch failed: drivers/net/ice/ice_ethdev.c:151
error: while searching for:
	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
	.tx_done_cleanup              = ice_tx_done_cleanup,
	.get_monitor_addr             = ice_get_monitor_addr,
};

/* store statistics names and its offset in stats structure */

error: patch failed: drivers/net/ice/ice_ethdev.c:234
Hunk #6 succeeded at 5686 (offset 178 lines).
Checking patch drivers/net/ice/ice_ethdev.h...
error: while searching for:
#define _ICE_ETHDEV_H_

#include <rte_kvargs.h>

#include <ethdev_driver.h>


error: patch failed: drivers/net/ice/ice_ethdev.h:6
error: while searching for:
	struct ice_devargs devargs;
	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
	uint16_t fdir_ref_cnt;
#ifdef RTE_ARCH_X86
	bool rx_use_avx2;
	bool rx_use_avx512;

error: patch failed: drivers/net/ice/ice_ethdev.h:502
Checking patch drivers/net/ice/ice_rxtx.c...
error: while searching for:
	struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
	uint32_t regval;

	/* Set buffer size as the head split is disabled. */
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -

error: patch failed: drivers/net/ice/ice_rxtx.c:270
error: while searching for:
	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
		QRXFLXP_CNTXT_RXDID_PRIO_M;

	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
		regval |= QRXFLXP_CNTXT_TS_M;

	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);

error: patch failed: drivers/net/ice/ice_rxtx.c:366
Hunk #3 succeeded at 886 (offset 181 lines).
error: while searching for:
	struct ice_vsi *vsi = rxq->vsi;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint64_t ts_ns;

	rxdp = &rxq->rx_ring[rxq->rx_tail];
	rxep = &rxq->sw_ring[rxq->rx_tail];

error: patch failed: drivers/net/ice/ice_rxtx.c:1564
error: while searching for:
				}
			}

			mb->ol_flags |= pkt_flags;
		}


error: patch failed: drivers/net/ice/ice_rxtx.c:1618
error: while searching for:
	struct ice_vsi *vsi = rxq->vsi;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint64_t ts_ns;

	while (nb_rx < nb_pkts) {
		rxdp = &rx_ring[rx_id];

error: patch failed: drivers/net/ice/ice_rxtx.c:1804
error: while searching for:
			}
		}

		first_seg->ol_flags |= pkt_flags;
		/* Prefetch data of first segment, if configured to do so. */
		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,

error: patch failed: drivers/net/ice/ice_rxtx.c:1926
error: while searching for:
	struct ice_vsi *vsi = rxq->vsi;
	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
	uint64_t ts_ns;

	while (nb_rx < nb_pkts) {
		rxdp = &rx_ring[rx_id];

error: patch failed: drivers/net/ice/ice_rxtx.c:2284
error: while searching for:
			}
		}

		rxm->ol_flags |= pkt_flags;
		/* copy old mbuf to rx_pkts */
		rx_pkts[nb_rx++] = rxm;

error: patch failed: drivers/net/ice/ice_rxtx.c:2347
error: while searching for:
	static uint64_t mask = PKT_TX_TCP_SEG |
		PKT_TX_QINQ |
		PKT_TX_OUTER_IP_CKSUM |
		PKT_TX_TUNNEL_MASK;

	return (flags & mask) ? 1 : 0;
}

error: patch failed: drivers/net/ice/ice_rxtx.c:2558
error: while searching for:
			if (ol_flags & PKT_TX_TCP_SEG)
				cd_type_cmd_tso_mss |=
					ice_set_tso_ctx(tx_pkt, tx_offload);

			ctx_txd->tunneling_params =
				rte_cpu_to_le_32(cd_tunneling_params);

error: patch failed: drivers/net/ice/ice_rxtx.c:2726
error: while searching for:
		ad->rx_use_avx512 = false;
		ad->rx_use_avx2 = false;
		rx_check_ret = ice_rx_vec_dev_check(dev);
		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
			ad->rx_vec_allowed = true;

error: patch failed: drivers/net/ice/ice_rxtx.c:3127
Checking patch drivers/net/ice/ice_rxtx.h...
error: while searching for:
	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
	ice_rx_release_mbufs_t rx_rel_mbufs;
	uint64_t offloads;
};

struct ice_tx_entry {

error: patch failed: drivers/net/ice/ice_rxtx.h:92
Applied patch doc/guides/nics/features/ice.ini cleanly.
Applying patch doc/guides/rel_notes/release_21_11.rst with 1 reject...
Rejected hunk #1.
Applying patch drivers/net/ice/ice_ethdev.c with 5 rejects...
Rejected hunk #1.
Rejected hunk #2.
Rejected hunk #3.
Rejected hunk #4.
Rejected hunk #5.
Hunk #6 applied cleanly.
Applying patch drivers/net/ice/ice_ethdev.h with 2 rejects...
Rejected hunk #1.
Rejected hunk #2.
Applying patch drivers/net/ice/ice_rxtx.c with 11 rejects...
Rejected hunk #1.
Rejected hunk #2.
Hunk #3 applied cleanly.
Rejected hunk #4.
Rejected hunk #5.
Rejected hunk #6.
Rejected hunk #7.
Rejected hunk #8.
Rejected hunk #9.
Rejected hunk #10.
Rejected hunk #11.
Rejected hunk #12.
Applying patch drivers/net/ice/ice_rxtx.h with 1 reject...
Rejected hunk #1.
diff a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst	(rejected hunks)
@@ -82,6 +82,7 @@ New Features
   * Added 1PPS out support by a devargs.
   * Added IPv4 and L4(TCP/UDP/SCTP) checksum hash support in RSS flow.
   * Added DEV_RX_OFFLOAD_TIMESTAMP support.
+  * Added timesync API support under scalar path.
 
 * **Updated Marvell cnxk ethdev driver.**
 
diff a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c	(rejected hunks)
@@ -18,6 +18,7 @@
 #include "base/ice_flow.h"
 #include "base/ice_dcb.h"
 #include "base/ice_common.h"
+#include "base/ice_ptp_hw.h"
 
 #include "rte_pmd_ice.h"
 #include "ice_ethdev.h"
@@ -32,6 +33,8 @@
 #define ICE_ONE_PPS_OUT_ARG       "pps_out"
 #define ICE_RX_LOW_LATENCY_ARG    "rx_low_latency"
 
+#define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
+
 uint64_t ice_timestamp_dynflag;
 int ice_timestamp_dynfield_offset = -1;
 
@@ -45,7 +48,6 @@ static const char * const ice_valid_args[] = {
 	NULL
 };
 
-#define NSEC_PER_SEC      1000000000
 #define PPS_OUT_DELAY_NS  1
 
 static const struct rte_mbuf_dynfield ice_proto_xtr_metadata_param = {
@@ -151,6 +153,18 @@ static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
 static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_timesync_enable(struct rte_eth_dev *dev);
+static int ice_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp,
+					  uint32_t flags);
+static int ice_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+					  struct timespec *timestamp);
+static int ice_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
+static int ice_timesync_read_time(struct rte_eth_dev *dev,
+				  struct timespec *timestamp);
+static int ice_timesync_write_time(struct rte_eth_dev *dev,
+				   const struct timespec *timestamp);
+static int ice_timesync_disable(struct rte_eth_dev *dev);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
@@ -234,6 +248,13 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 	.tx_done_cleanup              = ice_tx_done_cleanup,
 	.get_monitor_addr             = ice_get_monitor_addr,
+	.timesync_enable              = ice_timesync_enable,
+	.timesync_read_rx_timestamp   = ice_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp   = ice_timesync_read_tx_timestamp,
+	.timesync_adjust_time         = ice_timesync_adjust_time,
+	.timesync_read_time           = ice_timesync_read_time,
+	.timesync_write_time          = ice_timesync_write_time,
+	.timesync_disable             = ice_timesync_disable,
 };
 
 /* store statistics names and its offset in stats structure */
diff a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h	(rejected hunks)
@@ -6,6 +6,7 @@
 #define _ICE_ETHDEV_H_
 
 #include <rte_kvargs.h>
+#include <rte_time.h>
 
 #include <ethdev_driver.h>
 
@@ -502,6 +503,11 @@ struct ice_adapter {
 	struct ice_devargs devargs;
 	enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
 	uint16_t fdir_ref_cnt;
+	/* For PTP */
+	struct rte_timecounter systime_tc;
+	struct rte_timecounter rx_tstamp_tc;
+	struct rte_timecounter tx_tstamp_tc;
+	bool ptp_ena;
 #ifdef RTE_ARCH_X86
 	bool rx_use_avx2;
 	bool rx_use_avx512;
diff a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c	(rejected hunks)
@@ -270,6 +270,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
 	uint32_t rxdid = ICE_RXDID_COMMS_OVS;
 	uint32_t regval;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	/* Set buffer size as the head split is disabled. */
 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
@@ -366,7 +367,7 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
 	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
 		QRXFLXP_CNTXT_RXDID_PRIO_M;
 
-	if (rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+	if (ad->ptp_ena || rxq->offloads & DEV_RX_OFFLOAD_TIMESTAMP)
 		regval |= QRXFLXP_CNTXT_TS_M;
 
 	ICE_WRITE_REG(hw, QRXFLXP_CNTXT(rxq->reg_idx), regval);
@@ -1564,6 +1566,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint64_t ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -1618,6 +1621,14 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
 				}
 			}
 
+			if (ad->ptp_ena && ((mb->packet_type &
+			    RTE_PTYPE_L2_MASK) == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+				rxq->time_high =
+				   rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high);
+				mb->timesync = rxq->queue_id;
+				pkt_flags |= PKT_RX_IEEE1588_PTP;
+			}
+
 			mb->ol_flags |= pkt_flags;
 		}
 
@@ -1804,6 +1815,7 @@ ice_recv_scattered_pkts(void *rx_queue,
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint64_t ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -1926,6 +1938,14 @@ ice_recv_scattered_pkts(void *rx_queue,
 			}
 		}
 
+		if (ad->ptp_ena && ((first_seg->packet_type & RTE_PTYPE_L2_MASK)
+		    == RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+			rxq->time_high =
+			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+			first_seg->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		first_seg->ol_flags |= pkt_flags;
 		/* Prefetch data of first segment, if configured to do so. */
 		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -2284,6 +2304,7 @@ ice_recv_pkts(void *rx_queue,
 	struct ice_vsi *vsi = rxq->vsi;
 	struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
 	uint64_t ts_ns;
+	struct ice_adapter *ad = rxq->vsi->adapter;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -2347,6 +2368,14 @@ ice_recv_pkts(void *rx_queue,
 			}
 		}
 
+		if (ad->ptp_ena && ((rxm->packet_type & RTE_PTYPE_L2_MASK) ==
+		    RTE_PTYPE_L2_ETHER_TIMESYNC)) {
+			rxq->time_high =
+			   rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
+			rxm->timesync = rxq->queue_id;
+			pkt_flags |= PKT_RX_IEEE1588_PTP;
+		}
+
 		rxm->ol_flags |= pkt_flags;
 		/* copy old mbuf to rx_pkts */
 		rx_pkts[nb_rx++] = rxm;
@@ -2558,7 +2587,8 @@ ice_calc_context_desc(uint64_t flags)
 	static uint64_t mask = PKT_TX_TCP_SEG |
 		PKT_TX_QINQ |
 		PKT_TX_OUTER_IP_CKSUM |
-		PKT_TX_TUNNEL_MASK;
+		PKT_TX_TUNNEL_MASK |
+		PKT_TX_IEEE1588_TMST;
 
 	return (flags & mask) ? 1 : 0;
 }
@@ -2726,6 +2756,10 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			if (ol_flags & PKT_TX_TCP_SEG)
 				cd_type_cmd_tso_mss |=
 					ice_set_tso_ctx(tx_pkt, tx_offload);
+			else if (ol_flags & PKT_TX_IEEE1588_TMST)
+				cd_type_cmd_tso_mss |=
+					((uint64_t)ICE_TX_CTX_DESC_TSYN <<
+					ICE_TXD_CTX_QW1_CMD_S);
 
 			ctx_txd->tunneling_params =
 				rte_cpu_to_le_32(cd_tunneling_params);
@@ -3127,6 +3161,8 @@ ice_set_rx_function(struct rte_eth_dev *dev)
 		ad->rx_use_avx512 = false;
 		ad->rx_use_avx2 = false;
 		rx_check_ret = ice_rx_vec_dev_check(dev);
+		if (ad->ptp_ena)
+			rx_check_ret = -1;
 		if (rx_check_ret >= 0 && ad->rx_bulk_alloc_allowed &&
 		    rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
 			ad->rx_vec_allowed = true;
diff a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h	(rejected hunks)
@@ -92,6 +92,7 @@ struct ice_rx_queue {
 	ice_rxd_to_pkt_fields_t rxd_to_pkt_fields; /* handle FlexiMD by RXDID */
 	ice_rx_release_mbufs_t rx_rel_mbufs;
 	uint64_t offloads;
+	uint32_t time_high;
 };
 
 struct ice_tx_entry {

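For reference, a minimal sketch of reproducing this failure locally, assuming a dpdk checkout and the patch mbox downloaded from the patchwork URL above (the mbox file name below is illustrative, not taken from this report):

    # Check out the baseline commit the lab applied against (CommitID above).
    git checkout 2700326085033fd13339a8de31f58a95d1ee9c3f

    # Attempt a 3-way apply so git surfaces the conflicting context
    # instead of emitting the flat hunk rejects shown above.
    git am -3 pw99850-v6-net-ice-ieee1588-ptp.mbox

The rejects are consistent with the surrounding context in release_21_11.rst, ice_ethdev.c/.h and ice_rxtx.c/.h having shifted on the target branch after v6 was posted, as the line offsets reported for the hunks that did apply suggest.
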
https://lab.dpdk.org/results/dashboard/patchsets/19006/

UNH-IOL DPDK Community Lab
