From: Simei Su <simei.su@intel.com>
To: qi.z.zhang@intel.com, qiming.yang@intel.com
Cc: dev@dpdk.org, wenjun1.wu@intel.com, Simei Su <simei.su@intel.com>
Subject: [PATCH v1 2/3] net/iavf: enable Rx timestamp on Flex Descriptor
Date: Fri, 8 Apr 2022 10:13:06 +0800 [thread overview]
Message-ID: <20220408021307.272746-3-simei.su@intel.com> (raw)
In-Reply-To: <20220408021307.272746-1-simei.su@intel.com>
Dump Rx timestamp value into dynamic mbuf field by flex descriptor.
This feature is turned on by dev config "enable-rx-timestamp".
Currently, it's only supported under scalar path.
Signed-off-by: Simei Su <simei.su@intel.com>
---
drivers/net/iavf/iavf.h | 5 ++
drivers/net/iavf/iavf_ethdev.c | 26 +++++++++++
drivers/net/iavf/iavf_rxtx.c | 58 +++++++++++++++++++++++
drivers/net/iavf/iavf_rxtx.h | 22 +++++++++
drivers/net/iavf/iavf_rxtx_vec_common.h | 3 ++
drivers/net/iavf/iavf_vchnl.c | 83 ++++++++++++++++++++++++++++-----
6 files changed, 186 insertions(+), 11 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index a01d18e..2838b5e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -257,6 +257,8 @@ struct iavf_info {
struct iavf_tm_conf tm_conf;
struct rte_eth_dev *eth_dev;
+
+ uint32_t ptp_caps;
};
#define IAVF_MAX_PKT_TYPE 1024
@@ -300,6 +302,7 @@ struct iavf_adapter {
bool stopped;
uint16_t fdir_ref_cnt;
struct iavf_devargs devargs;
+ uint64_t phc_time;
};
/* IAVF_DEV_PRIVATE_TO */
@@ -460,4 +463,6 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
uint8_t *msg, size_t msg_len,
uint8_t *resp_msg, size_t resp_msg_len);
extern const struct rte_tm_ops iavf_tm_ops;
+int iavf_get_ptp_cap(struct iavf_adapter *adapter);
+int iavf_get_phc_time(struct iavf_adapter *adapter);
#endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d6190ac..704c174 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -35,6 +35,9 @@
/* devargs */
#define IAVF_PROTO_XTR_ARG "proto_xtr"
+uint64_t iavf_timestamp_dynflag;
+int iavf_timestamp_dynfield_offset = -1;
+
static const char * const iavf_valid_args[] = {
IAVF_PROTO_XTR_ARG,
NULL
@@ -685,6 +688,7 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
struct rte_eth_dev_data *dev_data = dev->data;
uint16_t buf_size, max_pkt_len;
uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
+ enum iavf_status err;
buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
@@ -703,6 +707,18 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
return -EINVAL;
}
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
+ /* Register mbuf field and flag for Rx timestamp */
+ err = rte_mbuf_dyn_rx_timestamp_register(
+ &iavf_timestamp_dynfield_offset,
+ &iavf_timestamp_dynflag);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Cannot register mbuf field/flag for timestamp");
+ return -EINVAL;
+ }
+ }
+
rxq->max_pkt_len = max_pkt_len;
if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
rxq->max_pkt_len > buf_size) {
@@ -945,6 +961,13 @@ iavf_dev_start(struct rte_eth_dev *dev)
return -1;
}
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP) {
+ if (iavf_get_ptp_cap(adapter)) {
+ PMD_INIT_LOG(ERR, "Failed to get ptp capability");
+ return -1;
+ }
+ }
+
if (iavf_init_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "failed to do Queue init");
return -1;
@@ -1087,6 +1110,9 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP)
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
+
if (iavf_ipsec_crypto_supported(adapter)) {
dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SECURITY;
dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 764218a..ab5b3de 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -1429,6 +1429,11 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ struct iavf_adapter *ad = rxq->vsi->adapter;
+ uint64_t ts_ns;
+
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ rxq->hw_register_set = 1;
while (nb_rx < nb_pkts) {
rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
@@ -1491,6 +1496,21 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
&rxq->stats.ipsec_crypto);
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, rxm, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+
+ if (iavf_timestamp_dynflag > 0) {
+ if (rxq->hw_register_set)
+ iavf_get_phc_time(ad);
+
+ rxq->hw_register_set = 0;
+ ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+ *RTE_MBUF_DYNFIELD(rxm,
+ iavf_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ rxm->ol_flags |= iavf_timestamp_dynflag;
+ }
+
rxm->ol_flags |= pkt_flags;
rx_pkts[nb_rx++] = rxm;
@@ -1519,11 +1539,16 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t rx_stat_err0;
uint64_t dma_addr;
uint64_t pkt_flags;
+ struct iavf_adapter *ad = rxq->vsi->adapter;
+ uint64_t ts_ns;
volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
volatile union iavf_rx_flex_desc *rxdp;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ rxq->hw_register_set = 1;
+
while (nb_rx < nb_pkts) {
rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
@@ -1636,6 +1661,20 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
rxd_to_pkt_fields_ops[rxq->rxdid](rxq, first_seg, &rxd);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+ if (iavf_timestamp_dynflag > 0) {
+ if (rxq->hw_register_set)
+ iavf_get_phc_time(ad);
+
+ rxq->hw_register_set = 0;
+ ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high));
+
+ *RTE_MBUF_DYNFIELD(first_seg,
+ iavf_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ first_seg->ol_flags |= iavf_timestamp_dynflag;
+ }
+
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
@@ -1828,6 +1867,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
int32_t i, j, nb_rx = 0;
uint64_t pkt_flags;
const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ struct iavf_adapter *ad = rxq->vsi->adapter;
+ uint64_t ts_ns;
rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -1838,6 +1879,9 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
return 0;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ rxq->hw_register_set = 1;
+
/* Scan LOOK_AHEAD descriptors at a time to determine which
* descriptors reference packets that are ready to be received.
*/
@@ -1896,6 +1940,20 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
+ if (iavf_timestamp_dynflag > 0) {
+ if (rxq->hw_register_set)
+ iavf_get_phc_time(ad);
+
+ rxq->hw_register_set = 0;
+ ts_ns = iavf_tstamp_convert_32b_64b(ad->phc_time,
+ rte_le_to_cpu_32(rxdp[j].wb.flex_ts.ts_high));
+
+ *RTE_MBUF_DYNFIELD(mb,
+ iavf_timestamp_dynfield_offset,
+ rte_mbuf_timestamp_t *) = ts_ns;
+ mb->ol_flags |= iavf_timestamp_dynflag;
+ }
+
mb->ol_flags |= pkt_flags;
}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index bf8aebb..37453c4 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -72,6 +72,9 @@
#define IAVF_TX_OFFLOAD_NOTSUP_MASK \
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
+extern uint64_t iavf_timestamp_dynflag;
+extern int iavf_timestamp_dynfield_offset;
+
/**
* Rx Flex Descriptors
* These descriptors are used instead of the legacy version descriptors
@@ -219,6 +222,7 @@ struct iavf_rx_queue {
/* flexible descriptor metadata extraction offload flag */
struct iavf_rx_queue_stats stats;
uint64_t offloads;
+ uint32_t hw_register_set;
};
struct iavf_tx_entry {
@@ -778,6 +782,24 @@ void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
}
}
+static inline
+uint64_t iavf_tstamp_convert_32b_64b(uint64_t time, uint32_t in_timestamp)
+{
+ const uint64_t mask = 0xFFFFFFFF;
+ uint32_t delta;
+ uint64_t ns;
+
+ delta = (in_timestamp - (uint32_t)(time & mask));
+ if (delta > (mask / 2)) {
+ delta = ((uint32_t)(time & mask) - in_timestamp);
+ ns = time - delta;
+ } else {
+ ns = time + delta;
+ }
+
+ return ns;
+}
+
#ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
#define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
iavf_dump_rx_descriptor(rxq, desc, rx_id)
diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 1fd37b7..a59cb2c 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -231,6 +231,9 @@ iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq)
if (rxq->proto_xtr != IAVF_PROTO_XTR_NONE)
return -1;
+ if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ return -1;
+
if (rxq->offloads & IAVF_RX_VECTOR_OFFLOAD)
return IAVF_VECTOR_OFFLOAD_PATH;
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 169e1f2..0d8a8dd 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -502,7 +502,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
VIRTCHNL_VF_LARGE_NUM_QPAIRS |
VIRTCHNL_VF_OFFLOAD_QOS |
- VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO;
+ VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO |
+ VIRTCHNL_VF_CAP_PTP;
args.in_args = (uint8_t *)&caps;
args.in_args_size = sizeof(caps);
@@ -1047,16 +1048,21 @@ iavf_configure_queues(struct iavf_adapter *adapter,
vc_qp->rxq.crc_disable = rxq[i]->crc_len != 0 ? 1 : 0;
#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
if (vf->vf_res->vf_cap_flags &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
- vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
- vc_qp->rxq.rxdid = rxq[i]->rxdid;
- PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
- vc_qp->rxq.rxdid, i);
- } else {
- PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
- "request default RXDID[%d] in Queue[%d]",
- rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
- vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ if (vf->supported_rxdid & BIT(rxq[i]->rxdid)) {
+ vc_qp->rxq.rxdid = rxq[i]->rxdid;
+ PMD_DRV_LOG(NOTICE, "request RXDID[%d] in Queue[%d]",
+ vc_qp->rxq.rxdid, i);
+ } else {
+ PMD_DRV_LOG(NOTICE, "RXDID[%d] is not supported, "
+ "request default RXDID[%d] in Queue[%d]",
+ rxq[i]->rxdid, IAVF_RXDID_LEGACY_1, i);
+ vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
+ }
+
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP &&
+ vf->ptp_caps & VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)
+ vc_qp->rxq.flags |= VIRTCHNL_PTP_RX_TSTAMP;
}
#else
if (vf->vf_res->vf_cap_flags &
@@ -1805,3 +1811,58 @@ iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
return 0;
}
+
+int
+iavf_get_ptp_cap(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_ptp_caps ptp_caps;
+ struct iavf_cmd_info args;
+ int err;
+
+ ptp_caps.caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
+ VIRTCHNL_1588_PTP_CAP_READ_PHC;
+
+ args.ops = VIRTCHNL_OP_1588_PTP_GET_CAPS;
+ args.in_args = (uint8_t *)&ptp_caps;
+ args.in_args_size = sizeof(ptp_caps);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args, 0);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of OP_1588_PTP_GET_CAPS");
+ return err;
+ }
+
+ vf->ptp_caps = ((struct virtchnl_ptp_caps *)args.out_buffer)->caps;
+
+ return 0;
+}
+
+int
+iavf_get_phc_time(struct iavf_adapter *adapter)
+{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_phc_time phc_time;
+ struct iavf_cmd_info args;
+ int err;
+
+ args.ops = VIRTCHNL_OP_1588_PTP_GET_TIME;
+ args.in_args = (uint8_t *)&phc_time;
+ args.in_args_size = sizeof(phc_time);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = IAVF_AQ_BUF_SZ;
+
+ err = iavf_execute_vf_cmd(adapter, &args, 0);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL_OP_1588_PTP_GET_TIME");
+ return err;
+ }
+
+ adapter->phc_time = ((struct virtchnl_phc_time *)args.out_buffer)->time;
+
+ return 0;
+}
--
2.9.5
next prev parent reply other threads:[~2022-04-08 2:18 UTC|newest]
Thread overview: 14+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-04-08 2:13 [PATCH v1 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
2022-04-08 2:13 ` [PATCH v1 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
2022-04-08 2:13 ` Simei Su [this message]
2022-04-08 2:13 ` [PATCH v1 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
2022-04-24 7:08 ` [PATCH v2 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
2022-04-24 7:08 ` [PATCH v2 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
2022-04-24 7:08 ` [PATCH v2 2/3] net/iavf: enable Rx timestamp on Flex Descriptor Simei Su
2022-04-24 7:08 ` [PATCH v2 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
2022-04-28 8:13 ` [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor Simei Su
2022-04-28 8:13 ` [PATCH v3 1/3] common/iavf: support Rx timestamp in virtual channel Simei Su
2022-04-28 8:13 ` [PATCH v3 2/3] net/iavf: enable Rx timestamp on Flex Descriptor Simei Su
2022-04-28 8:13 ` [PATCH v3 3/3] net/iavf: improve performance of Rx timestamp offload Simei Su
2022-05-10 16:00 ` Thomas Monjalon
2022-04-29 2:56 ` [PATCH v3 0/3] net/iavf: support Rx timestamp on flex descriptor Zhang, Qi Z
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220408021307.272746-3-simei.su@intel.com \
--to=simei.su@intel.com \
--cc=dev@dpdk.org \
--cc=qi.z.zhang@intel.com \
--cc=qiming.yang@intel.com \
--cc=wenjun1.wu@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).