DPDK patches and discussions
* [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases
@ 2022-08-12 16:52 peng1x.zhang
  2022-08-12 16:52 ` [PATCH 2/2] net/iavf: support inner and outer checksum offload peng1x.zhang
                   ` (2 more replies)
  0 siblings, 3 replies; 20+ messages in thread
From: peng1x.zhang @ 2022-08-12 16:52 UTC (permalink / raw)
  To: dev; +Cc: beilei.xing, jingjing.wu, Peng Zhang

From: Peng Zhang <peng1x.zhang@intel.com>

The hardware limits the maximum buffer size per Tx descriptor to (16K-1)B.
So when TSO is enabled in the unencrypted (non-IPsec) scenario, the mbuf
data size may exceed this limit and cause malicious behavior on the NIC.

This patch adds support for splitting such a large buffer across
multiple Tx descriptors.
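
As a worked illustration of the limit, here is a standalone sketch of the
split (the 45000-byte segment size is a made-up example; only the
IAVF_MAX_DATA_PER_TXD value mirrors the macro added below):

#include <stdio.h>

#define IAVF_MAX_DATA_PER_TXD 16383u

int main(void)
{
	unsigned int slen = 45000;	/* example mbuf data_len */

	/* One oversized TSO segment is split into chunks of at most
	 * (16K-1) = 16383 bytes: 16383 + 16383 + 12234 here.
	 */
	while (slen > IAVF_MAX_DATA_PER_TXD) {
		printf("descriptor: %u bytes\n", IAVF_MAX_DATA_PER_TXD);
		slen -= IAVF_MAX_DATA_PER_TXD;
	}
	printf("descriptor: %u bytes\n", slen);	/* remaining 12234 bytes */
	return 0;
}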

Signed-off-by: Peng Zhang <peng1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c | 66 ++++++++++++++++++++++++++++++++----
 1 file changed, 60 insertions(+), 6 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index dfd021889e..adec58e90a 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2642,6 +2642,47 @@ iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
 	return NULL;
 }
 
+/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
+#define IAVF_MAX_DATA_PER_TXD \
+	(IAVF_TXD_QW1_TX_BUF_SZ_MASK >> IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+static inline void
+iavf_fill_unencrypt_desc(volatile struct iavf_tx_desc *txd, struct rte_mbuf *m,
+			 volatile uint64_t desc_template, struct iavf_tx_entry *txe,
+			 volatile struct iavf_tx_desc *txr, struct iavf_tx_entry *txe_ring,
+			 int desc_idx_last)
+{
+		/* Setup TX Descriptor */
+		int desc_idx;
+		uint16_t slen = m->data_len;
+		uint64_t buf_dma_addr = rte_mbuf_data_iova(m);
+		struct iavf_tx_entry *txn = &txe_ring[txe->next_id];
+
+		while ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
+			unlikely(slen > IAVF_MAX_DATA_PER_TXD)) {
+			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
+			txd->cmd_type_offset_bsz =
+			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
+			(uint64_t)IAVF_MAX_DATA_PER_TXD <<
+			IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) | desc_template;
+
+			buf_dma_addr += IAVF_MAX_DATA_PER_TXD;
+			slen -= IAVF_MAX_DATA_PER_TXD;
+
+			txe->last_id = desc_idx_last;
+			desc_idx = txe->next_id;
+			txe = txn;
+			txd = &txr[desc_idx];
+			txn = &txe_ring[txe->next_id];
+		}
+
+		txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+		txd->cmd_type_offset_bsz =
+			rte_cpu_to_le_64((uint64_t)slen << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) |
+				desc_template;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2650,6 +2691,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	volatile struct iavf_tx_desc *txr = txq->tx_ring;
 	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
+	volatile struct iavf_tx_desc *txd;
 	struct rte_mbuf *mb, *mb_seg;
 	uint16_t desc_idx, desc_idx_last;
 	uint16_t idx;
@@ -2781,6 +2823,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			ddesc = (volatile struct iavf_tx_desc *)
 					&txr[desc_idx];
 
+			txd = &txr[desc_idx];
 			txn = &txe_ring[txe->next_id];
 			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
 
@@ -2788,10 +2831,16 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				rte_pktmbuf_free_seg(txe->mbuf);
 
 			txe->mbuf = mb_seg;
-			iavf_fill_data_desc(ddesc, mb_seg,
-					ddesc_template, tlen, ipseclen);
 
-			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+			if (nb_desc_ipsec) {
+				iavf_fill_data_desc(ddesc, mb_seg,
+					ddesc_template, tlen, ipseclen);
+				IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+			} else {
+				iavf_fill_unencrypt_desc(txd, mb_seg,
+					ddesc_template, txe, txr, txe_ring, desc_idx_last);
+				IAVF_DUMP_TX_DESC(txq, txd, desc_idx);
+			}
 
 			txe->last_id = desc_idx_last;
 			desc_idx = txe->next_id;
@@ -2816,10 +2865,15 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txq->nb_used = 0;
 		}
 
-		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+		if (nb_desc_ipsec) {
+			ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
 				IAVF_TXD_DATA_QW1_CMD_SHIFT);
-
-		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
+			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
+		} else {
+			txd->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+				IAVF_TXD_DATA_QW1_CMD_SHIFT);
+			IAVF_DUMP_TX_DESC(txq, txd, desc_idx - 1);
+		}
 	}
 
 end_of_tx:
-- 
2.25.1



* [PATCH 2/2] net/iavf: support inner and outer checksum offload
  2022-08-12 16:52 [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases peng1x.zhang
@ 2022-08-12 16:52 ` peng1x.zhang
  2022-08-30  8:12   ` Yang, Qiming
  2022-09-01  9:33   ` [PATCH v2] net/iavf: enable inner and outer Tx " Peng Zhang
  2022-08-30  7:52 ` [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases Yang, Qiming
  2022-09-26  5:17 ` [PATCH v2] net/iavf: fix TSO offload for tunnel case Zhichao Zeng
  2 siblings, 2 replies; 20+ messages in thread
From: peng1x.zhang @ 2022-08-12 16:52 UTC (permalink / raw)
  To: dev; +Cc: beilei.xing, jingjing.wu, Peng Zhang

From: Peng Zhang <peng1x.zhang@intel.com>

Add support for inner and outer Tx checksum offload for tunneled
packets by configuring the tunneling parameters in Tx descriptors,
including outer L3/L4 checksum offload.
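
For context, a minimal sketch of the per-mbuf setup an application could
use to request these offloads on a VXLAN-encapsulated TCP packet (the
header lengths and flag combination below are illustrative, not taken
from this patch; they use the standard rte_mbuf Tx offload fields):

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

/* Request outer IPv4/UDP checksum plus inner IPv4/TCP checksum offload. */
static void
request_tunnel_cksum(struct rte_mbuf *m)
{
	m->outer_l2_len = RTE_ETHER_HDR_LEN;			/* outer Ethernet */
	m->outer_l3_len = sizeof(struct rte_ipv4_hdr);		/* outer IPv4 */
	/* l2_len spans outer UDP + VXLAN + inner Ethernet for MAC-in-UDP */
	m->l2_len = sizeof(struct rte_udp_hdr) + 8 + RTE_ETHER_HDR_LEN;
	m->l3_len = sizeof(struct rte_ipv4_hdr);		/* inner IPv4 */
	m->l4_len = sizeof(struct rte_tcp_hdr);			/* inner TCP */

	m->ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN |
		       RTE_MBUF_F_TX_OUTER_IPV4 |
		       RTE_MBUF_F_TX_OUTER_IP_CKSUM |
		       RTE_MBUF_F_TX_OUTER_UDP_CKSUM |
		       RTE_MBUF_F_TX_IPV4 |
		       RTE_MBUF_F_TX_IP_CKSUM |
		       RTE_MBUF_F_TX_TCP_CKSUM;
}

The driver then takes MACLEN from outer_l2_len and the L4 tunneling
length from l2_len when building the context descriptor, as in the hunks
below.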

Signed-off-by: Peng Zhang <peng1x.zhang@intel.com>
---
 drivers/net/iavf/iavf_ethdev.c |  3 +-
 drivers/net/iavf/iavf_rxtx.c   | 51 +++++++++++++++++++++++++++++++---
 drivers/net/iavf/iavf_rxtx.h   |  8 +++++-
 3 files changed, 56 insertions(+), 6 deletions(-)

diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 506fcff6e3..59238ecceb 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1140,7 +1140,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
 		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
-		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
 
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index adec58e90a..7cbebafc09 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2334,7 +2334,7 @@ static inline uint16_t
 iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
 {
 	if (flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
-			RTE_MBUF_F_TX_TUNNEL_MASK))
+	    RTE_MBUF_F_TX_TUNNEL_MASK | RTE_MBUF_F_TX_OUTER_IP_CKSUM))
 		return 1;
 	if (flags & RTE_MBUF_F_TX_VLAN &&
 	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
@@ -2399,6 +2399,44 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 	break;
 	}
 
+	/* L4TUNT: L4 Tunneling Type */
+	switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+	case RTE_MBUF_F_TX_TUNNEL_IPIP:
+		/* for non UDP / GRE tunneling, set to 00b */
+		break;
+	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+	case RTE_MBUF_F_TX_TUNNEL_GTP:
+	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
+		eip_typ |= IAVF_TXD_CTX_UDP_TUNNELING;
+		break;
+	case RTE_MBUF_F_TX_TUNNEL_GRE:
+		eip_typ |= IAVF_TXD_CTX_GRE_TUNNELING;
+		break;
+	default:
+		PMD_TX_LOG(ERR, "Tunnel type not supported");
+		return;
+	}
+
+	/* L4TUNLEN: L4 Tunneling Length, in Words
+	 *
+	 * We depend on app to set rte_mbuf.l2_len correctly.
+	 * For IP in GRE it should be set to the length of the GRE
+	 * header;
+	 * For MAC in GRE or MAC in UDP it should be set to the length
+	 * of the GRE or UDP headers plus the inner MAC up to including
+	 * its last Ethertype.
+	 * If MPLS labels exists, it should include them as well.
+	 */
+	eip_typ |= (m->l2_len >> 1) << IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
+
+	/**
+	 * Calculate the tunneling UDP checksum.
+	 * Shall be set only if L4TUNT = 01b and EIPT is not zero
+	 */
+	if (!(eip_typ & IAVF_TX_CTX_EXT_IP_NONE) &&
+	    (eip_typ & IAVF_TXD_CTX_UDP_TUNNELING))
+		eip_typ |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
+
 	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
 		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
 		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
@@ -2417,7 +2455,7 @@ iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
 		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
 		if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-			total_length -= m->outer_l3_len;
+			total_length -= m->outer_l3_len +  m->outer_l2_len;
 	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
@@ -2535,8 +2573,13 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 	}
 
 	/* Set MACLEN */
-	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
-
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+		offset |= (m->outer_l2_len >> 1)
+			<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+	else
+		offset |= (m->l2_len >> 1)
+			<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
 	/* Enable L3 checksum offloading inner */
 	if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
 		if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 1695e43cd5..4b40ad3615 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -26,6 +26,8 @@
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
 		RTE_ETH_TX_OFFLOAD_TCP_TSO |		 \
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |    \
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |	\
 		RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
@@ -56,7 +58,8 @@
 #define IAVF_TX_CKSUM_OFFLOAD_MASK (		 \
 		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
-		RTE_MBUF_F_TX_TCP_SEG)
+		RTE_MBUF_F_TX_TCP_SEG |          \
+		RTE_MBUF_F_TX_OUTER_IP_CKSUM)
 
 #define IAVF_TX_OFFLOAD_MASK (  \
 		RTE_MBUF_F_TX_OUTER_IPV6 |		 \
@@ -67,6 +70,9 @@
 		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
 		RTE_MBUF_F_TX_TCP_SEG |		 \
+		RTE_MBUF_F_TX_TUNNEL_MASK |	\
+		RTE_MBUF_F_TX_OUTER_IP_CKSUM |  \
+		RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \
 		RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
-- 
2.25.1



* RE: [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases
  2022-08-12 16:52 [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases peng1x.zhang
  2022-08-12 16:52 ` [PATCH 2/2] net/iavf: support inner and outer checksum offload peng1x.zhang
@ 2022-08-30  7:52 ` Yang, Qiming
  2022-09-26  5:17 ` [PATCH v2] net/iavf: fix TSO offload for tunnel case Zhichao Zeng
  2 siblings, 0 replies; 20+ messages in thread
From: Yang, Qiming @ 2022-08-30  7:52 UTC (permalink / raw)
  To: Zhang, Peng1X, dev; +Cc: Xing, Beilei, Wu, Jingjing, Zhang, Peng1X

Please retest: TCP/UDP/tunnel-TCP/tunnel-UDP packets

> -----Original Message-----
> From: peng1x.zhang@intel.com <peng1x.zhang@intel.com>
> Sent: Saturday, August 13, 2022 12:52 AM
> To: dev@dpdk.org
> Cc: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Zhang, Peng1X <peng1x.zhang@intel.com>
> Subject: [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases

Should be a bug fix patch. 

> 
> From: Peng Zhang <peng1x.zhang@intel.com>
> 
No need for this line.

> Hardware limits that max buffer size per Tx descriptor should be (16K-1)B.
> So when TSO enabled under unencrypt scenario, the mbuf data size may
> exceed the limit and cause malicious behavior to the NIC.

So this patch is actually fixing tunnel TSO not being enabled.

> 
> This patch supports Tx descriptors for this kind of large buffer.
> 
> Signed-off-by: Peng Zhang <peng1x.zhang@intel.com>
> ---
>  drivers/net/iavf/iavf_rxtx.c | 66 ++++++++++++++++++++++++++++++++----
>  1 file changed, 60 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index
> dfd021889e..adec58e90a 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -2642,6 +2642,47 @@ iavf_ipsec_crypto_get_pkt_metadata(const struct
> iavf_tx_queue *txq,
>  	return NULL;
>  }
> 
> +/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
> +#define IAVF_MAX_DATA_PER_TXD \
> +	(IAVF_TXD_QW1_TX_BUF_SZ_MASK >>
> IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
> +
> +static inline void
> +iavf_fill_unencrypt_desc(volatile struct iavf_tx_desc *txd, struct rte_mbuf
> *m,
> +			 volatile uint64_t desc_template, struct iavf_tx_entry
> *txe,
> +			 volatile struct iavf_tx_desc *txr, struct iavf_tx_entry
> *txe_ring,
> +			 int desc_idx_last)
> +{
> +		/* Setup TX Descriptor */
> +		int desc_idx;
> +		uint16_t slen = m->data_len;
> +		uint64_t buf_dma_addr = rte_mbuf_data_iova(m);
> +		struct iavf_tx_entry *txn = &txe_ring[txe->next_id];
> +
> +		while ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&

??? Missing UDP (RTE_MBUF_F_TX_UDP_SEG)?

> +			unlikely(slen > IAVF_MAX_DATA_PER_TXD)) {
> +			txd->buffer_addr =
> rte_cpu_to_le_64(buf_dma_addr);
> +
> +			txd->cmd_type_offset_bsz =
> +			rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
> +			(uint64_t)IAVF_MAX_DATA_PER_TXD <<
> +			IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) |
> desc_template;
> +
> +			buf_dma_addr += IAVF_MAX_DATA_PER_TXD;
> +			slen -= IAVF_MAX_DATA_PER_TXD;
> +
> +			txe->last_id = desc_idx_last;
> +			desc_idx = txe->next_id;
> +			txe = txn;
> +			txd = &txr[desc_idx];
> +			txn = &txe_ring[txe->next_id];
> +		}
> +
> +		txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
> +		txd->cmd_type_offset_bsz =
> +			rte_cpu_to_le_64((uint64_t)slen <<
> IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) |
> +				desc_template;
> +}
> +
>  /* TX function */
>  uint16_t
>  iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> @@ -2650,6 +2691,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
>  	volatile struct iavf_tx_desc *txr = txq->tx_ring;
>  	struct iavf_tx_entry *txe_ring = txq->sw_ring;
>  	struct iavf_tx_entry *txe, *txn;
> +	volatile struct iavf_tx_desc *txd;
>  	struct rte_mbuf *mb, *mb_seg;
>  	uint16_t desc_idx, desc_idx_last;
>  	uint16_t idx;
> @@ -2781,6 +2823,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
>  			ddesc = (volatile struct iavf_tx_desc *)
>  					&txr[desc_idx];
> 
> +			txd = &txr[desc_idx];
>  			txn = &txe_ring[txe->next_id];
>  			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
> 
> @@ -2788,10 +2831,16 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
>  				rte_pktmbuf_free_seg(txe->mbuf);
> 
>  			txe->mbuf = mb_seg;
> -			iavf_fill_data_desc(ddesc, mb_seg,
> -					ddesc_template, tlen, ipseclen);
> 
> -			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
> +			if (nb_desc_ipsec) {
> +				iavf_fill_data_desc(ddesc, mb_seg,
> +					ddesc_template, tlen, ipseclen);
> +				IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
> +			} else {
> +				iavf_fill_unencrypt_desc(txd, mb_seg,
> +					ddesc_template, txe, txr, txe_ring,
> desc_idx_last);
> +				IAVF_DUMP_TX_DESC(txq, txd, desc_idx);
> +			}
> 
>  			txe->last_id = desc_idx_last;
>  			desc_idx = txe->next_id;
> @@ -2816,10 +2865,15 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
>  			txq->nb_used = 0;
>  		}
> 
> -		ddesc->cmd_type_offset_bsz |=
> rte_cpu_to_le_64(ddesc_cmd <<
> +		if (nb_desc_ipsec) {
> +			ddesc->cmd_type_offset_bsz |=
> rte_cpu_to_le_64(ddesc_cmd <<
>  				IAVF_TXD_DATA_QW1_CMD_SHIFT);
> -
> -		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
> +			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
> +		} else {
> +			txd->cmd_type_offset_bsz |=
> rte_cpu_to_le_64(ddesc_cmd <<
> +				IAVF_TXD_DATA_QW1_CMD_SHIFT);
> +			IAVF_DUMP_TX_DESC(txq, txd, desc_idx - 1);
> +		}
>  	}
> 
>  end_of_tx:
> --
> 2.25.1



* RE: [PATCH 2/2] net/iavf: support inner and outer checksum offload
  2022-08-12 16:52 ` [PATCH 2/2] net/iavf: support inner and outer checksum offload peng1x.zhang
@ 2022-08-30  8:12   ` Yang, Qiming
  2022-09-01  9:33   ` [PATCH v2] net/iavf: enable inner and outer Tx " Peng Zhang
  1 sibling, 0 replies; 20+ messages in thread
From: Yang, Qiming @ 2022-08-30  8:12 UTC (permalink / raw)
  To: Zhang, Peng1X, dev; +Cc: Xing, Beilei, Wu, Jingjing, Zhang, Peng1X



> -----Original Message-----
> From: peng1x.zhang@intel.com <peng1x.zhang@intel.com>
> Sent: Saturday, August 13, 2022 12:52 AM
> To: dev@dpdk.org
> Cc: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Zhang, Peng1X <peng1x.zhang@intel.com>
> Subject: [PATCH 2/2] net/iavf: support inner and outer checksum offload
> 
> From: Peng Zhang <peng1x.zhang@intel.com>

No need for this line.

> 
> Add the support of inner and outer Tx checksum offload for tunneling
> packets by configuring tunneling parameters in Tx descriptors, including
> outer L3/L4 checksum offload.

Enable inner and outer Tx checksum offload for tunnel packets by configuring ol_flags.

> 
> Signed-off-by: Peng Zhang <peng1x.zhang@intel.com>
> ---
>  drivers/net/iavf/iavf_ethdev.c |  3 +-
>  drivers/net/iavf/iavf_rxtx.c   | 51 +++++++++++++++++++++++++++++++---
>  drivers/net/iavf/iavf_rxtx.h   |  8 +++++-
>  3 files changed, 56 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
> index 506fcff6e3..59238ecceb 100644
> --- a/drivers/net/iavf/iavf_ethdev.c
> +++ b/drivers/net/iavf/iavf_ethdev.c
> @@ -1140,7 +1140,8 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
>  		RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
>  		RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
>  		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
> -		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
> +		RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
> +		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;

Add this line after OUTER_IPV4_CKSUM

> 
>  	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
>  		dev_info->rx_offload_capa |=
> RTE_ETH_RX_OFFLOAD_KEEP_CRC; diff --git a/drivers/net/iavf/iavf_rxtx.c
> b/drivers/net/iavf/iavf_rxtx.c index adec58e90a..7cbebafc09 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -2334,7 +2334,7 @@ static inline uint16_t
> iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)  {
>  	if (flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
> -			RTE_MBUF_F_TX_TUNNEL_MASK))
> +	    RTE_MBUF_F_TX_TUNNEL_MASK |
> RTE_MBUF_F_TX_OUTER_IP_CKSUM))

OUTER_UDP_CKSUM?

>  		return 1;
>  	if (flags & RTE_MBUF_F_TX_VLAN &&
>  	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
> @@ -2399,6 +2399,44 @@ iavf_fill_ctx_desc_tunnelling_field(volatile
> uint64_t *qw0,
>  	break;
>  	}
> 
> +	/* L4TUNT: L4 Tunneling Type */
> +	switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
> +	case RTE_MBUF_F_TX_TUNNEL_IPIP:
> +		/* for non UDP / GRE tunneling, set to 00b */
> +		break;
> +	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
> +	case RTE_MBUF_F_TX_TUNNEL_GTP:
> +	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
> +		eip_typ |= IAVF_TXD_CTX_UDP_TUNNELING;
> +		break;
> +	case RTE_MBUF_F_TX_TUNNEL_GRE:
> +		eip_typ |= IAVF_TXD_CTX_GRE_TUNNELING;
> +		break;
> +	default:
> +		PMD_TX_LOG(ERR, "Tunnel type not supported");
> +		return;
> +	}
> +
> +	/* L4TUNLEN: L4 Tunneling Length, in Words
> +	 *
> +	 * We depend on app to set rte_mbuf.l2_len correctly.
> +	 * For IP in GRE it should be set to the length of the GRE
> +	 * header;
> +	 * For MAC in GRE or MAC in UDP it should be set to the length
> +	 * of the GRE or UDP headers plus the inner MAC up to including
> +	 * its last Ethertype.
> +	 * If MPLS labels exists, it should include them as well.
> +	 */
> +	eip_typ |= (m->l2_len >> 1) << IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
> +
> +	/**
> +	 * Calculate the tunneling UDP checksum.
> +	 * Shall be set only if L4TUNT = 01b and EIPT is not zero
> +	 */
> +	if (!(eip_typ & IAVF_TX_CTX_EXT_IP_NONE) &&
> +	    (eip_typ & IAVF_TXD_CTX_UDP_TUNNELING))
> +		eip_typ |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
> +
>  	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
>  		eip_len <<
> IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
>  		eip_noinc <<
> IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
> @@ -2417,7 +2455,7 @@ iavf_fill_ctx_desc_segmentation_field(volatile
> uint64_t *field,
>  		total_length = m->pkt_len - (m->l2_len + m->l3_len + m-
> >l4_len);
> 
>  		if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
> -			total_length -= m->outer_l3_len;
> +			total_length -= m->outer_l3_len +  m->outer_l2_len;

Not related, delete

>  	}
> 
>  #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
> @@ -2535,8 +2573,13 @@ iavf_build_data_desc_cmd_offset_fields(volatile
> uint64_t *qw1,
>  	}
> 
>  	/* Set MACLEN */
> -	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
> -
> +	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
> +		offset |= (m->outer_l2_len >> 1)
> +			<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
> +	else
> +		offset |= (m->l2_len >> 1)
> +			<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
> +
>  	/* Enable L3 checksum offloading inner */
>  	if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
>  		if (m->ol_flags & RTE_MBUF_F_TX_IPV4) { diff --git
> a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h index
> 1695e43cd5..4b40ad3615 100644
> --- a/drivers/net/iavf/iavf_rxtx.h
> +++ b/drivers/net/iavf/iavf_rxtx.h
> @@ -26,6 +26,8 @@
>  #define IAVF_TX_NO_VECTOR_FLAGS (				 \
>  		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
>  		RTE_ETH_TX_OFFLOAD_TCP_TSO |		 \
> +		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |    \
> +		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |	\
>  		RTE_ETH_TX_OFFLOAD_SECURITY)
> 
>  #define IAVF_TX_VECTOR_OFFLOAD (				 \
> @@ -56,7 +58,8 @@
>  #define IAVF_TX_CKSUM_OFFLOAD_MASK (		 \
>  		RTE_MBUF_F_TX_IP_CKSUM |		 \
>  		RTE_MBUF_F_TX_L4_MASK |		 \
> -		RTE_MBUF_F_TX_TCP_SEG)
> +		RTE_MBUF_F_TX_TCP_SEG |          \

UDP

> +		RTE_MBUF_F_TX_OUTER_IP_CKSUM)
> 
>  #define IAVF_TX_OFFLOAD_MASK (  \
>  		RTE_MBUF_F_TX_OUTER_IPV6 |		 \
> @@ -67,6 +70,9 @@
>  		RTE_MBUF_F_TX_IP_CKSUM |		 \
>  		RTE_MBUF_F_TX_L4_MASK |		 \
>  		RTE_MBUF_F_TX_TCP_SEG |		 \
> +		RTE_MBUF_F_TX_TUNNEL_MASK |	\
> +		RTE_MBUF_F_TX_OUTER_IP_CKSUM |  \
> +		RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \
>  		RTE_ETH_TX_OFFLOAD_SECURITY)
> 
>  #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
> --
> 2.25.1



* [PATCH v2] net/iavf: enable inner and outer Tx checksum offload
  2022-08-12 16:52 ` [PATCH 2/2] net/iavf: support inner and outer checksum offload peng1x.zhang
  2022-08-30  8:12   ` Yang, Qiming
@ 2022-09-01  9:33   ` Peng Zhang
  2022-09-01 11:04     ` Zhang, Qi Z
                       ` (2 more replies)
  1 sibling, 3 replies; 20+ messages in thread
From: Peng Zhang @ 2022-09-01  9:33 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, qi.z.zhang, Peng Zhang

Enable inner and outer Tx checksum offload for tunnel packets by
configuring ol_flags.

Signed-off-by: Peng Zhang <peng1x.zhang@intel.com>

---
v2: add outer udp cksum flag and remove unrelated code
---
 drivers/net/iavf/iavf_ethdev.c |  1 +
 drivers/net/iavf/iavf_rxtx.c   | 48 ++++++++++++++++++++++++++++++++--
 drivers/net/iavf/iavf_rxtx.h   |  9 ++++++-
 3 files changed, 55 insertions(+), 3 deletions(-)

diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 506fcff6e3..fa040766e5 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1134,6 +1134,7 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
 		RTE_ETH_TX_OFFLOAD_TCP_TSO |
 		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
 		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 3deabe1d7e..b784c5cc18 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2334,7 +2334,8 @@ static inline uint16_t
 iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
 {
 	if (flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
-			RTE_MBUF_F_TX_TUNNEL_MASK))
+	    RTE_MBUF_F_TX_TUNNEL_MASK | RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+	    RTE_MBUF_F_TX_OUTER_UDP_CKSUM))
 		return 1;
 	if (flags & RTE_MBUF_F_TX_VLAN &&
 	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
@@ -2399,6 +2400,44 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 	break;
 	}
 
+	/* L4TUNT: L4 Tunneling Type */
+	switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+	case RTE_MBUF_F_TX_TUNNEL_IPIP:
+		/* for non UDP / GRE tunneling, set to 00b */
+		break;
+	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+	case RTE_MBUF_F_TX_TUNNEL_GTP:
+	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
+		eip_typ |= IAVF_TXD_CTX_UDP_TUNNELING;
+		break;
+	case RTE_MBUF_F_TX_TUNNEL_GRE:
+		eip_typ |= IAVF_TXD_CTX_GRE_TUNNELING;
+		break;
+	default:
+		PMD_TX_LOG(ERR, "Tunnel type not supported");
+		return;
+	}
+
+	/* L4TUNLEN: L4 Tunneling Length, in Words
+	 *
+	 * We depend on app to set rte_mbuf.l2_len correctly.
+	 * For IP in GRE it should be set to the length of the GRE
+	 * header;
+	 * For MAC in GRE or MAC in UDP it should be set to the length
+	 * of the GRE or UDP headers plus the inner MAC up to including
+	 * its last Ethertype.
+	 * If MPLS labels exists, it should include them as well.
+	 */
+	eip_typ |= (m->l2_len >> 1) << IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
+
+	/**
+	 * Calculate the tunneling UDP checksum.
+	 * Shall be set only if L4TUNT = 01b and EIPT is not zero
+	 */
+	if (!(eip_typ & IAVF_TX_CTX_EXT_IP_NONE) &&
+	    (eip_typ & IAVF_TXD_CTX_UDP_TUNNELING))
+		eip_typ |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
+
 	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
 		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
 		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
@@ -2535,7 +2574,12 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 	}
 
 	/* Set MACLEN */
-	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+		offset |= (m->outer_l2_len >> 1)
+			<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+	else
+		offset |= (m->l2_len >> 1)
+			<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
 
 	/* Enable L3 checksum offloading inner */
 	if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 1695e43cd5..66e832713c 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -26,6 +26,8 @@
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
 		RTE_ETH_TX_OFFLOAD_TCP_TSO |		 \
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |    \
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |	\
 		RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
@@ -56,7 +58,9 @@
 #define IAVF_TX_CKSUM_OFFLOAD_MASK (		 \
 		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
-		RTE_MBUF_F_TX_TCP_SEG)
+		RTE_MBUF_F_TX_TCP_SEG |          \
+		RTE_MBUF_F_TX_OUTER_IP_CKSUM |   \
+		RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
 
 #define IAVF_TX_OFFLOAD_MASK (  \
 		RTE_MBUF_F_TX_OUTER_IPV6 |		 \
@@ -67,6 +71,9 @@
 		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
 		RTE_MBUF_F_TX_TCP_SEG |		 \
+		RTE_MBUF_F_TX_TUNNEL_MASK |	\
+		RTE_MBUF_F_TX_OUTER_IP_CKSUM |  \
+		RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \
 		RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
-- 
2.25.1



* RE: [PATCH v2] net/iavf: enable inner and outer Tx checksum offload
  2022-09-01  9:33   ` [PATCH v2] net/iavf: enable inner and outer Tx " Peng Zhang
@ 2022-09-01 11:04     ` Zhang, Qi Z
  2022-09-05  2:25     ` Yang, Qiming
  2022-09-20  9:14     ` [PATCH v3] " Zhichao Zeng
  2 siblings, 0 replies; 20+ messages in thread
From: Zhang, Qi Z @ 2022-09-01 11:04 UTC (permalink / raw)
  To: Zhang, Peng1X, dev; +Cc: Yang, Qiming



> -----Original Message-----
> From: Zhang, Peng1X <peng1x.zhang@intel.com>
> Sent: Thursday, September 1, 2022 5:33 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Zhang, Peng1X <peng1x.zhang@intel.com>
> Subject: [PATCH v2] net/iavf: enable inner and outer Tx checksum offload
> 
> Enable inner and outer Tx checksum offload for tunnel packet by configure
> ol_flags.
>

Please also update doc/guides/nics/features/iavf.ini.

I assume Inner l3/l4 checksum should be added.
 



* RE: [PATCH v2] net/iavf: enable inner and outer Tx checksum offload
  2022-09-01  9:33   ` [PATCH v2] net/iavf: enable inner and outer Tx " Peng Zhang
  2022-09-01 11:04     ` Zhang, Qi Z
@ 2022-09-05  2:25     ` Yang, Qiming
  2022-09-20  9:14     ` [PATCH v3] " Zhichao Zeng
  2 siblings, 0 replies; 20+ messages in thread
From: Yang, Qiming @ 2022-09-05  2:25 UTC (permalink / raw)
  To: Zhang, Peng1X, dev; +Cc: Zhang, Qi Z

Test failed.
Nacked-by: Qiming Yang <qiming.yang@intel.com>

> -----Original Message-----
> From: Zhang, Peng1X <peng1x.zhang@intel.com>
> Sent: Thursday, September 1, 2022 5:33 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Zhang, Peng1X <peng1x.zhang@intel.com>
> Subject: [PATCH v2] net/iavf: enable inner and outer Tx checksum offload
> 
> Enable inner and outer Tx checksum offload for tunnel packet by configure
> ol_flags.
> 
> Signed-off-by: Peng Zhang <peng1x.zhang@intel.com>
> 
> ---
> v2: add outer udp cksum flag and remove unrelated code
> ---
>  drivers/net/iavf/iavf_ethdev.c |  1 +
>  drivers/net/iavf/iavf_rxtx.c   | 48 ++++++++++++++++++++++++++++++++--
>  drivers/net/iavf/iavf_rxtx.h   |  9 ++++++-
>  3 files changed, 55 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
> index 506fcff6e3..fa040766e5 100644
> --- a/drivers/net/iavf/iavf_ethdev.c
> +++ b/drivers/net/iavf/iavf_ethdev.c
> @@ -1134,6 +1134,7 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct
> rte_eth_dev_info *dev_info)
>  		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
>  		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
>  		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
> +		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
>  		RTE_ETH_TX_OFFLOAD_TCP_TSO |
>  		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
>  		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index
> 3deabe1d7e..b784c5cc18 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -2334,7 +2334,8 @@ static inline uint16_t
> iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)  {
>  	if (flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
> -			RTE_MBUF_F_TX_TUNNEL_MASK))
> +	    RTE_MBUF_F_TX_TUNNEL_MASK |
> RTE_MBUF_F_TX_OUTER_IP_CKSUM |
> +	    RTE_MBUF_F_TX_OUTER_UDP_CKSUM))
>  		return 1;
>  	if (flags & RTE_MBUF_F_TX_VLAN &&
>  	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
> @@ -2399,6 +2400,44 @@ iavf_fill_ctx_desc_tunnelling_field(volatile
> uint64_t *qw0,
>  	break;
>  	}
> 
> +	/* L4TUNT: L4 Tunneling Type */
> +	switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
> +	case RTE_MBUF_F_TX_TUNNEL_IPIP:
> +		/* for non UDP / GRE tunneling, set to 00b */
> +		break;
> +	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
> +	case RTE_MBUF_F_TX_TUNNEL_GTP:
> +	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
> +		eip_typ |= IAVF_TXD_CTX_UDP_TUNNELING;
> +		break;
> +	case RTE_MBUF_F_TX_TUNNEL_GRE:
> +		eip_typ |= IAVF_TXD_CTX_GRE_TUNNELING;
> +		break;
> +	default:
> +		PMD_TX_LOG(ERR, "Tunnel type not supported");
> +		return;
> +	}
> +
> +	/* L4TUNLEN: L4 Tunneling Length, in Words
> +	 *
> +	 * We depend on app to set rte_mbuf.l2_len correctly.
> +	 * For IP in GRE it should be set to the length of the GRE
> +	 * header;
> +	 * For MAC in GRE or MAC in UDP it should be set to the length
> +	 * of the GRE or UDP headers plus the inner MAC up to including
> +	 * its last Ethertype.
> +	 * If MPLS labels exists, it should include them as well.
> +	 */
> +	eip_typ |= (m->l2_len >> 1) << IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
> +
> +	/**
> +	 * Calculate the tunneling UDP checksum.
> +	 * Shall be set only if L4TUNT = 01b and EIPT is not zero
> +	 */
> +	if (!(eip_typ & IAVF_TX_CTX_EXT_IP_NONE) &&
> +	    (eip_typ & IAVF_TXD_CTX_UDP_TUNNELING))
> +		eip_typ |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
> +
>  	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
>  		eip_len <<
> IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
>  		eip_noinc <<
> IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
> @@ -2535,7 +2574,12 @@ iavf_build_data_desc_cmd_offset_fields(volatile
> uint64_t *qw1,
>  	}
> 
>  	/* Set MACLEN */
> -	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
> +	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
> +		offset |= (m->outer_l2_len >> 1)
> +			<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
> +	else
> +		offset |= (m->l2_len >> 1)
> +			<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
> 
>  	/* Enable L3 checksum offloading inner */
>  	if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { diff --git
> a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h index
> 1695e43cd5..66e832713c 100644
> --- a/drivers/net/iavf/iavf_rxtx.h
> +++ b/drivers/net/iavf/iavf_rxtx.h
> @@ -26,6 +26,8 @@
>  #define IAVF_TX_NO_VECTOR_FLAGS (				 \
>  		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
>  		RTE_ETH_TX_OFFLOAD_TCP_TSO |		 \
> +		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |    \
> +		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |	\
>  		RTE_ETH_TX_OFFLOAD_SECURITY)
> 
>  #define IAVF_TX_VECTOR_OFFLOAD (				 \
> @@ -56,7 +58,9 @@
>  #define IAVF_TX_CKSUM_OFFLOAD_MASK (		 \
>  		RTE_MBUF_F_TX_IP_CKSUM |		 \
>  		RTE_MBUF_F_TX_L4_MASK |		 \
> -		RTE_MBUF_F_TX_TCP_SEG)
> +		RTE_MBUF_F_TX_TCP_SEG |          \
> +		RTE_MBUF_F_TX_OUTER_IP_CKSUM |   \
> +		RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
> 
>  #define IAVF_TX_OFFLOAD_MASK (  \
>  		RTE_MBUF_F_TX_OUTER_IPV6 |		 \
> @@ -67,6 +71,9 @@
>  		RTE_MBUF_F_TX_IP_CKSUM |		 \
>  		RTE_MBUF_F_TX_L4_MASK |		 \
>  		RTE_MBUF_F_TX_TCP_SEG |		 \
> +		RTE_MBUF_F_TX_TUNNEL_MASK |	\
> +		RTE_MBUF_F_TX_OUTER_IP_CKSUM |  \
> +		RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \
>  		RTE_ETH_TX_OFFLOAD_SECURITY)
> 
>  #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
> --
> 2.25.1



* [PATCH v3] net/iavf: enable inner and outer Tx checksum offload
  2022-09-01  9:33   ` [PATCH v2] net/iavf: enable inner and outer Tx " Peng Zhang
  2022-09-01 11:04     ` Zhang, Qi Z
  2022-09-05  2:25     ` Yang, Qiming
@ 2022-09-20  9:14     ` Zhichao Zeng
  2022-09-22  9:02       ` Xu, Ke1
  2 siblings, 1 reply; 20+ messages in thread
From: Zhichao Zeng @ 2022-09-20  9:14 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, yidingx.zhou, qi.z.zhang, Zhichao Zeng, Peng Zhang,
	Jingjing Wu, Beilei Xing

This patch enables scalar path inner and outer Tx checksum offload
for tunnel packets by configuring ol_flags.

Signed-off-by: Peng Zhang <peng1x.zhang@intel.com>
Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>

---
v2: add outer udp cksum flag and remove unrelated code
---
v3: specify the patch scope and update document
---
 doc/guides/nics/features/iavf.ini |  2 ++
 drivers/net/iavf/iavf_ethdev.c    |  1 +
 drivers/net/iavf/iavf_rxtx.c      | 48 +++++++++++++++++++++++++++++--
 drivers/net/iavf/iavf_rxtx.h      |  9 +++++-
 4 files changed, 57 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/features/iavf.ini b/doc/guides/nics/features/iavf.ini
index dfaa82b83d..eeda6b7210 100644
--- a/doc/guides/nics/features/iavf.ini
+++ b/doc/guides/nics/features/iavf.ini
@@ -25,6 +25,8 @@ VLAN offload         = Y
 L3 checksum offload  = P
 L4 checksum offload  = P
 Timestamp offload    = P
+Inner L3 checksum    = P
+Inner L4 checksum    = P
 Packet type parsing  = Y
 Rx descriptor status = Y
 Tx descriptor status = Y
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 506fcff6e3..fa040766e5 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1134,6 +1134,7 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
 		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
 		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |
 		RTE_ETH_TX_OFFLOAD_TCP_TSO |
 		RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
 		RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 3deabe1d7e..b784c5cc18 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2334,7 +2334,8 @@ static inline uint16_t
 iavf_calc_context_desc(uint64_t flags, uint8_t vlan_flag)
 {
 	if (flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
-			RTE_MBUF_F_TX_TUNNEL_MASK))
+	    RTE_MBUF_F_TX_TUNNEL_MASK | RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+	    RTE_MBUF_F_TX_OUTER_UDP_CKSUM))
 		return 1;
 	if (flags & RTE_MBUF_F_TX_VLAN &&
 	    vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
@@ -2399,6 +2400,44 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 	break;
 	}
 
+	/* L4TUNT: L4 Tunneling Type */
+	switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+	case RTE_MBUF_F_TX_TUNNEL_IPIP:
+		/* for non UDP / GRE tunneling, set to 00b */
+		break;
+	case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+	case RTE_MBUF_F_TX_TUNNEL_GTP:
+	case RTE_MBUF_F_TX_TUNNEL_GENEVE:
+		eip_typ |= IAVF_TXD_CTX_UDP_TUNNELING;
+		break;
+	case RTE_MBUF_F_TX_TUNNEL_GRE:
+		eip_typ |= IAVF_TXD_CTX_GRE_TUNNELING;
+		break;
+	default:
+		PMD_TX_LOG(ERR, "Tunnel type not supported");
+		return;
+	}
+
+	/* L4TUNLEN: L4 Tunneling Length, in Words
+	 *
+	 * We depend on app to set rte_mbuf.l2_len correctly.
+	 * For IP in GRE it should be set to the length of the GRE
+	 * header;
+	 * For MAC in GRE or MAC in UDP it should be set to the length
+	 * of the GRE or UDP headers plus the inner MAC up to including
+	 * its last Ethertype.
+	 * If MPLS labels exists, it should include them as well.
+	 */
+	eip_typ |= (m->l2_len >> 1) << IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
+
+	/**
+	 * Calculate the tunneling UDP checksum.
+	 * Shall be set only if L4TUNT = 01b and EIPT is not zero
+	 */
+	if (!(eip_typ & IAVF_TX_CTX_EXT_IP_NONE) &&
+	    (eip_typ & IAVF_TXD_CTX_UDP_TUNNELING))
+		eip_typ |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
+
 	*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
 		eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
 		eip_noinc << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIP_NOINC_SHIFT;
@@ -2535,7 +2574,12 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 	}
 
 	/* Set MACLEN */
-	offset |= (m->l2_len >> 1) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+		offset |= (m->outer_l2_len >> 1)
+			<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+	else
+		offset |= (m->l2_len >> 1)
+			<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
 
 	/* Enable L3 checksum offloading inner */
 	if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 1695e43cd5..66e832713c 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -26,6 +26,8 @@
 #define IAVF_TX_NO_VECTOR_FLAGS (				 \
 		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		 \
 		RTE_ETH_TX_OFFLOAD_TCP_TSO |		 \
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |    \
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM |	\
 		RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_VECTOR_OFFLOAD (				 \
@@ -56,7 +58,9 @@
 #define IAVF_TX_CKSUM_OFFLOAD_MASK (		 \
 		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
-		RTE_MBUF_F_TX_TCP_SEG)
+		RTE_MBUF_F_TX_TCP_SEG |          \
+		RTE_MBUF_F_TX_OUTER_IP_CKSUM |   \
+		RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
 
 #define IAVF_TX_OFFLOAD_MASK (  \
 		RTE_MBUF_F_TX_OUTER_IPV6 |		 \
@@ -67,6 +71,9 @@
 		RTE_MBUF_F_TX_IP_CKSUM |		 \
 		RTE_MBUF_F_TX_L4_MASK |		 \
 		RTE_MBUF_F_TX_TCP_SEG |		 \
+		RTE_MBUF_F_TX_TUNNEL_MASK |	\
+		RTE_MBUF_F_TX_OUTER_IP_CKSUM |  \
+		RTE_MBUF_F_TX_OUTER_UDP_CKSUM | \
 		RTE_ETH_TX_OFFLOAD_SECURITY)
 
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
-- 
2.25.1



* RE: [PATCH v3] net/iavf: enable inner and outer Tx checksum offload
  2022-09-20  9:14     ` [PATCH v3] " Zhichao Zeng
@ 2022-09-22  9:02       ` Xu, Ke1
  2022-09-25  5:58         ` Zhang, Qi Z
  0 siblings, 1 reply; 20+ messages in thread
From: Xu, Ke1 @ 2022-09-22  9:02 UTC (permalink / raw)
  To: Zeng, ZhichaoX, dev
  Cc: Yang, Qiming, Zhou, YidingX, Zhang, Qi Z, Zeng, ZhichaoX, Zhang,
	Peng1X, Wu, Jingjing, Xing, Beilei


> -----Original Message-----
> From: Zhichao Zeng <zhichaox.zeng@intel.com>
> Sent: Tuesday, September 20, 2022 5:15 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX
> <yidingx.zhou@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Zeng,
> ZhichaoX <zhichaox.zeng@intel.com>; Zhang, Peng1X
> <peng1x.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing,
> Beilei <beilei.xing@intel.com>
> Subject: [PATCH v3] net/iavf: enable inner and outer Tx checksum offload
> 
> This patch is to enable scalar path inner and outer Tx checksum offload for
> tunnel packet by configure ol_flags.
> 
> Signed-off-by: Peng Zhang <peng1x.zhang@intel.com>
> Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>
> 
> ---
> v2: add outer udp cksum flag and remove unrelated code
> ---
> v3: specify the patch scope and update document
> ---

The Tx vector path is not covered in this patch;
VXLAN needs a port config and can only be covered by DCF, so it is not a bug;
TSO is not implemented yet and will be tracked in another story.

The functions in this patch are validated and passed.

Tested-by: Ke Xu <ke1.xu@intel.com>

>  doc/guides/nics/features/iavf.ini |  2 ++
>  drivers/net/iavf/iavf_ethdev.c    |  1 +
>  drivers/net/iavf/iavf_rxtx.c      | 48 +++++++++++++++++++++++++++++--
>  drivers/net/iavf/iavf_rxtx.h      |  9 +++++-
>  4 files changed, 57 insertions(+), 3 deletions(-)
> 



* RE: [PATCH v3] net/iavf: enable inner and outer Tx checksum offload
  2022-09-22  9:02       ` Xu, Ke1
@ 2022-09-25  5:58         ` Zhang, Qi Z
  0 siblings, 0 replies; 20+ messages in thread
From: Zhang, Qi Z @ 2022-09-25  5:58 UTC (permalink / raw)
  To: Xu, Ke1, Zeng, ZhichaoX, dev
  Cc: Yang, Qiming, Zhou, YidingX, Zeng, ZhichaoX, Zhang,  Peng1X, Wu,
	Jingjing, Xing, Beilei



> -----Original Message-----
> From: Xu, Ke1 <ke1.xu@intel.com>
> Sent: Thursday, September 22, 2022 5:03 PM
> To: Zeng, ZhichaoX <zhichaox.zeng@intel.com>; dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX
> <yidingx.zhou@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Zeng,
> ZhichaoX <zhichaox.zeng@intel.com>; Zhang, Peng1X
> <peng1x.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing,
> Beilei <beilei.xing@intel.com>
> Subject: RE: [PATCH v3] net/iavf: enable inner and outer Tx checksum offload
> 
> 
> > -----Original Message-----
> > From: Zhichao Zeng <zhichaox.zeng@intel.com>
> > Sent: Tuesday, September 20, 2022 5:15 PM
> > To: dev@dpdk.org
> > Cc: Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX
> > <yidingx.zhou@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Zeng,
> > ZhichaoX <zhichaox.zeng@intel.com>; Zhang, Peng1X
> > <peng1x.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing,
> > Beilei <beilei.xing@intel.com>
> > Subject: [PATCH v3] net/iavf: enable inner and outer Tx checksum
> > offload
> >
> > This patch is to enable scalar path inner and outer Tx checksum
> > offload for tunnel packet by configure ol_flags.
> >
> > Signed-off-by: Peng Zhang <peng1x.zhang@intel.com>
> > Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>
> >
> > ---
> > v2: add outer udp cksum flag and remove unrelated code
> > ---
> > v3: specify the patch scope and update document
> > ---
> 
> TX Vector path is not covered in this patch; VXLAN needs port config, can only
> be covered by DCF, not a bug; TSO not implemented yet, which will be
> tracked in another story.
> 
> Functions in this patch is validated and passed.
> 
> Tested-by: Ke Xu <ke1.xu@intel.com>

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi



* [PATCH v2] net/iavf: fix TSO offload for tunnel case
  2022-08-12 16:52 [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases peng1x.zhang
  2022-08-12 16:52 ` [PATCH 2/2] net/iavf: support inner and outer checksum offload peng1x.zhang
  2022-08-30  7:52 ` [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases Yang, Qiming
@ 2022-09-26  5:17 ` Zhichao Zeng
  2022-09-26  9:48   ` Xu, Ke1
                     ` (2 more replies)
  2 siblings, 3 replies; 20+ messages in thread
From: Zhichao Zeng @ 2022-09-26  5:17 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, yidingx.zhou, qi.z.zhang, Zhichao Zeng, Jingjing Wu,
	Beilei Xing, Abhijit Sinha, Declan Doherty, Radu Nicolau

This patch fixes the issue of tunnel TSO not being enabled, simplifies
the logic for calculating the 'Tx Buffer Size' of the data descriptor
with IPsec, and fixes the handling of mbuf sizes that exceed the Tx
descriptor hardware limit of 1B up to (16K-1)B, which causes malicious
behavior on the NIC.
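
To make the descriptor accounting concrete, here is a standalone sketch of
the per-chain count that the new iavf_calc_pkt_desc() below performs (the
segment sizes are made-up example values):

#include <stdio.h>

#define IAVF_MAX_DATA_PER_TXD 16383u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* example 2-segment TSO mbuf chain: data_len of each segment */
	const unsigned int seg_len[] = { 40000, 9000 };
	unsigned int count = 0;
	unsigned int i;

	for (i = 0; i < 2; i++)
		count += DIV_ROUND_UP(seg_len[i], IAVF_MAX_DATA_PER_TXD);

	/* 40000 B -> 3 descriptors and 9000 B -> 1, so 4 data descriptors
	 * must be reserved, while one-per-segment counting would give 2.
	 */
	printf("data descriptors needed: %u\n", count);
	return 0;
}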

Fixes: 1e728b01120c ("net/iavf: rework Tx path")

---
v2: rework patch

Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>
---
 drivers/common/iavf/iavf_osdep.h |  2 +
 drivers/net/iavf/iavf_rxtx.c     | 95 +++++++++++++++++++-------------
 2 files changed, 59 insertions(+), 38 deletions(-)

diff --git a/drivers/common/iavf/iavf_osdep.h b/drivers/common/iavf/iavf_osdep.h
index 31d3d809f9..bf1436dfc6 100644
--- a/drivers/common/iavf/iavf_osdep.h
+++ b/drivers/common/iavf/iavf_osdep.h
@@ -126,6 +126,8 @@ writeq(uint64_t value, volatile void *addr)
 #define iavf_memset(a, b, c, d) memset((a), (b), (c))
 #define iavf_memcpy(a, b, c, d) rte_memcpy((a), (b), (c))
 
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
 #define iavf_usec_delay(x) rte_delay_us_sleep(x)
 #define iavf_msec_delay(x) iavf_usec_delay(1000 * (x))
 
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 109ba756f8..a06d9d3da6 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2417,7 +2417,7 @@ iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
 		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
 		if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-			total_length -= m->outer_l3_len;
+			total_length -= m->outer_l3_len + m->outer_l2_len;
 	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
@@ -2581,50 +2581,39 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
+/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
+#define IAVF_MAX_DATA_PER_TXD \
+	(IAVF_TXD_QW1_TX_BUF_SZ_MASK >> IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+/* Calculate the number of TX descriptors needed for each pkt */
+static inline uint16_t
+iavf_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+	struct rte_mbuf *txd = tx_pkt;
+	uint16_t count = 0;
+
+	while (txd != NULL) {
+		count += DIV_ROUND_UP(txd->data_len, IAVF_MAX_DATA_PER_TXD);
+		txd = txd->next;
+	}
+
+	return count;
+}
+
 static inline void
 iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
-	struct rte_mbuf *m, uint64_t desc_template,
-	uint16_t tlen, uint16_t ipseclen)
+	uint64_t desc_template,	uint16_t buffsz,
+	uint64_t buffer_addr)
 {
-	uint32_t hdrlen = m->l2_len;
-	uint32_t bufsz = 0;
-
 	/* fill data descriptor qw1 from template */
 	desc->cmd_type_offset_bsz = desc_template;
 
-	/* set data buffer address */
-	desc->buffer_addr = rte_mbuf_data_iova(m);
-
-	/* calculate data buffer size less set header lengths */
-	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
-			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
-					RTE_MBUF_F_TX_UDP_SEG))) {
-		hdrlen += m->outer_l3_len;
-		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
-			hdrlen += m->l3_len + m->l4_len;
-		else
-			hdrlen += m->l3_len;
-		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
-			hdrlen += ipseclen;
-		bufsz = hdrlen + tlen;
-	} else if ((m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
-			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
-					RTE_MBUF_F_TX_UDP_SEG))) {
-		hdrlen += m->outer_l3_len + m->l3_len + ipseclen;
-		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
-			hdrlen += m->l4_len;
-		bufsz = hdrlen + tlen;
-
-	} else {
-		bufsz = m->data_len;
-	}
-
 	/* set data buffer size */
 	desc->cmd_type_offset_bsz |=
-		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+		(((uint64_t)buffsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
 		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 
-	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
+	desc->buffer_addr = rte_cpu_to_le_64(buffer_addr);
 	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
 }
 
@@ -2649,8 +2638,10 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
 	struct rte_mbuf *mb, *mb_seg;
+	uint64_t buf_dma_addr;
 	uint16_t desc_idx, desc_idx_last;
 	uint16_t idx;
+	uint16_t slen;
 
 
 	/* Check if the descriptor ring needs to be cleaned. */
@@ -2689,8 +2680,14 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
+		 * Recalculate the needed tx descs when TSO enabled in case
+		 * the mbuf data size exceeds max data size that hw allows
+		 * per tx desc.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
+		if (mb->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+			nb_desc_required = iavf_calc_pkt_desc(mb) + nb_desc_ctx + nb_desc_ipsec;
+		else
+			nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2786,8 +2783,30 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				rte_pktmbuf_free_seg(txe->mbuf);
 
 			txe->mbuf = mb_seg;
-			iavf_fill_data_desc(ddesc, mb_seg,
-					ddesc_template, tlen, ipseclen);
+			slen = mb_seg->data_len;
+			if (mb_seg->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+				slen += ipseclen;
+			buf_dma_addr = rte_mbuf_data_iova(mb_seg);
+			while ((mb_seg->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
+					RTE_MBUF_F_TX_UDP_SEG)) &&
+					unlikely(slen > IAVF_MAX_DATA_PER_TXD)) {
+				iavf_fill_data_desc(ddesc, ddesc_template,
+					IAVF_MAX_DATA_PER_TXD, buf_dma_addr);
+
+				IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+				buf_dma_addr += IAVF_MAX_DATA_PER_TXD;
+				slen -= IAVF_MAX_DATA_PER_TXD;
+
+				txe->last_id = desc_idx_last;
+				desc_idx = txe->next_id;
+				txe = txn;
+				ddesc = &txr[desc_idx];
+				txn = &txe_ring[txe->next_id];
+			}
+
+			iavf_fill_data_desc(ddesc, ddesc_template,
+					slen, buf_dma_addr);
 
 			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
 
-- 
2.25.1



* RE: [PATCH v2] net/iavf: fix TSO offload for tunnel case
  2022-09-26  5:17 ` [PATCH v2] net/iavf: fix TSO offload for tunnel case Zhichao Zeng
@ 2022-09-26  9:48   ` Xu, Ke1
  2022-09-27  2:33   ` Zhang, Qi Z
  2022-09-27  9:56   ` [PATCH v3] " Zhichao Zeng
  2 siblings, 0 replies; 20+ messages in thread
From: Xu, Ke1 @ 2022-09-26  9:48 UTC (permalink / raw)
  To: Zeng, ZhichaoX, dev
  Cc: Yang, Qiming, Zhou, YidingX, Zhang, Qi Z, Zeng, ZhichaoX, Wu,
	Jingjing, Xing, Beilei, Sinha, Abhijit, Doherty, Declan, Nicolau,
	Radu



> -----Original Message-----
> From: Zhichao Zeng <zhichaox.zeng@intel.com>
> Sent: Monday, September 26, 2022 1:17 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX
> <yidingx.zhou@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Zeng,
> ZhichaoX <zhichaox.zeng@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; Sinha, Abhijit <abhijit.sinha@intel.com>;
> Doherty, Declan <declan.doherty@intel.com>; Nicolau, Radu
> <radu.nicolau@intel.com>
> Subject: [PATCH v2] net/iavf: fix TSO offload for tunnel case
> 
> This patch is to fix the tunnel TSO not enabling issue, simplify the logic of
> calculating 'Tx Buffer Size' of data descriptor with IPSec and fix handling that
> the mbuf size exceeds the TX descriptor hardware limit(1B-16KB) which
> causes malicious behavior to the NIC.
> 
> Fixes: 1e728b01120c ("net/iavf: rework Tx path")
> 
> ---
> v2: rework patch
> 
> Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>

Tested and passed.
Tested-by: Ke Xu <ke1.xu@intel.com>

> ---
>  drivers/common/iavf/iavf_osdep.h |  2 +
>  drivers/net/iavf/iavf_rxtx.c     | 95 +++++++++++++++++++-------------
>  2 files changed, 59 insertions(+), 38 deletions(-)




* RE: [PATCH v2] net/iavf: fix TSO offload for tunnel case
  2022-09-26  5:17 ` [PATCH v2] net/iavf: fix TSO offload for tunnel case Zhichao Zeng
  2022-09-26  9:48   ` Xu, Ke1
@ 2022-09-27  2:33   ` Zhang, Qi Z
  2022-09-27  9:56   ` [PATCH v3] " Zhichao Zeng
  2 siblings, 0 replies; 20+ messages in thread
From: Zhang, Qi Z @ 2022-09-27  2:33 UTC (permalink / raw)
  To: Zeng, ZhichaoX, dev
  Cc: Yang, Qiming, Zhou, YidingX, Wu, Jingjing, Xing, Beilei, Sinha,
	Abhijit, Doherty, Declan, Nicolau, Radu



> -----Original Message-----
> From: Zeng, ZhichaoX <zhichaox.zeng@intel.com>
> Sent: Monday, September 26, 2022 1:17 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX
> <yidingx.zhou@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Zeng,
> ZhichaoX <zhichaox.zeng@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; Sinha, Abhijit <abhijit.sinha@intel.com>;
> Doherty, Declan <declan.doherty@intel.com>; Nicolau, Radu
> <radu.nicolau@intel.com>
> Subject: [PATCH v2] net/iavf: fix TSO offload for tunnel case
> 
> This patch is to fix the tunnel TSO not enabling issue, simplify the logic of
> calculating 'Tx Buffer Size' of data descriptor with IPSec and fix handling that
> the mbuf size exceeds the TX descriptor hardware limit(1B-16KB) which
> causes malicious behavior to the NIC.
> 
> Fixes: 1e728b01120c ("net/iavf: rework Tx path")
> 
> ---
> v2: rework patch
> 
> Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>
> ---
>  drivers/common/iavf/iavf_osdep.h |  2 +
>  drivers/net/iavf/iavf_rxtx.c     | 95 +++++++++++++++++++-------------
>  2 files changed, 59 insertions(+), 38 deletions(-)
> 
> diff --git a/drivers/common/iavf/iavf_osdep.h
> b/drivers/common/iavf/iavf_osdep.h
> index 31d3d809f9..bf1436dfc6 100644
> --- a/drivers/common/iavf/iavf_osdep.h
> +++ b/drivers/common/iavf/iavf_osdep.h
> @@ -126,6 +126,8 @@ writeq(uint64_t value, volatile void *addr)  #define
> iavf_memset(a, b, c, d) memset((a), (b), (c))  #define iavf_memcpy(a, b, c, d)
> rte_memcpy((a), (b), (c))
> 
> +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
> +

This does not look like it needs to be added in osdep.h.
Can we simply make it local, or put it in some header file under net/iavf,
so we don't need a patch that crosses modules?
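
A minimal sketch of what keeping it local could look like (the exact
placement and guard here are only a suggestion, not what was finally
merged):

/* e.g. in drivers/net/iavf/iavf_rxtx.c (or iavf_rxtx.h), private to the
 * net/iavf PMD instead of the shared common/iavf osdep header:
 */
#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif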

>  #define iavf_usec_delay(x) rte_delay_us_sleep(x)  #define
> iavf_msec_delay(x) iavf_usec_delay(1000 * (x))
> 
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
> index 109ba756f8..a06d9d3da6 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -2417,7 +2417,7 @@ iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
>  		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
> 
>  		if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
> -			total_length -= m->outer_l3_len;
> +			total_length -= m->outer_l3_len + m->outer_l2_len;
>  	}
> 
>  #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
> @@ -2581,50 +2581,39 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
>  		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
>  }
> 
> +/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
> +#define IAVF_MAX_DATA_PER_TXD \
> +	(IAVF_TXD_QW1_TX_BUF_SZ_MASK >> IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
> +
> +/* Calculate the number of TX descriptors needed for each pkt */
> +static inline uint16_t
> +iavf_calc_pkt_desc(struct rte_mbuf *tx_pkt)
> +{
> +	struct rte_mbuf *txd = tx_pkt;
> +	uint16_t count = 0;
> +
> +	while (txd != NULL) {
> +		count += DIV_ROUND_UP(txd->data_len, IAVF_MAX_DATA_PER_TXD);
> +		txd = txd->next;
> +	}
> +
> +	return count;
> +}
> +
>  static inline void
>  iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
> -	struct rte_mbuf *m, uint64_t desc_template,
> -	uint16_t tlen, uint16_t ipseclen)
> +	uint64_t desc_template,	uint16_t buffsz,
> +	uint64_t buffer_addr)
>  {
> -	uint32_t hdrlen = m->l2_len;
> -	uint32_t bufsz = 0;
> -
>  	/* fill data descriptor qw1 from template */
>  	desc->cmd_type_offset_bsz = desc_template;
> 
> -	/* set data buffer address */
> -	desc->buffer_addr = rte_mbuf_data_iova(m);
> -
> -	/* calculate data buffer size less set header lengths */
> -	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
> -			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
> -					RTE_MBUF_F_TX_UDP_SEG))) {
> -		hdrlen += m->outer_l3_len;
> -		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
> -			hdrlen += m->l3_len + m->l4_len;
> -		else
> -			hdrlen += m->l3_len;
> -		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
> -			hdrlen += ipseclen;
> -		bufsz = hdrlen + tlen;
> -	} else if ((m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
> -			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
> -					RTE_MBUF_F_TX_UDP_SEG))) {
> -		hdrlen += m->outer_l3_len + m->l3_len + ipseclen;
> -		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
> -			hdrlen += m->l4_len;
> -		bufsz = hdrlen + tlen;
> -
> -	} else {
> -		bufsz = m->data_len;
> -	}
> -
>  	/* set data buffer size */
>  	desc->cmd_type_offset_bsz |=
> -		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
> +		(((uint64_t)buffsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
>  		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
> 
> -	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
> +	desc->buffer_addr = rte_cpu_to_le_64(buffer_addr);
>  	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
>  }
> 
> @@ -2649,8 +2638,10 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  	struct iavf_tx_entry *txe_ring = txq->sw_ring;
>  	struct iavf_tx_entry *txe, *txn;
>  	struct rte_mbuf *mb, *mb_seg;
> +	uint64_t buf_dma_addr;
>  	uint16_t desc_idx, desc_idx_last;
>  	uint16_t idx;
> +	uint16_t slen;
> 
> 
>  	/* Check if the descriptor ring needs to be cleaned. */
> @@ -2689,8 +2680,14 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  		 * The number of descriptors that must be allocated for
>  		 * a packet equals to the number of the segments of that
>  		 * packet plus the context and ipsec descriptors if needed.
> +		 * Recalculate the needed tx descs when TSO enabled in case
> +		 * the mbuf data size exceeds max data size that hw allows
> +		 * per tx desc.
>  		 */
> -		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
> +		if (mb->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
> +			nb_desc_required = iavf_calc_pkt_desc(mb) + nb_desc_ctx + nb_desc_ipsec;
> +		else
> +			nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
> 
>  		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
> 
> @@ -2786,8 +2783,30 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  				rte_pktmbuf_free_seg(txe->mbuf);
> 
>  			txe->mbuf = mb_seg;
> -			iavf_fill_data_desc(ddesc, mb_seg,
> -					ddesc_template, tlen, ipseclen);
> +			slen = mb_seg->data_len;
> +			if (mb_seg->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
> +				slen += ipseclen;
> +			buf_dma_addr = rte_mbuf_data_iova(mb_seg);
> +			while ((mb_seg->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
> +					RTE_MBUF_F_TX_UDP_SEG)) &&
> +					unlikely(slen > IAVF_MAX_DATA_PER_TXD)) {
> +				iavf_fill_data_desc(ddesc, ddesc_template,
> +					IAVF_MAX_DATA_PER_TXD, buf_dma_addr);
> +
> +				IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
> +
> +				buf_dma_addr += IAVF_MAX_DATA_PER_TXD;
> +				slen -= IAVF_MAX_DATA_PER_TXD;
> +
> +				txe->last_id = desc_idx_last;
> +				desc_idx = txe->next_id;
> +				txe = txn;
> +				ddesc = &txr[desc_idx];
> +				txn = &txe_ring[txe->next_id];
> +			}
> +
> +			iavf_fill_data_desc(ddesc, ddesc_template,
> +					slen, buf_dma_addr);
> 
>  			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
> 
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH v3] net/iavf: fix TSO offload for tunnel case
  2022-09-26  5:17 ` [PATCH v2] net/iavf: fix TSO offload for tunnel case Zhichao Zeng
  2022-09-26  9:48   ` Xu, Ke1
  2022-09-27  2:33   ` Zhang, Qi Z
@ 2022-09-27  9:56   ` Zhichao Zeng
  2022-09-29  5:27     ` [PATCH v4] " Zhichao Zeng
  2 siblings, 1 reply; 20+ messages in thread
From: Zhichao Zeng @ 2022-09-27  9:56 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, yidingx.zhou, qi.z.zhang, Zhichao Zeng, Jingjing Wu,
	Beilei Xing, Abhijit Sinha, Radu Nicolau, Declan Doherty

This patch fixes the issue of tunnel TSO not being enabled, simplifies
the logic of calculating the 'Tx Buffer Size' of the data descriptor with
IPsec, and fixes the handling of mbuf data sizes that exceed the Tx
descriptor hardware limit (1B to (16K-1)B), which causes malicious
behavior on the NIC.

Fixes: 1e728b01120c ("net/iavf: rework Tx path")

Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>

---
v3: move macros to iavf header file
---
v2: rework patch
---
 drivers/net/iavf/iavf_rxtx.c | 92 +++++++++++++++++++++---------------
 drivers/net/iavf/iavf_rxtx.h |  4 ++
 2 files changed, 58 insertions(+), 38 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 3deabe1d7e..7e3bffb6f8 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2417,7 +2417,7 @@ iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
 		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
 		if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-			total_length -= m->outer_l3_len;
+			total_length -= m->outer_l3_len + m->outer_l2_len;
 	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
@@ -2583,50 +2583,36 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
+/* Calculate the number of TX descriptors needed for each pkt */
+static inline uint16_t
+iavf_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+	struct rte_mbuf *txd = tx_pkt;
+	uint16_t count = 0;
+
+	while (txd != NULL) {
+		count += (txd->data_len + IAVF_MAX_DATA_PER_TXD - 1) /
+			IAVF_MAX_DATA_PER_TXD;
+		txd = txd->next;
+	}
+
+	return count;
+}
+
 static inline void
 iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
-	struct rte_mbuf *m, uint64_t desc_template,
-	uint16_t tlen, uint16_t ipseclen)
+	uint64_t desc_template,	uint16_t buffsz,
+	uint64_t buffer_addr)
 {
-	uint32_t hdrlen = m->l2_len;
-	uint32_t bufsz = 0;
-
 	/* fill data descriptor qw1 from template */
 	desc->cmd_type_offset_bsz = desc_template;
 
-	/* set data buffer address */
-	desc->buffer_addr = rte_mbuf_data_iova(m);
-
-	/* calculate data buffer size less set header lengths */
-	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
-			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
-					RTE_MBUF_F_TX_UDP_SEG))) {
-		hdrlen += m->outer_l3_len;
-		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
-			hdrlen += m->l3_len + m->l4_len;
-		else
-			hdrlen += m->l3_len;
-		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
-			hdrlen += ipseclen;
-		bufsz = hdrlen + tlen;
-	} else if ((m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
-			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
-					RTE_MBUF_F_TX_UDP_SEG))) {
-		hdrlen += m->outer_l3_len + m->l3_len + ipseclen;
-		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
-			hdrlen += m->l4_len;
-		bufsz = hdrlen + tlen;
-
-	} else {
-		bufsz = m->data_len;
-	}
-
 	/* set data buffer size */
 	desc->cmd_type_offset_bsz |=
-		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+		(((uint64_t)buffsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
 		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 
-	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
+	desc->buffer_addr = rte_cpu_to_le_64(buffer_addr);
 	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
 }
 
@@ -2651,8 +2637,10 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
 	struct rte_mbuf *mb, *mb_seg;
+	uint64_t buf_dma_addr;
 	uint16_t desc_idx, desc_idx_last;
 	uint16_t idx;
+	uint16_t slen;
 
 
 	/* Check if the descriptor ring needs to be cleaned. */
@@ -2691,8 +2679,14 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
+		 * Recalculate the needed tx descs when TSO enabled in case
+		 * the mbuf data size exceeds max data size that hw allows
+		 * per tx desc.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
+		if (mb->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+			nb_desc_required = iavf_calc_pkt_desc(mb) + nb_desc_ctx + nb_desc_ipsec;
+		else
+			nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2788,8 +2782,30 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				rte_pktmbuf_free_seg(txe->mbuf);
 
 			txe->mbuf = mb_seg;
-			iavf_fill_data_desc(ddesc, mb_seg,
-					ddesc_template, tlen, ipseclen);
+			slen = mb_seg->data_len;
+			if (mb_seg->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
+				slen += ipseclen;
+			buf_dma_addr = rte_mbuf_data_iova(mb_seg);
+			while ((mb_seg->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
+					RTE_MBUF_F_TX_UDP_SEG)) &&
+					unlikely(slen > IAVF_MAX_DATA_PER_TXD)) {
+				iavf_fill_data_desc(ddesc, ddesc_template,
+					IAVF_MAX_DATA_PER_TXD, buf_dma_addr);
+
+				IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+				buf_dma_addr += IAVF_MAX_DATA_PER_TXD;
+				slen -= IAVF_MAX_DATA_PER_TXD;
+
+				txe->last_id = desc_idx_last;
+				desc_idx = txe->next_id;
+				txe = txn;
+				ddesc = &txr[desc_idx];
+				txn = &txe_ring[txe->next_id];
+			}
+
+			iavf_fill_data_desc(ddesc, ddesc_template,
+					slen, buf_dma_addr);
 
 			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 1695e43cd5..81b1418db1 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -72,6 +72,10 @@
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
 
+/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
+#define IAVF_MAX_DATA_PER_TXD \
+	(IAVF_TXD_QW1_TX_BUF_SZ_MASK >> IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
+
 extern uint64_t iavf_timestamp_dynflag;
 extern int iavf_timestamp_dynfield_offset;
 
-- 
2.25.1
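
To make the descriptor accounting concrete: IAVF_MAX_DATA_PER_TXD
(IAVF_TXD_QW1_TX_BUF_SZ_MASK >> IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) is the
(16K-1)B limit named in the comment, i.e. 16383 bytes. Below is a minimal,
self-contained sketch of the same arithmetic as iavf_calc_pkt_desc(); the
seg struct and the segment sizes are hypothetical stand-ins for an rte_mbuf
chain, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the (16K-1)B per-descriptor limit (IAVF_MAX_DATA_PER_TXD). */
#define MAX_DATA_PER_TXD 16383u

/* Minimal stand-in for an rte_mbuf chain: only the fields the count uses. */
struct seg {
	uint16_t data_len;
	struct seg *next;
};

/* Same arithmetic as iavf_calc_pkt_desc(): one data descriptor per started
 * MAX_DATA_PER_TXD chunk of every segment. */
static uint16_t
calc_pkt_desc(const struct seg *s)
{
	uint16_t count = 0;

	while (s != NULL) {
		count += (s->data_len + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD;
		s = s->next;
	}
	return count;
}

int
main(void)
{
	/* Hypothetical TSO chain: a 45000B segment followed by a 2000B one. */
	struct seg s2 = { .data_len = 2000, .next = NULL };
	struct seg s1 = { .data_len = 45000, .next = &s2 };

	/* Prints 4: the 45000B segment needs 3 descriptors, the 2000B one 1. */
	printf("data descriptors needed: %u\n", (unsigned int)calc_pkt_desc(&s1));
	return 0;
}

The context and IPsec descriptors are still added on top of this count, as
in the nb_desc_required hunk above.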


^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH v4] net/iavf: fix TSO offload for tunnel case
  2022-09-27  9:56   ` [PATCH v3] " Zhichao Zeng
@ 2022-09-29  5:27     ` Zhichao Zeng
  2022-09-30  3:46       ` Xu, Ke1
  2022-09-30  9:05       ` Nicolau, Radu
  0 siblings, 2 replies; 20+ messages in thread
From: Zhichao Zeng @ 2022-09-29  5:27 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, yidingx.zhou, qi.z.zhang, Zhichao Zeng, Jingjing Wu,
	Beilei Xing, Radu Nicolau, Abhijit Sinha, Declan Doherty

This patch fixes the issue of tunnel TSO not being enabled, simplifies
the logic of calculating the 'Tx Buffer Size' of the data descriptor with
IPsec, and fixes the handling of mbuf data sizes that exceed the Tx
descriptor hardware limit (1B to (16K-1)B), which causes malicious
behavior on the NIC.

Fixes: 1e728b01120c ("net/iavf: rework Tx path")

Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>

---
v4: fix the IPsec TSO issue
---
v3: move macros to iavf header file
---
v2: rework patch
---
 drivers/net/iavf/iavf_rxtx.c | 99 ++++++++++++++++++++++--------------
 drivers/net/iavf/iavf_rxtx.h |  4 ++
 2 files changed, 65 insertions(+), 38 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index fc5d9e38cc..f317399ca2 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2463,7 +2463,7 @@ iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
 		total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
 
 		if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-			total_length -= m->outer_l3_len;
+			total_length -= m->outer_l3_len + m->outer_l2_len;
 	}
 
 #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
@@ -2634,50 +2634,36 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
 		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
 }
 
+/* Calculate the number of TX descriptors needed for each pkt */
+static inline uint16_t
+iavf_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+	struct rte_mbuf *txd = tx_pkt;
+	uint16_t count = 0;
+
+	while (txd != NULL) {
+		count += (txd->data_len + IAVF_MAX_DATA_PER_TXD - 1) /
+			IAVF_MAX_DATA_PER_TXD;
+		txd = txd->next;
+	}
+
+	return count;
+}
+
 static inline void
 iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
-	struct rte_mbuf *m, uint64_t desc_template,
-	uint16_t tlen, uint16_t ipseclen)
+	uint64_t desc_template,	uint16_t buffsz,
+	uint64_t buffer_addr)
 {
-	uint32_t hdrlen = m->l2_len;
-	uint32_t bufsz = 0;
-
 	/* fill data descriptor qw1 from template */
 	desc->cmd_type_offset_bsz = desc_template;
 
-	/* set data buffer address */
-	desc->buffer_addr = rte_mbuf_data_iova(m);
-
-	/* calculate data buffer size less set header lengths */
-	if ((m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) &&
-			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
-					RTE_MBUF_F_TX_UDP_SEG))) {
-		hdrlen += m->outer_l3_len;
-		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
-			hdrlen += m->l3_len + m->l4_len;
-		else
-			hdrlen += m->l3_len;
-		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
-			hdrlen += ipseclen;
-		bufsz = hdrlen + tlen;
-	} else if ((m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
-			(m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
-					RTE_MBUF_F_TX_UDP_SEG))) {
-		hdrlen += m->outer_l3_len + m->l3_len + ipseclen;
-		if (m->ol_flags & RTE_MBUF_F_TX_L4_MASK)
-			hdrlen += m->l4_len;
-		bufsz = hdrlen + tlen;
-
-	} else {
-		bufsz = m->data_len;
-	}
-
 	/* set data buffer size */
 	desc->cmd_type_offset_bsz |=
-		(((uint64_t)bufsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
+		(((uint64_t)buffsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
 		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
 
-	desc->buffer_addr = rte_cpu_to_le_64(desc->buffer_addr);
+	desc->buffer_addr = rte_cpu_to_le_64(buffer_addr);
 	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
 }
 
@@ -2702,8 +2688,10 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct iavf_tx_entry *txe_ring = txq->sw_ring;
 	struct iavf_tx_entry *txe, *txn;
 	struct rte_mbuf *mb, *mb_seg;
+	uint64_t buf_dma_addr;
 	uint16_t desc_idx, desc_idx_last;
 	uint16_t idx;
+	uint16_t slen;
 
 
 	/* Check if the descriptor ring needs to be cleaned. */
@@ -2742,8 +2730,14 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus the context and ipsec descriptors if needed.
+		 * Recalculate the needed tx descs when TSO enabled in case
+		 * the mbuf data size exceeds max data size that hw allows
+		 * per tx desc.
 		 */
-		nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
+		if (mb->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+			nb_desc_required = iavf_calc_pkt_desc(mb) + nb_desc_ctx + nb_desc_ipsec;
+		else
+			nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
 
 		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
 
@@ -2839,8 +2833,37 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				rte_pktmbuf_free_seg(txe->mbuf);
 
 			txe->mbuf = mb_seg;
-			iavf_fill_data_desc(ddesc, mb_seg,
-					ddesc_template, tlen, ipseclen);
+
+			if (mb_seg->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+				slen = tlen + mb_seg->l2_len + mb_seg->l3_len +
+						mb_seg->outer_l3_len + ipseclen;
+				if (mb_seg->ol_flags & RTE_MBUF_F_TX_L4_MASK)
+					slen += mb_seg->l4_len;
+			} else {
+				slen = mb_seg->data_len;
+			}
+
+			buf_dma_addr = rte_mbuf_data_iova(mb_seg);
+			while ((mb_seg->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
+					RTE_MBUF_F_TX_UDP_SEG)) &&
+					unlikely(slen > IAVF_MAX_DATA_PER_TXD)) {
+				iavf_fill_data_desc(ddesc, ddesc_template,
+					IAVF_MAX_DATA_PER_TXD, buf_dma_addr);
+
+				IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+
+				buf_dma_addr += IAVF_MAX_DATA_PER_TXD;
+				slen -= IAVF_MAX_DATA_PER_TXD;
+
+				txe->last_id = desc_idx_last;
+				desc_idx = txe->next_id;
+				txe = txn;
+				ddesc = &txr[desc_idx];
+				txn = &txe_ring[txe->next_id];
+			}
+
+			iavf_fill_data_desc(ddesc, ddesc_template,
+					slen, buf_dma_addr);
 
 			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 66e832713c..ae871467ab 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -79,6 +79,10 @@
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
 		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
 
+/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
+#define IAVF_MAX_DATA_PER_TXD \
+	(IAVF_TXD_QW1_TX_BUF_SZ_MASK >> IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
+
 extern uint64_t iavf_timestamp_dynflag;
 extern int iavf_timestamp_dynfield_offset;
 
-- 
2.25.1
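
For readers following the while loop in the last hunk: when a TSO (or UDP
segmentation) segment exceeds IAVF_MAX_DATA_PER_TXD, the driver keeps
programming full-size data descriptors from the same buffer, advancing the
DMA address and shrinking the remaining length until the tail fits. Below is
a minimal standalone sketch of that splitting policy; the fake_desc struct,
the ring size and the addresses are illustrative assumptions, not the real
volatile struct iavf_tx_desc ring.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the (16K-1)B per-descriptor limit (IAVF_MAX_DATA_PER_TXD). */
#define MAX_DATA_PER_TXD 16383u

/* Hypothetical, simplified stand-in for a filled data descriptor. */
struct fake_desc {
	uint64_t buffer_addr;
	uint32_t buf_sz;
};

/* Split one oversized buffer across consecutive descriptors the same way the
 * driver loop advances buf_dma_addr and shrinks slen; returns the number of
 * descriptors written. */
static unsigned int
split_buffer(uint64_t dma_addr, uint32_t slen,
	     struct fake_desc *ring, unsigned int ring_size)
{
	unsigned int used = 0;

	while (slen > MAX_DATA_PER_TXD && used < ring_size) {
		ring[used].buffer_addr = dma_addr;
		ring[used].buf_sz = MAX_DATA_PER_TXD;
		dma_addr += MAX_DATA_PER_TXD;
		slen -= MAX_DATA_PER_TXD;
		used++;
	}
	if (used < ring_size) {
		/* Tail chunk, now within the hardware limit. */
		ring[used].buffer_addr = dma_addr;
		ring[used].buf_sz = slen;
		used++;
	}
	return used;
}

int
main(void)
{
	struct fake_desc ring[8];
	/* A 45000B segment at a made-up DMA address: 16383 + 16383 + 12234. */
	unsigned int i, n = split_buffer(0x100000, 45000, ring, 8);

	for (i = 0; i < n; i++)
		printf("desc %u: addr=0x%" PRIx64 " size=%u\n",
		       i, ring[i].buffer_addr, ring[i].buf_sz);
	return 0;
}

For the IPsec path, the v4 hunk above derives slen from the header lengths
plus tlen and ipseclen instead of data_len before entering the same loop.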


^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [PATCH v4] net/iavf: fix TSO offload for tunnel case
  2022-09-29  5:27     ` [PATCH v4] " Zhichao Zeng
@ 2022-09-30  3:46       ` Xu, Ke1
  2022-09-30  9:05       ` Nicolau, Radu
  1 sibling, 0 replies; 20+ messages in thread
From: Xu, Ke1 @ 2022-09-30  3:46 UTC (permalink / raw)
  To: Zeng, ZhichaoX, dev
  Cc: Yang, Qiming, Zhou, YidingX, Zhang, Qi Z, Zeng, ZhichaoX, Wu,
	Jingjing, Xing, Beilei, Nicolau, Radu, Sinha, Abhijit, Doherty,
	Declan



> -----Original Message-----
> From: Zhichao Zeng <zhichaox.zeng@intel.com>
> Sent: Thursday, September 29, 2022 1:27 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX
> <yidingx.zhou@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Zeng,
> ZhichaoX <zhichaox.zeng@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>; Nicolau, Radu
> <radu.nicolau@intel.com>; Sinha, Abhijit <abhijit.sinha@intel.com>; Doherty,
> Declan <declan.doherty@intel.com>
> Subject: [PATCH v4] net/iavf: fix TSO offload for tunnel case
> 
> This patch fixes the issue of tunnel TSO not being enabled, simplifies the
> logic of calculating the 'Tx Buffer Size' of the data descriptor with IPsec,
> and fixes the handling of mbuf data sizes that exceed the Tx descriptor
> hardware limit (1B to (16K-1)B), which causes malicious behavior on the NIC.
> 
> Fixes: 1e728b01120c ("net/iavf: rework Tx path")
> 
> Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>

Tested and passed.
Tested-by: Ke Xu <ke1.xu@intel.com>

> ---
> v4: fix the IPsec TSO issue
> ---
> v3: move macros to iavf header file
> ---
> v2: rework patch
> ---
>  drivers/net/iavf/iavf_rxtx.c | 99 ++++++++++++++++++++++--------------
>  drivers/net/iavf/iavf_rxtx.h |  4 ++
>  2 files changed, 65 insertions(+), 38 deletions(-)
> --
> 2.25.1


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH v4] net/iavf: fix TSO offload for tunnel case
  2022-09-29  5:27     ` [PATCH v4] " Zhichao Zeng
  2022-09-30  3:46       ` Xu, Ke1
@ 2022-09-30  9:05       ` Nicolau, Radu
  2022-10-08  7:55         ` Zhang, Qi Z
  1 sibling, 1 reply; 20+ messages in thread
From: Nicolau, Radu @ 2022-09-30  9:05 UTC (permalink / raw)
  To: Zhichao Zeng, dev
  Cc: qiming.yang, yidingx.zhou, qi.z.zhang, Jingjing Wu, Beilei Xing,
	Abhijit Sinha, Declan Doherty


On 9/29/2022 6:27 AM, Zhichao Zeng wrote:
> This patch fixes the issue of tunnel TSO not being enabled, simplifies the
> logic of calculating the 'Tx Buffer Size' of the data descriptor with IPsec,
> and fixes the handling of mbuf data sizes that exceed the Tx descriptor
> hardware limit (1B to (16K-1)B), which causes malicious behavior on the NIC.
>
> Fixes: 1e728b01120c ("net/iavf: rework Tx path")
>
> Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>
>
> ---
Acked-by: Radu Nicolau <radu.nicolau@intel.com>

^ permalink raw reply	[flat|nested] 20+ messages in thread

* RE: [PATCH v4] net/iavf: fix TSO offload for tunnel case
  2022-09-30  9:05       ` Nicolau, Radu
@ 2022-10-08  7:55         ` Zhang, Qi Z
  0 siblings, 0 replies; 20+ messages in thread
From: Zhang, Qi Z @ 2022-10-08  7:55 UTC (permalink / raw)
  To: Nicolau, Radu, Zeng, ZhichaoX, dev
  Cc: Yang, Qiming, Zhou, YidingX, Wu, Jingjing, Xing, Beilei, Sinha,
	Abhijit, Doherty, Declan



> -----Original Message-----
> From: Nicolau, Radu <radu.nicolau@intel.com>
> Sent: Friday, September 30, 2022 5:05 PM
> To: Zeng, ZhichaoX <zhichaox.zeng@intel.com>; dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX
> <yidingx.zhou@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Sinha, Abhijit
> <abhijit.sinha@intel.com>; Doherty, Declan <declan.doherty@intel.com>
> Subject: Re: [PATCH v4] net/iavf: fix TSO offload for tunnel case
> 
> 
> On 9/29/2022 6:27 AM, Zhichao Zeng wrote:
> > This patch fixes the issue of tunnel TSO not being enabled, simplifies the
> > logic of calculating the 'Tx Buffer Size' of the data descriptor with IPsec,
> > and fixes the handling of mbuf data sizes that exceed the Tx descriptor
> > hardware limit (1B to (16K-1)B), which causes malicious behavior on the NIC.
> >
> > Fixes: 1e728b01120c ("net/iavf: rework Tx path")
> >
> > Signed-off-by: Zhichao Zeng <zhichaox.zeng@intel.com>
> >
> > ---
> Acked-by: Radu Nicolau <radu.nicolau@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases
@ 2022-08-30  2:22 Xu, Ke1
  0 siblings, 0 replies; 20+ messages in thread
From: Xu, Ke1 @ 2022-08-30  2:22 UTC (permalink / raw)
  To: Zhang, Peng1X; +Cc: Xing, Beilei, dev, Wu, Jingjing


> Subject: [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases
> Date: Sat, 13 Aug 2022 00:52:22 +0800
> Message-ID: <20220812165223.470777-1-peng1x.zhang@intel.com> (raw)
>
> From: Peng Zhang <peng1x.zhang@intel.com>
>
> Hardware limits the max buffer size per Tx descriptor to (16K-1)B.
> So when TSO is enabled in the unencrypted scenario, the mbuf data size may
> exceed the limit and cause malicious behavior on the NIC.
>
> This patch supports splitting this kind of large buffer across multiple Tx descriptors.
>
> Signed-off-by: Peng Zhang <peng1x.zhang@intel.com>

Tested and passed.

Regards,
Tested-by: Ke Xu <ke1.xu@intel.com>

> ---
>  drivers/net/iavf/iavf_rxtx.c | 66 ++++++++++++++++++++++++++++++++----
>  1 file changed, 60 insertions(+), 6 deletions(-)

^ permalink raw reply	[flat|nested] 20+ messages in thread

* [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases
@ 2022-08-26 14:37 Buckley, Daniel M
  0 siblings, 0 replies; 20+ messages in thread
From: Buckley, Daniel M @ 2022-08-26 14:37 UTC (permalink / raw)
  To: Jiang, YuX, dev


From: peng1x.zhang@intel.com
To: dev@dpdk.org
Cc: beilei.xing@intel.com, jingjing.wu@intel.com,
 Peng Zhang <peng1x.zhang@intel.com>
Subject: [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases
Date: Sat, 13 Aug 2022 00:52:22 +0800
Message-Id: <20220812165223.470777-1-peng1x.zhang@intel.com>

From: Peng Zhang <peng1x.zhang@intel.com>

Hardware limits the max buffer size per Tx descriptor to (16K-1)B.
So when TSO is enabled in the unencrypted scenario, the mbuf data size may
exceed the limit and cause malicious behavior on the NIC.

This patch supports splitting this kind of large buffer across multiple Tx descriptors.

Signed-off-by: Peng Zhang <peng1x.zhang@intel.com>
---
Tested-by: Daniel M Buckley <daniel.m.buckley@intel.com>

 drivers/net/iavf/iavf_rxtx.c | 66 ++++++++++++++++++++++++++++++++----
 1 file changed, 60 insertions(+), 6 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index dfd021889e..adec58e90a 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2642,6 +2642,47 @@ iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
      return NULL;
 }

+/* HW requires that TX buffer size ranges from 1B up to (16K-1)B. */
+#define IAVF_MAX_DATA_PER_TXD \
+     (IAVF_TXD_QW1_TX_BUF_SZ_MASK >> IAVF_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+static inline void
+iavf_fill_unencrypt_desc(volatile struct iavf_tx_desc *txd, struct rte_mbuf *m,
+                  volatile uint64_t desc_template, struct iavf_tx_entry *txe,
+                  volatile struct iavf_tx_desc *txr, struct iavf_tx_entry *txe_ring,
+                  int desc_idx_last)
+{
+           /* Setup TX Descriptor */
+           int desc_idx;
+           uint16_t slen = m->data_len;
+           uint64_t buf_dma_addr = rte_mbuf_data_iova(m);
+           struct iavf_tx_entry *txn = &txe_ring[txe->next_id];
+
+           while ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
+                 unlikely(slen > IAVF_MAX_DATA_PER_TXD)) {
+                 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
+                 txd->cmd_type_offset_bsz =
+                 rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DATA |
+                 (uint64_t)IAVF_MAX_DATA_PER_TXD <<
+                 IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) | desc_template;
+
+                 buf_dma_addr += IAVF_MAX_DATA_PER_TXD;
+                 slen -= IAVF_MAX_DATA_PER_TXD;
+
+                 txe->last_id = desc_idx_last;
+                 desc_idx = txe->next_id;
+                 txe = txn;
+                 txd = &txr[desc_idx];
+                 txn = &txe_ring[txe->next_id];
+           }
+
+           txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+           txd->cmd_type_offset_bsz =
+                 rte_cpu_to_le_64((uint64_t)slen << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) |
+                       desc_template;
+}
+
 /* TX function */
 uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
@@ -2650,6 +2691,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
      volatile struct iavf_tx_desc *txr = txq->tx_ring;
      struct iavf_tx_entry *txe_ring = txq->sw_ring;
      struct iavf_tx_entry *txe, *txn;
+     volatile struct iavf_tx_desc *txd;
      struct rte_mbuf *mb, *mb_seg;
      uint16_t desc_idx, desc_idx_last;
      uint16_t idx;
@@ -2781,6 +2823,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                  ddesc = (volatile struct iavf_tx_desc *)
                              &txr[desc_idx];

+                 txd = &txr[desc_idx];
                  txn = &txe_ring[txe->next_id];
                  RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);

@@ -2788,10 +2831,16 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        rte_pktmbuf_free_seg(txe->mbuf);

                  txe->mbuf = mb_seg;
-                 iavf_fill_data_desc(ddesc, mb_seg,
-                             ddesc_template, tlen, ipseclen);

-                 IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+                 if (nb_desc_ipsec) {
+                       iavf_fill_data_desc(ddesc, mb_seg,
+                             ddesc_template, tlen, ipseclen);
+                       IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
+                 } else {
+                       iavf_fill_unencrypt_desc(txd, mb_seg,
+                             ddesc_template, txe, txr, txe_ring, desc_idx_last);
+                       IAVF_DUMP_TX_DESC(txq, txd, desc_idx);
+                 }

                  txe->last_id = desc_idx_last;
                  desc_idx = txe->next_id;
@@ -2816,10 +2865,15 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                  txq->nb_used = 0;
            }

-           ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+           if (nb_desc_ipsec) {
+                 ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
                        IAVF_TXD_DATA_QW1_CMD_SHIFT);
-
-           IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
+                 IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
+           } else {
+                 txd->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
+                       IAVF_TXD_DATA_QW1_CMD_SHIFT);
+                 IAVF_DUMP_TX_DESC(txq, txd, desc_idx - 1);
+           }
      }

 end_of_tx:



^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2022-10-08  7:55 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-08-12 16:52 [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases peng1x.zhang
2022-08-12 16:52 ` [PATCH 2/2] net/iavf: support inner and outer checksum offload peng1x.zhang
2022-08-30  8:12   ` Yang, Qiming
2022-09-01  9:33   ` [PATCH v2] net/iavf: enable inner and outer Tx " Peng Zhang
2022-09-01 11:04     ` Zhang, Qi Z
2022-09-05  2:25     ` Yang, Qiming
2022-09-20  9:14     ` [PATCH v3] " Zhichao Zeng
2022-09-22  9:02       ` Xu, Ke1
2022-09-25  5:58         ` Zhang, Qi Z
2022-08-30  7:52 ` [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases Yang, Qiming
2022-09-26  5:17 ` [PATCH v2] net/iavf: fix TSO offload for tunnel case Zhichao Zeng
2022-09-26  9:48   ` Xu, Ke1
2022-09-27  2:33   ` Zhang, Qi Z
2022-09-27  9:56   ` [PATCH v3] " Zhichao Zeng
2022-09-29  5:27     ` [PATCH v4] " Zhichao Zeng
2022-09-30  3:46       ` Xu, Ke1
2022-09-30  9:05       ` Nicolau, Radu
2022-10-08  7:55         ` Zhang, Qi Z
2022-08-26 14:37 [PATCH 1/2] net/iavf: enable TSO offloading for tunnel cases Buckley, Daniel M
2022-08-30  2:22 Xu, Ke1
