patches for DPDK stable branches
* [dpdk-stable] [PATCH v2] net/i40e: fix TSO pkt exceeds allowed buf size issue
@ 2019-12-25  8:55 Xiaoyun Li
  2019-12-26  1:50 ` Zhang, Qi Z
  0 siblings, 1 reply; 3+ messages in thread
From: Xiaoyun Li @ 2019-12-25  8:55 UTC (permalink / raw)
  To: qi.z.zhang, beilei.xing, xiaolong.ye, ciara.loftus, dev
  Cc: Xiaoyun Li, stable

Hardware requires that the max buffer size per tx descriptor not exceed
(16K-1)B. So when TSO is enabled, the mbuf data size may exceed this
limit and cause malicious behaviour to the NIC. This patch fixes the
issue by using more tx descs for this kind of large buffer.

Fixes: 4861cde46116 ("i40e: new poll mode driver")
Cc: stable@dpdk.org

Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
---
v2:
 * Each pkt can have several segments, so the needed tx descs should be
 * summed over all segments.
---
 drivers/net/i40e/i40e_rxtx.c | 44 +++++++++++++++++++++++++++++++++++-
 1 file changed, 43 insertions(+), 1 deletion(-)

diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 17dc8c78f..ce95d8c20 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -989,6 +989,23 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
 	return ctx_desc;
 }
 
+/* HW requires that Tx buffer size ranges from 1B up to (16K-1)B. */
+#define I40E_MAX_DATA_PER_TXD	(16 * 1024 - 1)
+/* Calculate the number of TX descriptors needed for each pkt */
+static inline uint16_t
+i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+	struct rte_mbuf *txd = tx_pkt;
+	uint16_t count = 0;
+
+	while (txd != NULL) {
+		count += DIV_ROUND_UP(txd->data_len, I40E_MAX_DATA_PER_TXD);
+		txd = txd->next;
+	}
+
+	return count;
+}
+
 uint16_t
 i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
@@ -1046,8 +1063,15 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		 * The number of descriptors that must be allocated for
 		 * a packet equals to the number of the segments of that
 		 * packet plus 1 context descriptor if needed.
+		 * Recalculate the needed tx descs when TSO enabled in case
+		 * the mbuf data size exceeds max data size that hw allows
+		 * per tx desc.
 		 */
-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+		if (ol_flags & PKT_TX_TCP_SEG)
+			nb_used = (uint16_t)(i40e_calc_pkt_desc(tx_pkt) +
+					     nb_ctx);
+		else
+			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
 		tx_last = (uint16_t)(tx_id + nb_used - 1);
 
 		/* Circular ring */
@@ -1160,6 +1184,24 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			slen = m_seg->data_len;
 			buf_dma_addr = rte_mbuf_data_iova(m_seg);
 
+			while ((ol_flags & PKT_TX_TCP_SEG) &&
+				unlikely(slen > I40E_MAX_DATA_PER_TXD)) {
+				txd->buffer_addr =
+					rte_cpu_to_le_64(buf_dma_addr);
+				txd->cmd_type_offset_bsz =
+					i40e_build_ctob(td_cmd,
+					td_offset, I40E_MAX_DATA_PER_TXD,
+					td_tag);
+
+				buf_dma_addr += I40E_MAX_DATA_PER_TXD;
+				slen -= I40E_MAX_DATA_PER_TXD;
+
+				txe->last_id = tx_last;
+				tx_id = txe->next_id;
+				txe = txn;
+				txd = &txr[tx_id];
+				txn = &sw_ring[txe->next_id];
+			}
 			PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
 				"buf_dma_addr: %#"PRIx64";\n"
 				"td_cmd: %#x;\n"
-- 
2.17.1
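
To make the descriptor accounting concrete, here is a minimal standalone
sketch (not driver code) of the arithmetic the patch introduces, using a
hypothetical two-segment TSO mbuf chain and a local stand-in for the
kernel-style DIV_ROUND_UP macro:

#include <stdint.h>
#include <stdio.h>

/* Same limit as the patch: (16K-1)B of data per Tx data descriptor. */
#define MAX_DATA_PER_TXD	(16 * 1024 - 1)
/* Local stand-in for the kernel-style DIV_ROUND_UP macro. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical TSO chain: two segments of 40000B and 9000B. */
	uint32_t seg_len[] = { 40000, 9000 };
	unsigned int nb_segs = 2, i, nb_desc = 0;

	for (i = 0; i < nb_segs; i++) {
		uint32_t slen = seg_len[i];

		/* Same per-segment computation as i40e_calc_pkt_desc(). */
		nb_desc += DIV_ROUND_UP(slen, MAX_DATA_PER_TXD);

		/* Mirror the split loop in the Tx path: each descriptor
		 * carries at most (16K-1)B of the segment.
		 */
		while (slen > MAX_DATA_PER_TXD) {
			printf("seg %u: descriptor with %d bytes\n",
			       i, MAX_DATA_PER_TXD);
			slen -= MAX_DATA_PER_TXD;
		}
		printf("seg %u: descriptor with %u bytes\n", i, slen);
	}

	/* 40000B -> 3 descriptors (16383 + 16383 + 7234), 9000B -> 1,
	 * i.e. 4 data descriptors, plus the 1 context descriptor that
	 * TSO already requires.
	 */
	printf("data descriptors needed: %u\n", nb_desc);
	return 0;
}

For such a chain, these are the same totals that the new i40e_calc_pkt_desc()
and the descriptor-splitting while loop in the patch would produce.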



* Re: [dpdk-stable] [PATCH v2] net/i40e: fix TSO pkt exceeds allowed buf size issue
  2019-12-25  8:55 [dpdk-stable] [PATCH v2] net/i40e: fix TSO pkt exceeds allowed buf size issue Xiaoyun Li
@ 2019-12-26  1:50 ` Zhang, Qi Z
  2019-12-26  2:06   ` Li, Xiaoyun
  0 siblings, 1 reply; 3+ messages in thread
From: Zhang, Qi Z @ 2019-12-26  1:50 UTC (permalink / raw)
  To: Li, Xiaoyun, Xing, Beilei, Ye, Xiaolong, Loftus, Ciara, dev; +Cc: stable

Hi Xiaoyun,

	Overall looks good to me, some minor comments inline


> -----Original Message-----
> From: Li, Xiaoyun <xiaoyun.li@intel.com>
> Sent: Wednesday, December 25, 2019 4:56 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Xing, Beilei <beilei.xing@intel.com>;
> Ye, Xiaolong <xiaolong.ye@intel.com>; Loftus, Ciara <ciara.loftus@intel.com>;
> dev@dpdk.org
> Cc: Li, Xiaoyun <xiaoyun.li@intel.com>; stable@dpdk.org
> Subject: [PATCH v2] net/i40e: fix TSO pkt exceeds allowed buf size issue
> 
> Hardware limits that max buffer size per tx descriptor should be (16K-1)B. So
> when TSO enabled, the mbuf data size may exceed the limit and cause
> malicious behaviour to the NIC. This patch fixes this issue by using more tx

Behavior

> descs for this kind of large buffer.
> 
> Fixes: 4861cde46116 ("i40e: new poll mode driver")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
> ---
> v2:
>  * Each pkt can have several segments so the needed tx descs should sum
>  * all segments up.
> ---
>  drivers/net/i40e/i40e_rxtx.c | 44 +++++++++++++++++++++++++++++++++++-
>  1 file changed, 43 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index
> 17dc8c78f..ce95d8c20 100644
> --- a/drivers/net/i40e/i40e_rxtx.c
> +++ b/drivers/net/i40e/i40e_rxtx.c
> @@ -989,6 +989,23 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union
> i40e_tx_offload tx_offload)
>  	return ctx_desc;
>  }
> 
> +/* HW requires that Tx buffer size ranges from 1B up to (16K-1)B. */
> +#define I40E_MAX_DATA_PER_TXD	(16 * 1024 - 1)

Since this is limited by the 14-bit buffer size field in the Tx descriptor.

Is it better to reuse the existing macros to define the max buf size?

#define I40E_MAX_DATA_PER_TXD \
	I40E_TXD_QW1_TX_BUF_SZ_MASK >> I40E_TXD_QW1_TX_BUF_SZ_SHIFT

Regards
Qi
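
As a sketch of that reuse (assuming the usual base-code definitions of the
QW1 buffer-size field, i.e. a 14-bit mask shifted by
I40E_TXD_QW1_TX_BUF_SZ_SHIFT), a parenthesized form keeps the macro safe
when it appears inside larger expressions:

/* Sketch only: assumes I40E_TXD_QW1_TX_BUF_SZ_MASK is the 14-bit field
 * mask (0x3FFF shifted into place), so this evaluates to
 * 0x3FFF = 16383 = (16K-1)B, matching the open-coded constant above.
 */
#define I40E_MAX_DATA_PER_TXD \
	(I40E_TXD_QW1_TX_BUF_SZ_MASK >> I40E_TXD_QW1_TX_BUF_SZ_SHIFT)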

> +/* Calculate the number of TX descriptors needed for each pkt */ static
> +inline uint16_t i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt) {
> +	struct rte_mbuf *txd = tx_pkt;
> +	uint16_t count = 0;
> +
> +	while (txd != NULL) {
> +		count += DIV_ROUND_UP(txd->data_len,
> I40E_MAX_DATA_PER_TXD);
> +		txd = txd->next;
> +	}
> +
> +	return count;
> +}
> +
>  uint16_t
>  i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> { @@ -1046,8 +1063,15 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
>  		 * The number of descriptors that must be allocated for
>  		 * a packet equals to the number of the segments of that
>  		 * packet plus 1 context descriptor if needed.
> +		 * Recalculate the needed tx descs when TSO enabled in case
> +		 * the mbuf data size exceeds max data size that hw allows
> +		 * per tx desc.
>  		 */
> -		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
> +		if (ol_flags & PKT_TX_TCP_SEG)
> +			nb_used = (uint16_t)(i40e_calc_pkt_desc(tx_pkt) +
> +					     nb_ctx);
> +		else
> +			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
>  		tx_last = (uint16_t)(tx_id + nb_used - 1);
> 
>  		/* Circular ring */
> @@ -1160,6 +1184,24 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
>  			slen = m_seg->data_len;
>  			buf_dma_addr = rte_mbuf_data_iova(m_seg);
> 
> +			while ((ol_flags & PKT_TX_TCP_SEG) &&
> +				unlikely(slen > I40E_MAX_DATA_PER_TXD)) {
> +				txd->buffer_addr =
> +					rte_cpu_to_le_64(buf_dma_addr);
> +				txd->cmd_type_offset_bsz =
> +					i40e_build_ctob(td_cmd,
> +					td_offset, I40E_MAX_DATA_PER_TXD,
> +					td_tag);
> +
> +				buf_dma_addr += I40E_MAX_DATA_PER_TXD;
> +				slen -= I40E_MAX_DATA_PER_TXD;
> +
> +				txe->last_id = tx_last;
> +				tx_id = txe->next_id;
> +				txe = txn;
> +				txd = &txr[tx_id];
> +				txn = &sw_ring[txe->next_id];
> +			}
>  			PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
>  				"buf_dma_addr: %#"PRIx64";\n"
>  				"td_cmd: %#x;\n"
> --
> 2.17.1



* Re: [dpdk-stable] [PATCH v2] net/i40e: fix TSO pkt exceeds allowed buf size issue
  2019-12-26  1:50 ` Zhang, Qi Z
@ 2019-12-26  2:06   ` Li, Xiaoyun
  0 siblings, 0 replies; 3+ messages in thread
From: Li, Xiaoyun @ 2019-12-26  2:06 UTC (permalink / raw)
  To: Zhang, Qi Z, Xing, Beilei, Ye, Xiaolong, Loftus, Ciara, dev; +Cc: stable

Sure. Will fix them in v3. Thanks.

> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Thursday, December 26, 2019 09:51
> To: Li, Xiaoyun <xiaoyun.li@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Ye,
> Xiaolong <xiaolong.ye@intel.com>; Loftus, Ciara <ciara.loftus@intel.com>;
> dev@dpdk.org
> Cc: stable@dpdk.org
> Subject: RE: [PATCH v2] net/i40e: fix TSO pkt exceeds allowed buf size issue
> 
> Hi Xiaoyun,
> 
> 	Overall looks good to me, some minor comments inline
> 
> 
> > -----Original Message-----
> > From: Li, Xiaoyun <xiaoyun.li@intel.com>
> > Sent: Wednesday, December 25, 2019 4:56 PM
> > To: Zhang, Qi Z <qi.z.zhang@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>; Ye, Xiaolong <xiaolong.ye@intel.com>; Loftus,
> > Ciara <ciara.loftus@intel.com>; dev@dpdk.org
> > Cc: Li, Xiaoyun <xiaoyun.li@intel.com>; stable@dpdk.org
> > Subject: [PATCH v2] net/i40e: fix TSO pkt exceeds allowed buf size
> > issue
> >
> > Hardware limits that max buffer size per tx descriptor should be
> > (16K-1)B. So when TSO enabled, the mbuf data size may exceed the limit
> > and cause malicious behaviour to the NIC. This patch fixes this issue
> > by using more tx
> 
> Behavior
> 
> > descs for this kind of large buffer.
> >
> > Fixes: 4861cde46116 ("i40e: new poll mode driver")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
> > ---
> > v2:
> >  * Each pkt can have several segments so the needed tx descs should
> > sum
> >  * all segments up.
> > ---
> >  drivers/net/i40e/i40e_rxtx.c | 44
> > +++++++++++++++++++++++++++++++++++-
> >  1 file changed, 43 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/net/i40e/i40e_rxtx.c
> > b/drivers/net/i40e/i40e_rxtx.c index
> > 17dc8c78f..ce95d8c20 100644
> > --- a/drivers/net/i40e/i40e_rxtx.c
> > +++ b/drivers/net/i40e/i40e_rxtx.c
> > @@ -989,6 +989,23 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union
> > i40e_tx_offload tx_offload)
> >  	return ctx_desc;
> >  }
> >
> > +/* HW requires that Tx buffer size ranges from 1B up to (16K-1)B. */
> > +#define I40E_MAX_DATA_PER_TXD	(16 * 1024 - 1)
> 
> Since this is limited by the 14-bit buffer size field in the Tx descriptor.
> 
> Is it better to reuse the existing macros to define the max buf size?
> 
> #define I40E_MAX_DATA_PER_TXD \
> 	I40E_TXD_QW1_TX_BUF_SZ_MASK >>
> I40E_TXD_QW1_TX_BUF_SZ_SHIFT
> 
> Regards
> Qi
> 
> > +/* Calculate the number of TX descriptors needed for each pkt */
> > +static inline uint16_t i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt) {
> > +	struct rte_mbuf *txd = tx_pkt;
> > +	uint16_t count = 0;
> > +
> > +	while (txd != NULL) {
> > +		count += DIV_ROUND_UP(txd->data_len,
> > I40E_MAX_DATA_PER_TXD);
> > +		txd = txd->next;
> > +	}
> > +
> > +	return count;
> > +}
> > +
> >  uint16_t
> >  i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t
> > nb_pkts) { @@ -1046,8 +1063,15 @@ i40e_xmit_pkts(void *tx_queue,
> > struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> >  		 * The number of descriptors that must be allocated for
> >  		 * a packet equals to the number of the segments of that
> >  		 * packet plus 1 context descriptor if needed.
> > +		 * Recalculate the needed tx descs when TSO enabled in case
> > +		 * the mbuf data size exceeds max data size that hw allows
> > +		 * per tx desc.
> >  		 */
> > -		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
> > +		if (ol_flags & PKT_TX_TCP_SEG)
> > +			nb_used = (uint16_t)(i40e_calc_pkt_desc(tx_pkt) +
> > +					     nb_ctx);
> > +		else
> > +			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
> >  		tx_last = (uint16_t)(tx_id + nb_used - 1);
> >
> >  		/* Circular ring */
> > @@ -1160,6 +1184,24 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf
> > **tx_pkts, uint16_t nb_pkts)
> >  			slen = m_seg->data_len;
> >  			buf_dma_addr = rte_mbuf_data_iova(m_seg);
> >
> > +			while ((ol_flags & PKT_TX_TCP_SEG) &&
> > +				unlikely(slen > I40E_MAX_DATA_PER_TXD)) {
> > +				txd->buffer_addr =
> > +					rte_cpu_to_le_64(buf_dma_addr);
> > +				txd->cmd_type_offset_bsz =
> > +					i40e_build_ctob(td_cmd,
> > +					td_offset, I40E_MAX_DATA_PER_TXD,
> > +					td_tag);
> > +
> > +				buf_dma_addr += I40E_MAX_DATA_PER_TXD;
> > +				slen -= I40E_MAX_DATA_PER_TXD;
> > +
> > +				txe->last_id = tx_last;
> > +				tx_id = txe->next_id;
> > +				txe = txn;
> > +				txd = &txr[tx_id];
> > +				txn = &sw_ring[txe->next_id];
> > +			}
> >  			PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
> >  				"buf_dma_addr: %#"PRIx64";\n"
> >  				"td_cmd: %#x;\n"
> > --
> > 2.17.1


