patches for DPDK stable branches
From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com, tiwei.bie@intel.com,
	zhihong.wang@intel.com, stephen@networkplumber.org
Cc: dev@dpdk.org, stable@dpdk.org, Marvin Liu <yong.liu@intel.com>
Subject: [dpdk-stable] [PATCH] net/virtio: fix mbuf data and pkt length mismatch
Date: Mon, 23 Sep 2019 22:05:11 +0800
Message-ID: <20190923140511.107939-1-yong.liu@intel.com>

When the virtio-net header room is reserved with rte_pktmbuf_prepend(),
both the segment data length and the packet length of the mbuf are
increased. The data length then matches the descriptor length, but the
packet length has to be decreased again because the virtio-net header
is not part of the packet. This leaves data_len and pkt_len of the
mbuf mismatched. Fix the issue by accessing the mbuf headroom directly
and increasing the descriptor length where needed, so the mbuf length
fields are left untouched.
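
To illustrate the problem, here is a minimal sketch (not part of the
patch; the function name is made up and the lengths are hypothetical)
of what the old scheme does to the mbuf counters:

#include <rte_mbuf.h>

/* Sketch only: assume m is a single segment with
 * data_len == pkt_len == 64 on entry, and enough headroom
 * so that the prepend cannot fail.
 */
static void
old_prepend_scheme(struct rte_mbuf *m, uint16_t head_size)
{
	/* rte_pktmbuf_prepend() bumps both counters, so data_len
	 * and pkt_len are both 64 + head_size afterwards.
	 */
	char *hdr = rte_pktmbuf_prepend(m, head_size);

	/* The old code then restored pkt_len by hand ... */
	m->pkt_len -= head_size;

	/* ... leaving data_len still counting the header, which
	 * breaks the invariant that the data_len of all segments
	 * sums up to pkt_len.
	 */
	(void)hdr;
}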

Fixes: 58169a9c8153 ("net/virtio: support Tx checksum offload")
Fixes: 892dc798fa9c ("net/virtio: implement Tx path for packed queues")
Fixes: 4905ed3a523f ("net/virtio: optimize Tx enqueue for packed ring")
Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
Cc: stable@dpdk.org

Reported-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Marvin Liu <yong.liu@intel.com>
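
To sketch the new scheme (again not the literal driver code; the
function name is made up): the header is written into the headroom
directly, so no mbuf length field changes. Note the parentheses; the
cast must apply after the byte arithmetic. The computation is
equivalent to rte_pktmbuf_mtod_offset(m, struct virtio_net_hdr *,
-head_size).

#include <rte_mbuf.h>
#include "virtio_pci.h" /* struct virtio_net_hdr in this driver */

static struct virtio_net_hdr *
hdr_in_headroom(struct rte_mbuf *m, uint16_t head_size)
{
	/* Header sits in the headroom, immediately before the
	 * packet data; data_len and pkt_len stay untouched.
	 */
	return (struct virtio_net_hdr *)((char *)m->buf_addr +
		m->data_off - head_size);
}

The descriptor is then sized as data_len + head_size, so it covers the
header plus the payload while the mbuf keeps describing the packet
alone, as the hunks below do.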

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 27ead19fb..822cce06d 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -597,9 +597,8 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 		dxp->cookie = (void *)cookies[i];
 		dxp->ndescs = 1;
 
-		hdr = (struct virtio_net_hdr *)
-			rte_pktmbuf_prepend(cookies[i], head_size);
-		cookies[i]->pkt_len -= head_size;
+		hdr = (struct virtio_net_hdr *)((char *)cookies[i]->buf_addr +
+			cookies[i]->data_off - head_size);
 
 		/* if offload disabled, hdr is not zeroed yet, do it now */
 		if (!vq->hw->has_tx_offload)
@@ -608,9 +607,9 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 			virtqueue_xmit_offload(hdr, cookies[i], true);
 
 		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
-		start_dp[idx].len   = cookies[i]->data_len;
+		start_dp[idx].len   = cookies[i]->data_len + head_size;
 		start_dp[idx].flags = 0;
 
 		vq_update_avail_ring(vq, idx);
 
 		idx++;
@@ -644,9 +644,8 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 	flags = vq->vq_packed.cached_flags;
 
 	/* prepend cannot fail, checked by caller */
-	hdr = (struct virtio_net_hdr *)
-		rte_pktmbuf_prepend(cookie, head_size);
-	cookie->pkt_len -= head_size;
+	hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
+		cookie->data_off - head_size);
 
 	/* if offload disabled, hdr is not zeroed yet, do it now */
 	if (!vq->hw->has_tx_offload)
@@ -655,7 +654,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 		virtqueue_xmit_offload(hdr, cookie, true);
 
 	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
-	dp->len  = cookie->data_len;
+	dp->len  = cookie->data_len + head_size;
 	dp->id   = id;
 
 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
@@ -687,6 +686,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	uint16_t head_size = vq->hw->vtnet_hdr_size;
 	struct virtio_net_hdr *hdr;
 	uint16_t prev;
+	bool prepend_header = false;
 
 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
 
@@ -705,12 +705,9 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
 	if (can_push) {
 		/* prepend cannot fail, checked by caller */
-		hdr = (struct virtio_net_hdr *)
-			rte_pktmbuf_prepend(cookie, head_size);
-		/* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
-		 * which is wrong. Below subtract restores correct pkt size.
-		 */
-		cookie->pkt_len -= head_size;
+		hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
+			cookie->data_off - head_size);
+		prepend_header = true;
 
 		/* if offload disabled, it is not zeroed below, do it now */
 		if (!vq->hw->has_tx_offload)
@@ -738,6 +735,11 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
 		start_dp[idx].len  = cookie->data_len;
+		if (prepend_header) {
+			start_dp[idx].len += head_size;
+			prepend_header = false;
+		}
+
 		if (likely(idx != head_idx)) {
 			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
 			flags |= vq->vq_packed.cached_flags;
@@ -779,6 +781,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	uint16_t seg_num = cookie->nb_segs;
 	uint16_t head_idx, idx;
 	uint16_t head_size = vq->hw->vtnet_hdr_size;
+	bool prepend_header = false;
 	struct virtio_net_hdr *hdr;
 
 	head_idx = vq->vq_desc_head_idx;
@@ -794,12 +797,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
 	if (can_push) {
 		/* prepend cannot fail, checked by caller */
-		hdr = (struct virtio_net_hdr *)
-			rte_pktmbuf_prepend(cookie, head_size);
-		/* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
-		 * which is wrong. Below subtract restores correct pkt size.
-		 */
-		cookie->pkt_len -= head_size;
+		hdr = (struct virtio_net_hdr *)((char *)cookie->buf_addr +
+			cookie->data_off - head_size);
+		prepend_header = true;
 
 		/* if offload disabled, it is not zeroed below, do it now */
 		if (!vq->hw->has_tx_offload)
@@ -838,6 +838,10 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	do {
 		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
 		start_dp[idx].len   = cookie->data_len;
+		if (prepend_header) {
+			start_dp[idx].len += head_size;
+			prepend_header = false;
+		}
 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
 		idx = start_dp[idx].next;
 	} while ((cookie = cookie->next) != NULL);
-- 
2.17.1



Thread overview: 10+ messages
2019-09-23 14:05 Marvin Liu [this message]
2019-09-23 15:22 ` Stephen Hemminger
2019-09-24  4:53   ` Liu, Yong
2019-09-27  9:30 ` Maxime Coquelin
2019-09-27  9:50 ` Maxime Coquelin
2019-10-07  7:17   ` [dpdk-stable] [dpdk-dev] " Andrew Rybchenko
2019-10-14 15:15 ` Andrew Rybchenko
2019-10-14 15:28   ` Kevin Traynor
2019-10-15  5:33   ` Tiwei Bie
2019-10-15  8:14     ` Andrew Rybchenko
