patches for DPDK stable branches
* [dpdk-stable] [PATCH v3 1/2] net/virtio: update stats when in order xmit done
       [not found] <20190910161446.36361-1-yong.liu@intel.com>
@ 2019-09-18 17:06 ` Marvin Liu
  2019-09-18 17:06   ` [dpdk-stable] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
                     ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Marvin Liu @ 2019-09-18 17:06 UTC (permalink / raw)
  To: maxime.coquelin, tiwei.bie, zhihong.wang; +Cc: dev, stable, Marvin Liu

When doing in-order enqueue on the xmit path, packets are buffered and
then flushed into the avail ring. Buffered packets can still be dropped
due to insufficient space, so counting them at buffering time overstates
the stats. Moving the stats update to just after the successful avail
ring update guarantees correctness.
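
As a minimal sketch of the corrected ordering -- with hypothetical
stand-in types and helper names, not the actual virtio PMD code -- the
idea is:

    #include <stdint.h>
    #include <stdbool.h>

    struct stats { uint64_t packets; uint64_t bytes; };
    struct pkt   { uint32_t len; };

    /* hypothetical stub: a real avail ring enqueue fails (returns
     * false) and drops the packet when the ring is full */
    static bool flush_to_avail_ring(struct pkt *p) { (void)p; return true; }

    static void
    xmit_one(struct stats *st, struct pkt *p)
    {
        /* Old order: stats were bumped when the packet was buffered,
         * before the flush, so dropped packets were still counted.
         * New order: count only after the packet has actually landed
         * in the avail ring. */
        if (flush_to_avail_ring(p)) {
            st->packets++;
            st->bytes += p->len;
        }
    }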

Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
Cc: stable@dpdk.org

Signed-off-by: Marvin Liu <yong.liu@intel.com>

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 27ead19fb..91df5b1d0 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -106,6 +106,48 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
 	dxp->next = VQ_RING_DESC_CHAIN_END;
 }
 
+static inline void
+virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+	uint32_t s = mbuf->pkt_len;
+	struct rte_ether_addr *ea;
+
+	stats->bytes += s;
+
+	if (s == 64) {
+		stats->size_bins[1]++;
+	} else if (s > 64 && s < 1024) {
+		uint32_t bin;
+
+		/* count zeros, and offset into correct bin */
+		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+		stats->size_bins[bin]++;
+	} else {
+		if (s < 64)
+			stats->size_bins[0]++;
+		else if (s < 1519)
+			stats->size_bins[6]++;
+		else
+			stats->size_bins[7]++;
+	}
+
+	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+	if (rte_is_multicast_ether_addr(ea)) {
+		if (rte_is_broadcast_ether_addr(ea))
+			stats->broadcast++;
+		else
+			stats->multicast++;
+	}
+}
+
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+	VIRTIO_DUMP_PACKET(m, m->data_len);
+
+	virtio_update_packet_stats(&rxvq->stats, m);
+}
+
 static uint16_t
 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
 				  struct rte_mbuf **rx_pkts,
@@ -317,7 +359,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
 }
 
 /* Cleanup from completed inorder transmits. */
-static void
+static __rte_always_inline void
 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
 {
 	uint16_t i, idx = vq->vq_used_cons_idx;
@@ -596,6 +638,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
 		dxp->cookie = (void *)cookies[i];
 		dxp->ndescs = 1;
+		virtio_update_packet_stats(&txvq->stats, cookies[i]);
 
 		hdr = (struct virtio_net_hdr *)
 			rte_pktmbuf_prepend(cookies[i], head_size);
@@ -1083,48 +1126,6 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
 	}
 }
 
-static inline void
-virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
-{
-	uint32_t s = mbuf->pkt_len;
-	struct rte_ether_addr *ea;
-
-	stats->bytes += s;
-
-	if (s == 64) {
-		stats->size_bins[1]++;
-	} else if (s > 64 && s < 1024) {
-		uint32_t bin;
-
-		/* count zeros, and offset into correct bin */
-		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
-		stats->size_bins[bin]++;
-	} else {
-		if (s < 64)
-			stats->size_bins[0]++;
-		else if (s < 1519)
-			stats->size_bins[6]++;
-		else
-			stats->size_bins[7]++;
-	}
-
-	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
-	if (rte_is_multicast_ether_addr(ea)) {
-		if (rte_is_broadcast_ether_addr(ea))
-			stats->broadcast++;
-		else
-			stats->multicast++;
-	}
-}
-
-static inline void
-virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
-{
-	VIRTIO_DUMP_PACKET(m, m->data_len);
-
-	virtio_update_packet_stats(&rxvq->stats, m);
-}
-
 /* Optionally fill offload information in structure */
 static inline int
 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
@@ -2198,7 +2199,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 			inorder_pkts[nb_inorder_pkts] = txm;
 			nb_inorder_pkts++;
 
-			virtio_update_packet_stats(&txvq->stats, txm);
 			continue;
 		}
 
-- 
2.17.1
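
A side note on the size_bins logic moved in the first hunk: for
64 < s < 1024, the expression (sizeof(s) * 8) - __builtin_clz(s) - 5 is
the 1-indexed position of the highest set bit minus 5, which buckets
packet lengths by power of two. A small standalone check that mirrors
the formula (not driver code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* 65..127 -> size_bins[2], 128..255 -> size_bins[3],
         * 256..511 -> size_bins[4], 512..1023 -> size_bins[5];
         * bins 0, 1, 6 and 7 (<64, ==64, 1024..1518, >=1519) are
         * handled by the explicit branches in the patch. */
        uint32_t samples[] = { 65, 127, 128, 255, 256, 511, 512, 1023 };
        for (unsigned int i = 0; i < 8; i++) {
            uint32_t s = samples[i];
            uint32_t bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
            printf("pkt_len %4u -> size_bins[%u]\n", s, bin);
        }
        return 0;
    }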



* [dpdk-stable] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit
  2019-09-18 17:06 ` [dpdk-stable] [PATCH v3 1/2] net/virtio: update stats when in order xmit done Marvin Liu
@ 2019-09-18 17:06   ` Marvin Liu
  2019-09-27  9:02     ` Maxime Coquelin
  2019-09-27  9:49     ` Maxime Coquelin
  2019-09-27  9:02   ` [dpdk-stable] [PATCH v3 1/2] net/virtio: update stats when in order xmit done Maxime Coquelin
  2019-09-27  9:49   ` Maxime Coquelin
  2 siblings, 2 replies; 6+ messages in thread
From: Marvin Liu @ 2019-09-18 17:06 UTC (permalink / raw)
  To: maxime.coquelin, tiwei.bie, zhihong.wang; +Cc: dev, stable, Marvin Liu

Check whether there is enough space before the burst enqueue operation.
If more space is needed, try to clean up used descriptors on demand.
This gives more chances to free used descriptors and thus helps RFC2544
performance. Also deduct failed xmit packets from the total xmit count.
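
A minimal model of the cleanup arithmetic introduced below (stand-in
struct and helper names, not the PMD's real ones): the helper records
free_cnt + need up front, reclaims at most `need` used descriptors, and
returns the remaining shortfall, so a return value <= 0 means enough
descriptors were freed.

    #include <stdint.h>

    struct vq { uint16_t free_cnt; uint16_t used_cnt; };

    /* hypothetical stand-in for virtio_xmit_cleanup_inorder(): move n
     * descriptors from the used count back to the free count */
    static void cleanup_used(struct vq *vq, uint16_t n)
    {
        vq->used_cnt -= n;
        vq->free_cnt += n;
    }

    static int try_cleanup(struct vq *vq, uint16_t need)
    {
        /* target == free_before + need, so after cleanup
         * target - free_after == need - reclaimed */
        uint16_t target = vq->free_cnt + need;
        uint16_t nb_clean = need < vq->used_cnt ? need : vq->used_cnt;

        cleanup_used(vq, nb_clean);
        return (int)target - (int)vq->free_cnt;
    }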

Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
Cc: stable@dpdk.org

Signed-off-by: Marvin Liu <yong.liu@intel.com>

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 91df5b1d0..7d5c60532 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -2152,6 +2152,22 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	return nb_tx;
 }
 
+static __rte_always_inline int
+virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
+{
+	uint16_t nb_used, nb_clean, nb_descs;
+	struct virtio_hw *hw = vq->hw;
+
+	nb_descs = vq->vq_free_cnt + need;
+	nb_used = VIRTQUEUE_NUSED(vq);
+	virtio_rmb(hw->weak_barriers);
+	nb_clean = RTE_MIN(need, (int)nb_used);
+
+	virtio_xmit_cleanup_inorder(vq, nb_clean);
+
+	return (nb_descs - vq->vq_free_cnt);
+}
+
 uint16_t
 virtio_xmit_pkts_inorder(void *tx_queue,
 			struct rte_mbuf **tx_pkts,
@@ -2161,8 +2177,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 	struct virtqueue *vq = txvq->vq;
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
-	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
 	struct rte_mbuf *inorder_pkts[nb_pkts];
+	int need;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -2178,14 +2195,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
 		virtio_xmit_cleanup_inorder(vq, nb_used);
 
-	if (unlikely(!vq->vq_free_cnt))
-		virtio_xmit_cleanup_inorder(vq, nb_used);
-
-	nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
-
-	for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
-		int slots, need;
+		int slots;
 
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
@@ -2203,6 +2215,17 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 		}
 
 		if (nb_inorder_pkts) {
+			need = nb_inorder_pkts - vq->vq_free_cnt;
+			if (unlikely(need > 0)) {
+				need = virtio_xmit_try_cleanup_inorder(vq,
+								       need);
+				if (unlikely(need > 0)) {
+					PMD_TX_LOG(ERR,
+						"No free tx descriptors to "
+						"transmit");
+					break;
+				}
+			}
 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
 							nb_inorder_pkts);
 			nb_inorder_pkts = 0;
@@ -2211,13 +2234,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 		slots = txm->nb_segs + 1;
 		need = slots - vq->vq_free_cnt;
 		if (unlikely(need > 0)) {
-			nb_used = VIRTQUEUE_NUSED(vq);
-			virtio_rmb(hw->weak_barriers);
-			need = RTE_MIN(need, (int)nb_used);
-
-			virtio_xmit_cleanup_inorder(vq, need);
-
-			need = slots - vq->vq_free_cnt;
+			need = virtio_xmit_try_cleanup_inorder(vq, slots);
 
 			if (unlikely(need > 0)) {
 				PMD_TX_LOG(ERR,
@@ -2232,9 +2249,22 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 	}
 
 	/* Transmit all inorder packets */
-	if (nb_inorder_pkts)
+	if (nb_inorder_pkts) {
+		need = nb_inorder_pkts - vq->vq_free_cnt;
+		if (unlikely(need > 0)) {
+			need = virtio_xmit_try_cleanup_inorder(vq,
+								  need);
+			if (unlikely(need > 0)) {
+				PMD_TX_LOG(ERR,
+					"No free tx descriptors to transmit");
+				nb_inorder_pkts = vq->vq_free_cnt;
+				nb_tx -= need;
+			}
+		}
+
 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
 						nb_inorder_pkts);
+	}
 
 	txvq->stats.packets += nb_tx;
 
-- 
2.17.1
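
A reading of the drop accounting in the final hunk above, restated with
explanatory comments (same variable names as the hunk):

    /* `need` buffered packets still do not fit after cleanup:
     * transmit only the vq_free_cnt packets that fit, and remove the
     * dropped tail from nb_tx so that txvq->stats.packets counts only
     * packets actually placed in the ring. */
    nb_inorder_pkts = vq->vq_free_cnt;
    nb_tx -= need;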



* Re: [dpdk-stable] [PATCH v3 1/2] net/virtio: update stats when in order xmit done
  2019-09-18 17:06 ` [dpdk-stable] [PATCH v3 1/2] net/virtio: update stats when in order xmit done Marvin Liu
  2019-09-18 17:06   ` [dpdk-stable] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
@ 2019-09-27  9:02   ` Maxime Coquelin
  2019-09-27  9:49   ` Maxime Coquelin
  2 siblings, 0 replies; 6+ messages in thread
From: Maxime Coquelin @ 2019-09-27  9:02 UTC (permalink / raw)
  To: Marvin Liu, tiwei.bie, zhihong.wang; +Cc: dev, stable



On 9/18/19 7:06 PM, Marvin Liu wrote:
> When doing in-order enqueue on the xmit path, packets are buffered and
> then flushed into the avail ring. Buffered packets can still be dropped
> due to insufficient space, so counting them at buffering time overstates
> the stats. Moving the stats update to just after the successful avail
> ring update guarantees correctness.
> 
> Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime



* Re: [dpdk-stable] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit
  2019-09-18 17:06   ` [dpdk-stable] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
@ 2019-09-27  9:02     ` Maxime Coquelin
  2019-09-27  9:49     ` Maxime Coquelin
  1 sibling, 0 replies; 6+ messages in thread
From: Maxime Coquelin @ 2019-09-27  9:02 UTC (permalink / raw)
  To: Marvin Liu, tiwei.bie, zhihong.wang; +Cc: dev, stable



On 9/18/19 7:06 PM, Marvin Liu wrote:
> Check whether there is enough space before the burst enqueue operation.
> If more space is needed, try to clean up used descriptors on demand.
> This gives more chances to free used descriptors and thus helps RFC2544
> performance. Also deduct failed xmit packets from the total xmit count.
> 
> Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> 
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime



* Re: [dpdk-stable] [PATCH v3 1/2] net/virtio: update stats when in order xmit done
  2019-09-18 17:06 ` [dpdk-stable] [PATCH v3 1/2] net/virtio: update stats when in order xmit done Marvin Liu
  2019-09-18 17:06   ` [dpdk-stable] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
  2019-09-27  9:02   ` [dpdk-stable] [PATCH v3 1/2] net/virtio: update stats when in order xmit done Maxime Coquelin
@ 2019-09-27  9:49   ` Maxime Coquelin
  2 siblings, 0 replies; 6+ messages in thread
From: Maxime Coquelin @ 2019-09-27  9:49 UTC (permalink / raw)
  To: Marvin Liu, tiwei.bie, zhihong.wang; +Cc: dev, stable



On 9/18/19 7:06 PM, Marvin Liu wrote:
> When doing in-order enqueue on the xmit path, packets are buffered and
> then flushed into the avail ring. Buffered packets can still be dropped
> due to insufficient space, so counting them at buffering time overstates
> the stats. Moving the stats update to just after the successful avail
> ring update guarantees correctness.
> 
> Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> 
Applied to dpdk-next-virtio/master.

Thanks,
Maxime


* Re: [dpdk-stable] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit
  2019-09-18 17:06   ` [dpdk-stable] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
  2019-09-27  9:02     ` Maxime Coquelin
@ 2019-09-27  9:49     ` Maxime Coquelin
  1 sibling, 0 replies; 6+ messages in thread
From: Maxime Coquelin @ 2019-09-27  9:49 UTC (permalink / raw)
  To: Marvin Liu, tiwei.bie, zhihong.wang; +Cc: dev, stable



On 9/18/19 7:06 PM, Marvin Liu wrote:
> Check whether there is enough space before the burst enqueue operation.
> If more space is needed, try to clean up used descriptors on demand.
> This gives more chances to free used descriptors and thus helps RFC2544
> performance. Also deduct failed xmit packets from the total xmit count.
> 
> Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Marvin Liu <yong.liu@intel.com>

Applied to dpdk-next-virtio/master.

Thanks,
Maxime


end of thread

Thread overview: 6 messages
     [not found] <20190910161446.36361-1-yong.liu@intel.com>
2019-09-18 17:06 ` [dpdk-stable] [PATCH v3 1/2] net/virtio: update stats when in order xmit done Marvin Liu
2019-09-18 17:06   ` [dpdk-stable] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
2019-09-27  9:02     ` Maxime Coquelin
2019-09-27  9:49     ` Maxime Coquelin
2019-09-27  9:02   ` [dpdk-stable] [PATCH v3 1/2] net/virtio: update stats when in order xmit done Maxime Coquelin
2019-09-27  9:49   ` Maxime Coquelin
