* [dpdk-dev] [PATCH 1/2] net/virtio: update stats when in order xmit done
@ 2019-08-27 10:24 Marvin Liu
2019-08-27 10:24 ` [dpdk-dev] [PATCH 2/2] net/virtio: on demand cleanup when doing in order xmit Marvin Liu
` (2 more replies)
0 siblings, 3 replies; 18+ messages in thread
From: Marvin Liu @ 2019-08-27 10:24 UTC (permalink / raw)
To: tiwei.bie, dev; +Cc: Marvin Liu
When doing in-order xmit enqueue, packets are buffered and then flushed
into the avail ring. There may be no free room left in the avail ring,
in which case some buffered packets cannot be transmitted. So move the
stats update to just after the avail ring is successfully updated.
Signed-off-by: Marvin Liu <yong.liu@intel.com>
---
drivers/net/virtio/virtio_rxtx.c | 86 ++++++++++++++++----------------
1 file changed, 43 insertions(+), 43 deletions(-)
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 27ead19fb..5d4ed524e 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -575,6 +575,48 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
}
}
+static inline void
+virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+ uint32_t s = mbuf->pkt_len;
+ struct rte_ether_addr *ea;
+
+ stats->bytes += s;
+
+ if (s == 64) {
+ stats->size_bins[1]++;
+ } else if (s > 64 && s < 1024) {
+ uint32_t bin;
+
+ /* count zeros, and offset into correct bin */
+ bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+ stats->size_bins[bin]++;
+ } else {
+ if (s < 64)
+ stats->size_bins[0]++;
+ else if (s < 1519)
+ stats->size_bins[6]++;
+ else
+ stats->size_bins[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
+ stats->broadcast++;
+ else
+ stats->multicast++;
+ }
+}
+
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+ VIRTIO_DUMP_PACKET(m, m->data_len);
+
+ virtio_update_packet_stats(&rxvq->stats, m);
+}
+
static inline void
virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
struct rte_mbuf **cookies,
@@ -596,6 +638,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
dxp->cookie = (void *)cookies[i];
dxp->ndescs = 1;
+ virtio_update_packet_stats(&txvq->stats, cookies[i]);
hdr = (struct virtio_net_hdr *)
rte_pktmbuf_prepend(cookies[i], head_size);
@@ -1083,48 +1126,6 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
}
}
-static inline void
-virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
-{
- uint32_t s = mbuf->pkt_len;
- struct rte_ether_addr *ea;
-
- stats->bytes += s;
-
- if (s == 64) {
- stats->size_bins[1]++;
- } else if (s > 64 && s < 1024) {
- uint32_t bin;
-
- /* count zeros, and offset into correct bin */
- bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
- stats->size_bins[bin]++;
- } else {
- if (s < 64)
- stats->size_bins[0]++;
- else if (s < 1519)
- stats->size_bins[6]++;
- else
- stats->size_bins[7]++;
- }
-
- ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
- if (rte_is_multicast_ether_addr(ea)) {
- if (rte_is_broadcast_ether_addr(ea))
- stats->broadcast++;
- else
- stats->multicast++;
- }
-}
-
-static inline void
-virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
-{
- VIRTIO_DUMP_PACKET(m, m->data_len);
-
- virtio_update_packet_stats(&rxvq->stats, m);
-}
-
/* Optionally fill offload information in structure */
static inline int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
@@ -2198,7 +2199,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
inorder_pkts[nb_inorder_pkts] = txm;
nb_inorder_pkts++;
- virtio_update_packet_stats(&txvq->stats, txm);
continue;
}
--
2.17.1
* [dpdk-dev] [PATCH 2/2] net/virtio: on demand cleanup when doing in order xmit
2019-08-27 10:24 [dpdk-dev] [PATCH 1/2] net/virtio: update stats when in order xmit done Marvin Liu
@ 2019-08-27 10:24 ` Marvin Liu
2019-09-10 6:16 ` Tiwei Bie
2019-09-10 5:45 ` [dpdk-dev] [PATCH 1/2] net/virtio: update stats when in order xmit done Tiwei Bie
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 " Marvin Liu
2 siblings, 1 reply; 18+ messages in thread
From: Marvin Liu @ 2019-08-27 10:24 UTC (permalink / raw)
To: tiwei.bie, dev; +Cc: Marvin Liu
Check whether enough descriptors have been freed before the enqueue
operation. If more space is needed, try to clean up the used ring on
demand. This gives more chances to clean up the used ring and thus helps
RFC2544 performance.
Signed-off-by: Marvin Liu <yong.liu@intel.com>
---
drivers/net/virtio/virtio_rxtx.c | 73 +++++++++++++++++++++++---------
1 file changed, 54 insertions(+), 19 deletions(-)
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 5d4ed524e..550b0aa62 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -317,7 +317,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
}
/* Cleanup from completed inorder transmits. */
-static void
+static __rte_always_inline void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
uint16_t i, idx = vq->vq_used_cons_idx;
@@ -2152,6 +2152,21 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_tx;
}
+static __rte_always_inline int
+virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
+{
+ uint16_t nb_used;
+ struct virtio_hw *hw = vq->hw;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+ virtio_rmb(hw->weak_barriers);
+ need = RTE_MIN(need, (int)nb_used);
+
+ virtio_xmit_cleanup_inorder(vq, need);
+
+ return (need - vq->vq_free_cnt);
+}
+
uint16_t
virtio_xmit_pkts_inorder(void *tx_queue,
struct rte_mbuf **tx_pkts,
@@ -2161,8 +2176,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
struct virtqueue *vq = txvq->vq;
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
- uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+ uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
struct rte_mbuf *inorder_pkts[nb_pkts];
+ int need, nb_left;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
@@ -2175,17 +2191,12 @@ virtio_xmit_pkts_inorder(void *tx_queue,
nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb(hw->weak_barriers);
- if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
- virtio_xmit_cleanup_inorder(vq, nb_used);
-
- if (unlikely(!vq->vq_free_cnt))
+ if (likely(nb_used > (vq->vq_nentries - vq->vq_free_thresh)))
virtio_xmit_cleanup_inorder(vq, nb_used);
- nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
-
- for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
struct rte_mbuf *txm = tx_pkts[nb_tx];
- int slots, need;
+ int slots;
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
@@ -2203,6 +2214,22 @@ virtio_xmit_pkts_inorder(void *tx_queue,
}
if (nb_inorder_pkts) {
+ need = nb_inorder_pkts - vq->vq_free_cnt;
+
+
+ if (unlikely(need > 0)) {
+ nb_left = virtio_xmit_try_cleanup_inorder(vq,
+ need);
+
+ if (unlikely(nb_left > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to "
+ "transmit");
+ nb_inorder_pkts = vq->vq_free_cnt;
+ break;
+ }
+ }
+
virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
nb_inorder_pkts);
nb_inorder_pkts = 0;
@@ -2211,15 +2238,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
slots = txm->nb_segs + 1;
need = slots - vq->vq_free_cnt;
if (unlikely(need > 0)) {
- nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb(hw->weak_barriers);
- need = RTE_MIN(need, (int)nb_used);
+ nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
- virtio_xmit_cleanup_inorder(vq, need);
-
- need = slots - vq->vq_free_cnt;
-
- if (unlikely(need > 0)) {
+ if (unlikely(nb_left > 0)) {
PMD_TX_LOG(ERR,
"No free tx descriptors to transmit");
break;
@@ -2232,9 +2253,23 @@ virtio_xmit_pkts_inorder(void *tx_queue,
}
/* Transmit all inorder packets */
- if (nb_inorder_pkts)
+ if (nb_inorder_pkts) {
+ need = nb_inorder_pkts - vq->vq_free_cnt;
+
+ if (unlikely(need > 0)) {
+ nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
+
+ if (unlikely(nb_left > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ nb_inorder_pkts = vq->vq_free_cnt;
+ nb_tx -= nb_left;
+ }
+ }
+
virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
nb_inorder_pkts);
+ }
txvq->stats.packets += nb_tx;
--
2.17.1
* Re: [dpdk-dev] [PATCH 1/2] net/virtio: update stats when in order xmit done
2019-08-27 10:24 [dpdk-dev] [PATCH 1/2] net/virtio: update stats when in order xmit done Marvin Liu
2019-08-27 10:24 ` [dpdk-dev] [PATCH 2/2] net/virtio: on demand cleanup when doing in order xmit Marvin Liu
@ 2019-09-10 5:45 ` Tiwei Bie
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 " Marvin Liu
2 siblings, 0 replies; 18+ messages in thread
From: Tiwei Bie @ 2019-09-10 5:45 UTC (permalink / raw)
To: Marvin Liu; +Cc: dev, maxime.coquelin, zhihong.wang
On Tue, Aug 27, 2019 at 06:24:06PM +0800, Marvin Liu wrote:
> When doing xmit in-order enqueue, packets are buffered and then flushed
> into avail ring. It has possibility that no free room in avail ring,
> thus some buffered packets can't be transmitted. So move stats update
> just after successful avail ring updates.
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> ---
> drivers/net/virtio/virtio_rxtx.c | 86 ++++++++++++++++----------------
> 1 file changed, 43 insertions(+), 43 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 27ead19fb..5d4ed524e 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -575,6 +575,48 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
> }
> }
>
> +static inline void
> +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
> +{
> + uint32_t s = mbuf->pkt_len;
> + struct rte_ether_addr *ea;
> +
> + stats->bytes += s;
> +
> + if (s == 64) {
> + stats->size_bins[1]++;
> + } else if (s > 64 && s < 1024) {
> + uint32_t bin;
> +
> + /* count zeros, and offset into correct bin */
> + bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> + stats->size_bins[bin]++;
> + } else {
> + if (s < 64)
> + stats->size_bins[0]++;
> + else if (s < 1519)
> + stats->size_bins[6]++;
> + else
> + stats->size_bins[7]++;
> + }
> +
> + ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> + if (rte_is_multicast_ether_addr(ea)) {
> + if (rte_is_broadcast_ether_addr(ea))
> + stats->broadcast++;
> + else
> + stats->multicast++;
> + }
> +}
> +
> +static inline void
> +virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> +{
> + VIRTIO_DUMP_PACKET(m, m->data_len);
> +
> + virtio_update_packet_stats(&rxvq->stats, m);
> +}
If we move the above helpers, it's better to just move them
to the top of this file.
Thanks,
Tiwei
* Re: [dpdk-dev] [PATCH 2/2] net/virtio: on demand cleanup when doing in order xmit
2019-08-27 10:24 ` [dpdk-dev] [PATCH 2/2] net/virtio: on demand cleanup when doing in order xmit Marvin Liu
@ 2019-09-10 6:16 ` Tiwei Bie
2019-09-10 7:44 ` Liu, Yong
0 siblings, 1 reply; 18+ messages in thread
From: Tiwei Bie @ 2019-09-10 6:16 UTC (permalink / raw)
To: Marvin Liu; +Cc: dev, maxime.coquelin, zhihong.wang
On Tue, Aug 27, 2019 at 06:24:07PM +0800, Marvin Liu wrote:
> Check whether freed descriptors are enough before enqueue operation.
> If more space is needed, will try to cleanup used ring on demand. It
> can give more chances to cleanup used ring, thus help RFC2544 perf.
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> ---
> drivers/net/virtio/virtio_rxtx.c | 73 +++++++++++++++++++++++---------
> 1 file changed, 54 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 5d4ed524e..550b0aa62 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -317,7 +317,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
> }
>
> /* Cleanup from completed inorder transmits. */
> -static void
> +static __rte_always_inline void
> virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
> {
> uint16_t i, idx = vq->vq_used_cons_idx;
> @@ -2152,6 +2152,21 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> return nb_tx;
> }
>
> +static __rte_always_inline int
> +virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
> +{
> + uint16_t nb_used;
> + struct virtio_hw *hw = vq->hw;
> +
> + nb_used = VIRTQUEUE_NUSED(vq);
> + virtio_rmb(hw->weak_barriers);
> + need = RTE_MIN(need, (int)nb_used);
> +
> + virtio_xmit_cleanup_inorder(vq, need);
> +
> + return (need - vq->vq_free_cnt);
It's possible that `need` has been changed by
need = RTE_MIN(need, (int)nb_used);
so it no longer reflects the actual need.
Besides, you are passing (nb_inorder_pkts - vq->vq_free_cnt)
as the `need`; here you can't subtract vq->vq_free_cnt again to see
whether the need has been met.
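To make the problem concrete, here is a worked trace with hypothetical
numbers (the values are illustrative, not taken from a real run):

    /* Caller wants 8 slots; the ring currently has nothing free and
     * only 3 entries sitting in the used ring. */
    need    = 8;                            /* nb_inorder_pkts - vq_free_cnt */
    nb_used = 3;
    need    = RTE_MIN(need, (int)nb_used);  /* need is clamped to 3          */
    virtio_xmit_cleanup_inorder(vq, need);  /* vq->vq_free_cnt becomes 3     */
    /* return need - vq->vq_free_cnt  ->  3 - 3 = 0, i.e. "all satisfied",
     * although 5 of the 8 requested descriptors are still unavailable.
     * Remembering the target before the clamp
     * (nb_descs = vq->vq_free_cnt + need) keeps the real shortfall visible. */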
> +}
> +
> uint16_t
> virtio_xmit_pkts_inorder(void *tx_queue,
> struct rte_mbuf **tx_pkts,
> @@ -2161,8 +2176,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> struct virtqueue *vq = txvq->vq;
> struct virtio_hw *hw = vq->hw;
> uint16_t hdr_size = hw->vtnet_hdr_size;
> - uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
> + uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
> struct rte_mbuf *inorder_pkts[nb_pkts];
> + int need, nb_left;
>
> if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
> return nb_tx;
> @@ -2175,17 +2191,12 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> nb_used = VIRTQUEUE_NUSED(vq);
>
> virtio_rmb(hw->weak_barriers);
> - if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
> - virtio_xmit_cleanup_inorder(vq, nb_used);
> -
> - if (unlikely(!vq->vq_free_cnt))
> + if (likely(nb_used > (vq->vq_nentries - vq->vq_free_thresh)))
> virtio_xmit_cleanup_inorder(vq, nb_used);
>
> - nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
> -
> - for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
> + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
> struct rte_mbuf *txm = tx_pkts[nb_tx];
> - int slots, need;
> + int slots;
>
> /* optimize ring usage */
> if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> @@ -2203,6 +2214,22 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> }
>
> if (nb_inorder_pkts) {
> + need = nb_inorder_pkts - vq->vq_free_cnt;
> +
> +
There is no need to add blank lines here.
> + if (unlikely(need > 0)) {
> + nb_left = virtio_xmit_try_cleanup_inorder(vq,
> + need);
> +
> + if (unlikely(nb_left > 0)) {
> + PMD_TX_LOG(ERR,
> + "No free tx descriptors to "
> + "transmit");
> + nb_inorder_pkts = vq->vq_free_cnt;
You need to handle nb_tx as well.
> + break;
> + }
> + }
> +
> virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
> nb_inorder_pkts);
> nb_inorder_pkts = 0;
> @@ -2211,15 +2238,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> slots = txm->nb_segs + 1;
> need = slots - vq->vq_free_cnt;
> if (unlikely(need > 0)) {
> - nb_used = VIRTQUEUE_NUSED(vq);
> - virtio_rmb(hw->weak_barriers);
> - need = RTE_MIN(need, (int)nb_used);
> + nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
>
> - virtio_xmit_cleanup_inorder(vq, need);
> -
> - need = slots - vq->vq_free_cnt;
> -
> - if (unlikely(need > 0)) {
> + if (unlikely(nb_left > 0)) {
> PMD_TX_LOG(ERR,
> "No free tx descriptors to transmit");
> break;
> @@ -2232,9 +2253,23 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> }
>
> /* Transmit all inorder packets */
> - if (nb_inorder_pkts)
> + if (nb_inorder_pkts) {
> + need = nb_inorder_pkts - vq->vq_free_cnt;
> +
> + if (unlikely(need > 0)) {
> + nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
> +
> + if (unlikely(nb_left > 0)) {
> + PMD_TX_LOG(ERR,
> + "No free tx descriptors to transmit");
> + nb_inorder_pkts = vq->vq_free_cnt;
> + nb_tx -= nb_left;
> + }
> + }
> +
> virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
> nb_inorder_pkts);
> + }
>
> txvq->stats.packets += nb_tx;
>
> --
> 2.17.1
>
* Re: [dpdk-dev] [PATCH 2/2] net/virtio: on demand cleanup when doing in order xmit
2019-09-10 6:16 ` Tiwei Bie
@ 2019-09-10 7:44 ` Liu, Yong
0 siblings, 0 replies; 18+ messages in thread
From: Liu, Yong @ 2019-09-10 7:44 UTC (permalink / raw)
To: Bie, Tiwei; +Cc: dev, maxime.coquelin, Wang, Zhihong
> -----Original Message-----
> From: Bie, Tiwei
> Sent: Tuesday, September 10, 2019 2:17 PM
> To: Liu, Yong <yong.liu@intel.com>
> Cc: dev@dpdk.org; maxime.coquelin@redhat.com; Wang, Zhihong
> <zhihong.wang@intel.com>
> Subject: Re: [PATCH 2/2] net/virtio: on demand cleanup when doing in order
> xmit
>
> On Tue, Aug 27, 2019 at 06:24:07PM +0800, Marvin Liu wrote:
> > Check whether freed descriptors are enough before enqueue operation.
> > If more space is needed, will try to cleanup used ring on demand. It
> > can give more chances to cleanup used ring, thus help RFC2544 perf.
> >
> > Signed-off-by: Marvin Liu <yong.liu@intel.com>
> > ---
> > drivers/net/virtio/virtio_rxtx.c | 73 +++++++++++++++++++++++---------
> > 1 file changed, 54 insertions(+), 19 deletions(-)
> >
> > diff --git a/drivers/net/virtio/virtio_rxtx.c
> b/drivers/net/virtio/virtio_rxtx.c
> > index 5d4ed524e..550b0aa62 100644
> > --- a/drivers/net/virtio/virtio_rxtx.c
> > +++ b/drivers/net/virtio/virtio_rxtx.c
> > @@ -317,7 +317,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t
> num)
> > }
> >
> > /* Cleanup from completed inorder transmits. */
> > -static void
> > +static __rte_always_inline void
> > virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
> > {
> > uint16_t i, idx = vq->vq_used_cons_idx;
> > @@ -2152,6 +2152,21 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
> > return nb_tx;
> > }
> >
> > +static __rte_always_inline int
> > +virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
> > +{
> > + uint16_t nb_used;
> > + struct virtio_hw *hw = vq->hw;
> > +
> > + nb_used = VIRTQUEUE_NUSED(vq);
> > + virtio_rmb(hw->weak_barriers);
> > + need = RTE_MIN(need, (int)nb_used);
> > +
> > + virtio_xmit_cleanup_inorder(vq, need);
> > +
> > + return (need - vq->vq_free_cnt);
>
> It's possible that the `need` has been changed by
>
> need = RTE_MIN(need, (int)nb_used);
>
> So it can't reflect the actual needs.
>
> Besides, you are passing (nb_inorder_pkts - vq->vq_free_cnt)
> as the `need`, here you can't subtract vq->vq_free_cnt to see
> whether the needs have been met.
>
Tiwei,
Thanks for the reminder; this calculation can't reflect the number of remaining packets. I will fix it in v2.
Regards,
Marvin
> > +}
> > +
> > uint16_t
> > virtio_xmit_pkts_inorder(void *tx_queue,
> > struct rte_mbuf **tx_pkts,
> > @@ -2161,8 +2176,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> > struct virtqueue *vq = txvq->vq;
> > struct virtio_hw *hw = vq->hw;
> > uint16_t hdr_size = hw->vtnet_hdr_size;
> > - uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
> > + uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
> > struct rte_mbuf *inorder_pkts[nb_pkts];
> > + int need, nb_left;
> >
> > if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
> > return nb_tx;
> > @@ -2175,17 +2191,12 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> > nb_used = VIRTQUEUE_NUSED(vq);
> >
> > virtio_rmb(hw->weak_barriers);
> > - if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
> > - virtio_xmit_cleanup_inorder(vq, nb_used);
> > -
> > - if (unlikely(!vq->vq_free_cnt))
> > + if (likely(nb_used > (vq->vq_nentries - vq->vq_free_thresh)))
> > virtio_xmit_cleanup_inorder(vq, nb_used);
> >
> > - nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
> > -
> > - for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
> > + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
> > struct rte_mbuf *txm = tx_pkts[nb_tx];
> > - int slots, need;
> > + int slots;
> >
> > /* optimize ring usage */
> > if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> > @@ -2203,6 +2214,22 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> > }
> >
> > if (nb_inorder_pkts) {
> > + need = nb_inorder_pkts - vq->vq_free_cnt;
> > +
> > +
>
> There is no need to add blank lines here.
>
> > + if (unlikely(need > 0)) {
> > + nb_left = virtio_xmit_try_cleanup_inorder(vq,
> > + need);
> > +
> > + if (unlikely(nb_left > 0)) {
> > + PMD_TX_LOG(ERR,
> > + "No free tx descriptors to "
> > + "transmit");
> > + nb_inorder_pkts = vq->vq_free_cnt;
>
> You need to handle nb_tx as well.
>
> > + break;
> > + }
> > + }
> > +
> > virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
> > nb_inorder_pkts);
> > nb_inorder_pkts = 0;
> > @@ -2211,15 +2238,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> > slots = txm->nb_segs + 1;
> > need = slots - vq->vq_free_cnt;
> > if (unlikely(need > 0)) {
> > - nb_used = VIRTQUEUE_NUSED(vq);
> > - virtio_rmb(hw->weak_barriers);
> > - need = RTE_MIN(need, (int)nb_used);
> > + nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
> >
> > - virtio_xmit_cleanup_inorder(vq, need);
> > -
> > - need = slots - vq->vq_free_cnt;
> > -
> > - if (unlikely(need > 0)) {
> > + if (unlikely(nb_left > 0)) {
> > PMD_TX_LOG(ERR,
> > "No free tx descriptors to transmit");
> > break;
> > @@ -2232,9 +2253,23 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> > }
> >
> > /* Transmit all inorder packets */
> > - if (nb_inorder_pkts)
> > + if (nb_inorder_pkts) {
> > + need = nb_inorder_pkts - vq->vq_free_cnt;
> > +
> > + if (unlikely(need > 0)) {
> > + nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
> > +
> > + if (unlikely(nb_left > 0)) {
> > + PMD_TX_LOG(ERR,
> > + "No free tx descriptors to transmit");
> > + nb_inorder_pkts = vq->vq_free_cnt;
> > + nb_tx -= nb_left;
> > + }
> > + }
> > +
> > virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
> > nb_inorder_pkts);
> > + }
> >
> > txvq->stats.packets += nb_tx;
> >
> > --
> > 2.17.1
> >
* [dpdk-dev] [PATCH v2 1/2] net/virtio: update stats when in order xmit done
2019-08-27 10:24 [dpdk-dev] [PATCH 1/2] net/virtio: update stats when in order xmit done Marvin Liu
2019-08-27 10:24 ` [dpdk-dev] [PATCH 2/2] net/virtio: on demand cleanup when doing in order xmit Marvin Liu
2019-09-10 5:45 ` [dpdk-dev] [PATCH 1/2] net/virtio: update stats when in order xmit done Tiwei Bie
@ 2019-09-10 16:14 ` Marvin Liu
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 2/2] net/virtio: on demand cleanup when doing in order xmit Marvin Liu
` (2 more replies)
2 siblings, 3 replies; 18+ messages in thread
From: Marvin Liu @ 2019-09-10 16:14 UTC (permalink / raw)
To: maxime.coquelin, tiwei.bie; +Cc: dev, Marvin Liu
When doing in-order xmit enqueue, packets are buffered and then flushed
into the avail ring. Buffered packets can be dropped due to insufficient
space. Moving the stats update to just after the avail ring is
successfully updated guarantees correctness.
Signed-off-by: Marvin Liu <yong.liu@intel.com>
---
drivers/net/virtio/virtio_rxtx.c | 87 ++++++++++++++++----------------
1 file changed, 44 insertions(+), 43 deletions(-)
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 27ead19fb..d3ca36831 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -106,6 +106,48 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
dxp->next = VQ_RING_DESC_CHAIN_END;
}
+static inline void
+virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+ uint32_t s = mbuf->pkt_len;
+ struct rte_ether_addr *ea;
+
+ stats->bytes += s;
+
+ if (s == 64) {
+ stats->size_bins[1]++;
+ } else if (s > 64 && s < 1024) {
+ uint32_t bin;
+
+ /* count zeros, and offset into correct bin */
+ bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+ stats->size_bins[bin]++;
+ } else {
+ if (s < 64)
+ stats->size_bins[0]++;
+ else if (s < 1519)
+ stats->size_bins[6]++;
+ else
+ stats->size_bins[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
+ stats->broadcast++;
+ else
+ stats->multicast++;
+ }
+}
+
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+ VIRTIO_DUMP_PACKET(m, m->data_len);
+
+ virtio_update_packet_stats(&rxvq->stats, m);
+}
+
static uint16_t
virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
struct rte_mbuf **rx_pkts,
@@ -317,7 +359,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
}
/* Cleanup from completed inorder transmits. */
-static void
+static __rte_always_inline void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
uint16_t i, idx = vq->vq_used_cons_idx;
@@ -596,6 +638,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
dxp->cookie = (void *)cookies[i];
dxp->ndescs = 1;
+ virtio_update_packet_stats(&txvq->stats, cookies[i]);
hdr = (struct virtio_net_hdr *)
rte_pktmbuf_prepend(cookies[i], head_size);
@@ -1083,48 +1126,6 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
}
}
-static inline void
-virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
-{
- uint32_t s = mbuf->pkt_len;
- struct rte_ether_addr *ea;
-
- stats->bytes += s;
-
- if (s == 64) {
- stats->size_bins[1]++;
- } else if (s > 64 && s < 1024) {
- uint32_t bin;
-
- /* count zeros, and offset into correct bin */
- bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
- stats->size_bins[bin]++;
- } else {
- if (s < 64)
- stats->size_bins[0]++;
- else if (s < 1519)
- stats->size_bins[6]++;
- else
- stats->size_bins[7]++;
- }
-
- ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
- if (rte_is_multicast_ether_addr(ea)) {
- if (rte_is_broadcast_ether_addr(ea))
- stats->broadcast++;
- else
- stats->multicast++;
- }
-}
-
-static inline void
-virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
-{
- VIRTIO_DUMP_PACKET(m, m->data_len);
-
- virtio_update_packet_stats(&rxvq->stats, m);
-}
-
/* Optionally fill offload information in structure */
static inline int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
--
2.17.1
* [dpdk-dev] [PATCH v2 2/2] net/virtio: on demand cleanup when doing in order xmit
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 " Marvin Liu
@ 2019-09-10 16:14 ` Marvin Liu
2019-09-18 2:43 ` Tiwei Bie
2019-09-18 2:34 ` [dpdk-dev] [PATCH v2 1/2] net/virtio: update stats when in order xmit done Tiwei Bie
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 " Marvin Liu
2 siblings, 1 reply; 18+ messages in thread
From: Marvin Liu @ 2019-09-10 16:14 UTC (permalink / raw)
To: maxime.coquelin, tiwei.bie; +Cc: dev, Marvin Liu
Check whether there is enough space before the burst enqueue operation.
If more space is needed, try to clean up used descriptors on demand.
This gives more chances to free used descriptors and thus helps RFC2544
performance.
Signed-off-by: Marvin Liu <yong.liu@intel.com>
---
drivers/net/virtio/virtio_rxtx.c | 73 +++++++++++++++++++++++---------
1 file changed, 54 insertions(+), 19 deletions(-)
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index d3ca36831..842b600c3 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -2152,6 +2152,22 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_tx;
}
+static __rte_always_inline int
+virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
+{
+ uint16_t nb_used, nb_clean, nb_descs;
+ struct virtio_hw *hw = vq->hw;
+
+ nb_descs = vq->vq_free_cnt + need;
+ nb_used = VIRTQUEUE_NUSED(vq);
+ virtio_rmb(hw->weak_barriers);
+ nb_clean = RTE_MIN(need, (int)nb_used);
+
+ virtio_xmit_cleanup_inorder(vq, nb_clean);
+
+ return (nb_descs - vq->vq_free_cnt);
+}
+
uint16_t
virtio_xmit_pkts_inorder(void *tx_queue,
struct rte_mbuf **tx_pkts,
@@ -2161,8 +2177,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
struct virtqueue *vq = txvq->vq;
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
- uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+ uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
struct rte_mbuf *inorder_pkts[nb_pkts];
+ int need, nb_left;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
@@ -2175,17 +2192,12 @@ virtio_xmit_pkts_inorder(void *tx_queue,
nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb(hw->weak_barriers);
- if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
- virtio_xmit_cleanup_inorder(vq, nb_used);
-
- if (unlikely(!vq->vq_free_cnt))
+ if (likely(nb_used > (vq->vq_nentries - vq->vq_free_thresh)))
virtio_xmit_cleanup_inorder(vq, nb_used);
- nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
-
- for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
struct rte_mbuf *txm = tx_pkts[nb_tx];
- int slots, need;
+ int slots;
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
@@ -2199,11 +2211,25 @@ virtio_xmit_pkts_inorder(void *tx_queue,
inorder_pkts[nb_inorder_pkts] = txm;
nb_inorder_pkts++;
- virtio_update_packet_stats(&txvq->stats, txm);
continue;
}
if (nb_inorder_pkts) {
+ need = nb_inorder_pkts - vq->vq_free_cnt;
+
+ if (unlikely(need > 0)) {
+ nb_left = virtio_xmit_try_cleanup_inorder(vq,
+ need);
+
+ if (unlikely(nb_left > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to "
+ "transmit");
+ nb_inorder_pkts = vq->vq_free_cnt;
+ break;
+ }
+ }
+
virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
nb_inorder_pkts);
nb_inorder_pkts = 0;
@@ -2212,15 +2238,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
slots = txm->nb_segs + 1;
need = slots - vq->vq_free_cnt;
if (unlikely(need > 0)) {
- nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb(hw->weak_barriers);
- need = RTE_MIN(need, (int)nb_used);
-
- virtio_xmit_cleanup_inorder(vq, need);
+ nb_left = virtio_xmit_try_cleanup_inorder(vq, slots);
- need = slots - vq->vq_free_cnt;
-
- if (unlikely(need > 0)) {
+ if (unlikely(nb_left > 0)) {
PMD_TX_LOG(ERR,
"No free tx descriptors to transmit");
break;
@@ -2233,9 +2253,24 @@ virtio_xmit_pkts_inorder(void *tx_queue,
}
/* Transmit all inorder packets */
- if (nb_inorder_pkts)
+ if (nb_inorder_pkts) {
+ need = nb_inorder_pkts - vq->vq_free_cnt;
+
+ if (unlikely(need > 0)) {
+ nb_left = virtio_xmit_try_cleanup_inorder(vq,
+ need);
+
+ if (unlikely(nb_left > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ nb_inorder_pkts = vq->vq_free_cnt;
+ nb_tx -= nb_left;
+ }
+ }
+
virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
nb_inorder_pkts);
+ }
txvq->stats.packets += nb_tx;
--
2.17.1
* Re: [dpdk-dev] [PATCH v2 1/2] net/virtio: update stats when in order xmit done
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 " Marvin Liu
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 2/2] net/virtio: on demand cleanup when doing in order xmit Marvin Liu
@ 2019-09-18 2:34 ` Tiwei Bie
2019-09-18 3:19 ` Liu, Yong
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 " Marvin Liu
2 siblings, 1 reply; 18+ messages in thread
From: Tiwei Bie @ 2019-09-18 2:34 UTC (permalink / raw)
To: Marvin Liu; +Cc: maxime.coquelin, dev
On Wed, Sep 11, 2019 at 12:14:45AM +0800, Marvin Liu wrote:
> When doing xmit in-order enqueue, packets are buffered and then flushed
> into avail ring. Buffered packets can be dropped due to insufficient
> space. Moving stats update action just after successful avail ring
> updates can guarantee correctness.
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> ---
> drivers/net/virtio/virtio_rxtx.c | 87 ++++++++++++++++----------------
> 1 file changed, 44 insertions(+), 43 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 27ead19fb..d3ca36831 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -106,6 +106,48 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
> dxp->next = VQ_RING_DESC_CHAIN_END;
> }
>
> +static inline void
> +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
> +{
> + uint32_t s = mbuf->pkt_len;
> + struct rte_ether_addr *ea;
> +
> + stats->bytes += s;
> +
> + if (s == 64) {
> + stats->size_bins[1]++;
> + } else if (s > 64 && s < 1024) {
> + uint32_t bin;
> +
> + /* count zeros, and offset into correct bin */
> + bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> + stats->size_bins[bin]++;
> + } else {
> + if (s < 64)
> + stats->size_bins[0]++;
> + else if (s < 1519)
> + stats->size_bins[6]++;
> + else
> + stats->size_bins[7]++;
> + }
> +
> + ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> + if (rte_is_multicast_ether_addr(ea)) {
> + if (rte_is_broadcast_ether_addr(ea))
> + stats->broadcast++;
> + else
> + stats->multicast++;
> + }
> +}
> +
> +static inline void
> +virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> +{
> + VIRTIO_DUMP_PACKET(m, m->data_len);
> +
> + virtio_update_packet_stats(&rxvq->stats, m);
> +}
> +
> static uint16_t
> virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
> struct rte_mbuf **rx_pkts,
> @@ -317,7 +359,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
> }
>
> /* Cleanup from completed inorder transmits. */
> -static void
> +static __rte_always_inline void
> virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
> {
> uint16_t i, idx = vq->vq_used_cons_idx;
> @@ -596,6 +638,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
> dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
> dxp->cookie = (void *)cookies[i];
> dxp->ndescs = 1;
> + virtio_update_packet_stats(&txvq->stats, cookies[i]);
The virtio_update_packet_stats() call in virtio_xmit_pkts_inorder()
should be removed.
>
> hdr = (struct virtio_net_hdr *)
> rte_pktmbuf_prepend(cookies[i], head_size);
> @@ -1083,48 +1126,6 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
> }
> }
>
> -static inline void
> -virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
> -{
> - uint32_t s = mbuf->pkt_len;
> - struct rte_ether_addr *ea;
> -
> - stats->bytes += s;
> -
> - if (s == 64) {
> - stats->size_bins[1]++;
> - } else if (s > 64 && s < 1024) {
> - uint32_t bin;
> -
> - /* count zeros, and offset into correct bin */
> - bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> - stats->size_bins[bin]++;
> - } else {
> - if (s < 64)
> - stats->size_bins[0]++;
> - else if (s < 1519)
> - stats->size_bins[6]++;
> - else
> - stats->size_bins[7]++;
> - }
> -
> - ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> - if (rte_is_multicast_ether_addr(ea)) {
> - if (rte_is_broadcast_ether_addr(ea))
> - stats->broadcast++;
> - else
> - stats->multicast++;
> - }
> -}
> -
> -static inline void
> -virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> -{
> - VIRTIO_DUMP_PACKET(m, m->data_len);
> -
> - virtio_update_packet_stats(&rxvq->stats, m);
> -}
> -
> /* Optionally fill offload information in structure */
> static inline int
> virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
> --
> 2.17.1
>
* Re: [dpdk-dev] [PATCH v2 2/2] net/virtio: on demand cleanup when doing in order xmit
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 2/2] net/virtio: on demand cleanup when doing in order xmit Marvin Liu
@ 2019-09-18 2:43 ` Tiwei Bie
2019-09-18 3:23 ` Liu, Yong
0 siblings, 1 reply; 18+ messages in thread
From: Tiwei Bie @ 2019-09-18 2:43 UTC (permalink / raw)
To: Marvin Liu; +Cc: maxime.coquelin, dev
On Wed, Sep 11, 2019 at 12:14:46AM +0800, Marvin Liu wrote:
> Check whether space are enough before burst enqueue operation. If more
> space is needed, will try to cleanup used descriptors for space on
> demand. It can give more chances to free used descriptors, thus will
> help RFC2544 performance.
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> ---
> drivers/net/virtio/virtio_rxtx.c | 73 +++++++++++++++++++++++---------
> 1 file changed, 54 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index d3ca36831..842b600c3 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -2152,6 +2152,22 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> return nb_tx;
> }
>
> +static __rte_always_inline int
> +virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
> +{
> + uint16_t nb_used, nb_clean, nb_descs;
> + struct virtio_hw *hw = vq->hw;
> +
> + nb_descs = vq->vq_free_cnt + need;
> + nb_used = VIRTQUEUE_NUSED(vq);
> + virtio_rmb(hw->weak_barriers);
> + nb_clean = RTE_MIN(need, (int)nb_used);
> +
> + virtio_xmit_cleanup_inorder(vq, nb_clean);
> +
> + return (nb_descs - vq->vq_free_cnt);
> +}
> +
> uint16_t
> virtio_xmit_pkts_inorder(void *tx_queue,
> struct rte_mbuf **tx_pkts,
> @@ -2161,8 +2177,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> struct virtqueue *vq = txvq->vq;
> struct virtio_hw *hw = vq->hw;
> uint16_t hdr_size = hw->vtnet_hdr_size;
> - uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
> + uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
> struct rte_mbuf *inorder_pkts[nb_pkts];
> + int need, nb_left;
>
> if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
> return nb_tx;
> @@ -2175,17 +2192,12 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> nb_used = VIRTQUEUE_NUSED(vq);
>
> virtio_rmb(hw->weak_barriers);
> - if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
> - virtio_xmit_cleanup_inorder(vq, nb_used);
> -
> - if (unlikely(!vq->vq_free_cnt))
> + if (likely(nb_used > (vq->vq_nentries - vq->vq_free_thresh)))
> virtio_xmit_cleanup_inorder(vq, nb_used);
>
> - nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
> -
> - for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
> + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
> struct rte_mbuf *txm = tx_pkts[nb_tx];
> - int slots, need;
> + int slots;
>
> /* optimize ring usage */
> if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> @@ -2199,11 +2211,25 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> inorder_pkts[nb_inorder_pkts] = txm;
> nb_inorder_pkts++;
>
> - virtio_update_packet_stats(&txvq->stats, txm);
> continue;
> }
>
> if (nb_inorder_pkts) {
> + need = nb_inorder_pkts - vq->vq_free_cnt;
> +
> + if (unlikely(need > 0)) {
> + nb_left = virtio_xmit_try_cleanup_inorder(vq,
> + need);
There is no need to introduce `nb_left`. Looks better
to just reuse `need`.
> +
> + if (unlikely(nb_left > 0)) {
> + PMD_TX_LOG(ERR,
> + "No free tx descriptors to "
> + "transmit");
> + nb_inorder_pkts = vq->vq_free_cnt;
You need to handle nb_tx as well, otherwise mbufs will leak.
Or maybe just leave nb_inorder_pkts unchanged, and let the code
outside the loop handle it.
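One possible shape of the second option, where nb_inorder_pkts is left
untouched on failure so that the single flush block after the loop trims
both nb_tx and the burst, could look roughly like this (a sketch using
the v2 names, not necessarily what the series finally adopted):

    if (nb_inorder_pkts) {
            need = nb_inorder_pkts - vq->vq_free_cnt;
            if (unlikely(need > 0)) {
                    need = virtio_xmit_try_cleanup_inorder(vq, need);
                    if (unlikely(need > 0)) {
                            PMD_TX_LOG(ERR,
                                    "No free tx descriptors to transmit");
                            /* Keep nb_inorder_pkts as-is; the block after
                             * the loop adjusts nb_tx and flushes whatever
                             * still fits, so no buffered mbuf is lost. */
                            break;
                    }
            }
            virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
                            nb_inorder_pkts);
            nb_inorder_pkts = 0;
    }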
> + break;
> + }
> + }
> +
> virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
> nb_inorder_pkts);
> nb_inorder_pkts = 0;
> @@ -2212,15 +2238,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> slots = txm->nb_segs + 1;
> need = slots - vq->vq_free_cnt;
> if (unlikely(need > 0)) {
> - nb_used = VIRTQUEUE_NUSED(vq);
> - virtio_rmb(hw->weak_barriers);
> - need = RTE_MIN(need, (int)nb_used);
> -
> - virtio_xmit_cleanup_inorder(vq, need);
> + nb_left = virtio_xmit_try_cleanup_inorder(vq, slots);
>
> - need = slots - vq->vq_free_cnt;
> -
> - if (unlikely(need > 0)) {
> + if (unlikely(nb_left > 0)) {
> PMD_TX_LOG(ERR,
> "No free tx descriptors to transmit");
> break;
> @@ -2233,9 +2253,24 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> }
>
> /* Transmit all inorder packets */
> - if (nb_inorder_pkts)
> + if (nb_inorder_pkts) {
> + need = nb_inorder_pkts - vq->vq_free_cnt;
> +
> + if (unlikely(need > 0)) {
> + nb_left = virtio_xmit_try_cleanup_inorder(vq,
> + need);
> +
> + if (unlikely(nb_left > 0)) {
> + PMD_TX_LOG(ERR,
> + "No free tx descriptors to transmit");
> + nb_inorder_pkts = vq->vq_free_cnt;
> + nb_tx -= nb_left;
> + }
> + }
> +
> virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
> nb_inorder_pkts);
> + }
>
> txvq->stats.packets += nb_tx;
>
> --
> 2.17.1
>
* Re: [dpdk-dev] [PATCH v2 1/2] net/virtio: update stats when in order xmit done
2019-09-18 2:34 ` [dpdk-dev] [PATCH v2 1/2] net/virtio: update stats when in order xmit done Tiwei Bie
@ 2019-09-18 3:19 ` Liu, Yong
2019-09-18 4:18 ` Tiwei Bie
0 siblings, 1 reply; 18+ messages in thread
From: Liu, Yong @ 2019-09-18 3:19 UTC (permalink / raw)
To: Bie, Tiwei; +Cc: maxime.coquelin, dev
> -----Original Message-----
> From: Bie, Tiwei
> Sent: Wednesday, September 18, 2019 10:35 AM
> To: Liu, Yong <yong.liu@intel.com>
> Cc: maxime.coquelin@redhat.com; dev@dpdk.org
> Subject: Re: [PATCH v2 1/2] net/virtio: update stats when in order xmit
> done
>
> On Wed, Sep 11, 2019 at 12:14:45AM +0800, Marvin Liu wrote:
> > When doing xmit in-order enqueue, packets are buffered and then flushed
> > into avail ring. Buffered packets can be dropped due to insufficient
> > space. Moving stats update action just after successful avail ring
> > updates can guarantee correctness.
> >
> > Signed-off-by: Marvin Liu <yong.liu@intel.com>
> > ---
> > drivers/net/virtio/virtio_rxtx.c | 87 ++++++++++++++++----------------
> > 1 file changed, 44 insertions(+), 43 deletions(-)
> >
> > diff --git a/drivers/net/virtio/virtio_rxtx.c
> b/drivers/net/virtio/virtio_rxtx.c
> > index 27ead19fb..d3ca36831 100644
> > --- a/drivers/net/virtio/virtio_rxtx.c
> > +++ b/drivers/net/virtio/virtio_rxtx.c
> > @@ -106,6 +106,48 @@ vq_ring_free_id_packed(struct virtqueue *vq,
> uint16_t id)
> > dxp->next = VQ_RING_DESC_CHAIN_END;
> > }
> >
> > +static inline void
> > +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf
> *mbuf)
> > +{
> > + uint32_t s = mbuf->pkt_len;
> > + struct rte_ether_addr *ea;
> > +
> > + stats->bytes += s;
> > +
> > + if (s == 64) {
> > + stats->size_bins[1]++;
> > + } else if (s > 64 && s < 1024) {
> > + uint32_t bin;
> > +
> > + /* count zeros, and offset into correct bin */
> > + bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> > + stats->size_bins[bin]++;
> > + } else {
> > + if (s < 64)
> > + stats->size_bins[0]++;
> > + else if (s < 1519)
> > + stats->size_bins[6]++;
> > + else
> > + stats->size_bins[7]++;
> > + }
> > +
> > + ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> > + if (rte_is_multicast_ether_addr(ea)) {
> > + if (rte_is_broadcast_ether_addr(ea))
> > + stats->broadcast++;
> > + else
> > + stats->multicast++;
> > + }
> > +}
> > +
> > +static inline void
> > +virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> > +{
> > + VIRTIO_DUMP_PACKET(m, m->data_len);
> > +
> > + virtio_update_packet_stats(&rxvq->stats, m);
> > +}
> > +
> > static uint16_t
> > virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
> > struct rte_mbuf **rx_pkts,
> > @@ -317,7 +359,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t
> num)
> > }
> >
> > /* Cleanup from completed inorder transmits. */
> > -static void
> > +static __rte_always_inline void
> > virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
> > {
> > uint16_t i, idx = vq->vq_used_cons_idx;
> > @@ -596,6 +638,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx
> *txvq,
> > dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
> > dxp->cookie = (void *)cookies[i];
> > dxp->ndescs = 1;
> > + virtio_update_packet_stats(&txvq->stats, cookies[i]);
>
> The virtio_update_packet_stats() call in virtio_xmit_pkts_inorder()
> should be removed.
>
Hi Tiwei,
The call remaining in virtio_xmit_pkts_inorder() is for those packets not handled by the burst enqueue function.
Statistics for packets handled by the burst in-order enqueue function are updated in the inner loop.
Thanks,
Marvin
>
> >
> > hdr = (struct virtio_net_hdr *)
> > rte_pktmbuf_prepend(cookies[i], head_size);
> > @@ -1083,48 +1126,6 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq,
> struct rte_mbuf *m)
> > }
> > }
> >
> > -static inline void
> > -virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf
> *mbuf)
> > -{
> > - uint32_t s = mbuf->pkt_len;
> > - struct rte_ether_addr *ea;
> > -
> > - stats->bytes += s;
> > -
> > - if (s == 64) {
> > - stats->size_bins[1]++;
> > - } else if (s > 64 && s < 1024) {
> > - uint32_t bin;
> > -
> > - /* count zeros, and offset into correct bin */
> > - bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> > - stats->size_bins[bin]++;
> > - } else {
> > - if (s < 64)
> > - stats->size_bins[0]++;
> > - else if (s < 1519)
> > - stats->size_bins[6]++;
> > - else
> > - stats->size_bins[7]++;
> > - }
> > -
> > - ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> > - if (rte_is_multicast_ether_addr(ea)) {
> > - if (rte_is_broadcast_ether_addr(ea))
> > - stats->broadcast++;
> > - else
> > - stats->multicast++;
> > - }
> > -}
> > -
> > -static inline void
> > -virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> > -{
> > - VIRTIO_DUMP_PACKET(m, m->data_len);
> > -
> > - virtio_update_packet_stats(&rxvq->stats, m);
> > -}
> > -
> > /* Optionally fill offload information in structure */
> > static inline int
> > virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
> > --
> > 2.17.1
> >
* Re: [dpdk-dev] [PATCH v2 2/2] net/virtio: on demand cleanup when doing in order xmit
2019-09-18 2:43 ` Tiwei Bie
@ 2019-09-18 3:23 ` Liu, Yong
0 siblings, 0 replies; 18+ messages in thread
From: Liu, Yong @ 2019-09-18 3:23 UTC (permalink / raw)
To: Bie, Tiwei; +Cc: maxime.coquelin, dev
Thanks for the comments, I will update in the next patch.
> -----Original Message-----
> From: Bie, Tiwei
> Sent: Wednesday, September 18, 2019 10:44 AM
> To: Liu, Yong <yong.liu@intel.com>
> Cc: maxime.coquelin@redhat.com; dev@dpdk.org
> Subject: Re: [PATCH v2 2/2] net/virtio: on demand cleanup when doing in
> order xmit
>
> On Wed, Sep 11, 2019 at 12:14:46AM +0800, Marvin Liu wrote:
> > Check whether space are enough before burst enqueue operation. If more
> > space is needed, will try to cleanup used descriptors for space on
> > demand. It can give more chances to free used descriptors, thus will
> > help RFC2544 performance.
> >
> > Signed-off-by: Marvin Liu <yong.liu@intel.com>
> > ---
> > drivers/net/virtio/virtio_rxtx.c | 73 +++++++++++++++++++++++---------
> > 1 file changed, 54 insertions(+), 19 deletions(-)
> >
> > diff --git a/drivers/net/virtio/virtio_rxtx.c
> b/drivers/net/virtio/virtio_rxtx.c
> > index d3ca36831..842b600c3 100644
> > --- a/drivers/net/virtio/virtio_rxtx.c
> > +++ b/drivers/net/virtio/virtio_rxtx.c
> > @@ -2152,6 +2152,22 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf
> **tx_pkts, uint16_t nb_pkts)
> > return nb_tx;
> > }
> >
> > +static __rte_always_inline int
> > +virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
> > +{
> > + uint16_t nb_used, nb_clean, nb_descs;
> > + struct virtio_hw *hw = vq->hw;
> > +
> > + nb_descs = vq->vq_free_cnt + need;
> > + nb_used = VIRTQUEUE_NUSED(vq);
> > + virtio_rmb(hw->weak_barriers);
> > + nb_clean = RTE_MIN(need, (int)nb_used);
> > +
> > + virtio_xmit_cleanup_inorder(vq, nb_clean);
> > +
> > + return (nb_descs - vq->vq_free_cnt);
> > +}
> > +
> > uint16_t
> > virtio_xmit_pkts_inorder(void *tx_queue,
> > struct rte_mbuf **tx_pkts,
> > @@ -2161,8 +2177,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> > struct virtqueue *vq = txvq->vq;
> > struct virtio_hw *hw = vq->hw;
> > uint16_t hdr_size = hw->vtnet_hdr_size;
> > - uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
> > + uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
> > struct rte_mbuf *inorder_pkts[nb_pkts];
> > + int need, nb_left;
> >
> > if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
> > return nb_tx;
> > @@ -2175,17 +2192,12 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> > nb_used = VIRTQUEUE_NUSED(vq);
> >
> > virtio_rmb(hw->weak_barriers);
> > - if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
> > - virtio_xmit_cleanup_inorder(vq, nb_used);
> > -
> > - if (unlikely(!vq->vq_free_cnt))
> > + if (likely(nb_used > (vq->vq_nentries - vq->vq_free_thresh)))
> > virtio_xmit_cleanup_inorder(vq, nb_used);
> >
> > - nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
> > -
> > - for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
> > + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
> > struct rte_mbuf *txm = tx_pkts[nb_tx];
> > - int slots, need;
> > + int slots;
> >
> > /* optimize ring usage */
> > if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> > @@ -2199,11 +2211,25 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> > inorder_pkts[nb_inorder_pkts] = txm;
> > nb_inorder_pkts++;
> >
> > - virtio_update_packet_stats(&txvq->stats, txm);
> > continue;
> > }
> >
> > if (nb_inorder_pkts) {
> > + need = nb_inorder_pkts - vq->vq_free_cnt;
> > +
> > + if (unlikely(need > 0)) {
> > + nb_left = virtio_xmit_try_cleanup_inorder(vq,
> > + need);
>
> There is no need to introduce `nb_left`. Looks better
> to just reuse `need`.
>
> > +
> > + if (unlikely(nb_left > 0)) {
> > + PMD_TX_LOG(ERR,
> > + "No free tx descriptors to "
> > + "transmit");
> > + nb_inorder_pkts = vq->vq_free_cnt;
>
> You need to handle nb_tx as well, otherwise mbufs will leak.
> Or maybe just leave nb_inorder_pkts unchanged, and let the code
> outside the loop handle it.
>
>
> > + break;
> > + }
> > + }
> > +
> > virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
> > nb_inorder_pkts);
> > nb_inorder_pkts = 0;
> > @@ -2212,15 +2238,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> > slots = txm->nb_segs + 1;
> > need = slots - vq->vq_free_cnt;
> > if (unlikely(need > 0)) {
> > - nb_used = VIRTQUEUE_NUSED(vq);
> > - virtio_rmb(hw->weak_barriers);
> > - need = RTE_MIN(need, (int)nb_used);
> > -
> > - virtio_xmit_cleanup_inorder(vq, need);
> > + nb_left = virtio_xmit_try_cleanup_inorder(vq, slots);
> >
> > - need = slots - vq->vq_free_cnt;
> > -
> > - if (unlikely(need > 0)) {
> > + if (unlikely(nb_left > 0)) {
> > PMD_TX_LOG(ERR,
> > "No free tx descriptors to transmit");
> > break;
> > @@ -2233,9 +2253,24 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> > }
> >
> > /* Transmit all inorder packets */
> > - if (nb_inorder_pkts)
> > + if (nb_inorder_pkts) {
> > + need = nb_inorder_pkts - vq->vq_free_cnt;
> > +
> > + if (unlikely(need > 0)) {
> > + nb_left = virtio_xmit_try_cleanup_inorder(vq,
> > + need);
> > +
> > + if (unlikely(nb_left > 0)) {
> > + PMD_TX_LOG(ERR,
> > + "No free tx descriptors to transmit");
> > + nb_inorder_pkts = vq->vq_free_cnt;
> > + nb_tx -= nb_left;
> > + }
> > + }
> > +
> > virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
> > nb_inorder_pkts);
> > + }
> >
> > txvq->stats.packets += nb_tx;
> >
> > --
> > 2.17.1
> >
* Re: [dpdk-dev] [PATCH v2 1/2] net/virtio: update stats when in order xmit done
2019-09-18 3:19 ` Liu, Yong
@ 2019-09-18 4:18 ` Tiwei Bie
0 siblings, 0 replies; 18+ messages in thread
From: Tiwei Bie @ 2019-09-18 4:18 UTC (permalink / raw)
To: Liu, Yong; +Cc: maxime.coquelin, dev
On Wed, Sep 18, 2019 at 11:19:03AM +0800, Liu, Yong wrote:
> > -----Original Message-----
> > From: Bie, Tiwei
> > Sent: Wednesday, September 18, 2019 10:35 AM
> > To: Liu, Yong <yong.liu@intel.com>
> > Cc: maxime.coquelin@redhat.com; dev@dpdk.org
> > Subject: Re: [PATCH v2 1/2] net/virtio: update stats when in order xmit
> > done
> >
> > On Wed, Sep 11, 2019 at 12:14:45AM +0800, Marvin Liu wrote:
> > > When doing xmit in-order enqueue, packets are buffered and then flushed
> > > into avail ring. Buffered packets can be dropped due to insufficient
> > > space. Moving stats update action just after successful avail ring
> > > updates can guarantee correctness.
> > >
> > > Signed-off-by: Marvin Liu <yong.liu@intel.com>
> > > ---
> > > drivers/net/virtio/virtio_rxtx.c | 87 ++++++++++++++++----------------
> > > 1 file changed, 44 insertions(+), 43 deletions(-)
> > >
> > > diff --git a/drivers/net/virtio/virtio_rxtx.c
> > b/drivers/net/virtio/virtio_rxtx.c
> > > index 27ead19fb..d3ca36831 100644
> > > --- a/drivers/net/virtio/virtio_rxtx.c
> > > +++ b/drivers/net/virtio/virtio_rxtx.c
> > > @@ -106,6 +106,48 @@ vq_ring_free_id_packed(struct virtqueue *vq,
> > uint16_t id)
> > > dxp->next = VQ_RING_DESC_CHAIN_END;
> > > }
> > >
> > > +static inline void
> > > +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf
> > *mbuf)
> > > +{
> > > + uint32_t s = mbuf->pkt_len;
> > > + struct rte_ether_addr *ea;
> > > +
> > > + stats->bytes += s;
> > > +
> > > + if (s == 64) {
> > > + stats->size_bins[1]++;
> > > + } else if (s > 64 && s < 1024) {
> > > + uint32_t bin;
> > > +
> > > + /* count zeros, and offset into correct bin */
> > > + bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> > > + stats->size_bins[bin]++;
> > > + } else {
> > > + if (s < 64)
> > > + stats->size_bins[0]++;
> > > + else if (s < 1519)
> > > + stats->size_bins[6]++;
> > > + else
> > > + stats->size_bins[7]++;
> > > + }
> > > +
> > > + ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> > > + if (rte_is_multicast_ether_addr(ea)) {
> > > + if (rte_is_broadcast_ether_addr(ea))
> > > + stats->broadcast++;
> > > + else
> > > + stats->multicast++;
> > > + }
> > > +}
> > > +
> > > +static inline void
> > > +virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> > > +{
> > > + VIRTIO_DUMP_PACKET(m, m->data_len);
> > > +
> > > + virtio_update_packet_stats(&rxvq->stats, m);
> > > +}
> > > +
> > > static uint16_t
> > > virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
> > > struct rte_mbuf **rx_pkts,
> > > @@ -317,7 +359,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t
> > num)
> > > }
> > >
> > > /* Cleanup from completed inorder transmits. */
> > > -static void
> > > +static __rte_always_inline void
> > > virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
> > > {
> > > uint16_t i, idx = vq->vq_used_cons_idx;
> > > @@ -596,6 +638,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx
> > *txvq,
> > > dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
> > > dxp->cookie = (void *)cookies[i];
> > > dxp->ndescs = 1;
> > > + virtio_update_packet_stats(&txvq->stats, cookies[i]);
> >
> > The virtio_update_packet_stats() call in virtio_xmit_pkts_inorder()
> > should be removed.
> >
>
> Hi Tiwei,
> Function remained in virtio_xmit_pkts_inorder is for those packets not handled by burst enqueue function.
> Statistic of packets which handled in burst in_order enqueue function is updated in inner loop.
I mean the virtio_update_packet_stats() call below in
virtio_xmit_pkts_inorder() should be removed while
doing the above change:
https://github.com/DPDK/dpdk/blob/master/drivers/net/virtio/virtio_rxtx.c#L2201
I saw that line is removed by PATCH v2 2/2, but it
should be done in this patch.
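To spell out why, with v2 patch 1/2 applied on its own each
single-segment packet would be counted twice, once when it is buffered
in virtio_xmit_pkts_inorder() and once inside
virtqueue_enqueue_xmit_inorder(). The lines below are taken from the
hunks quoted earlier in this thread, only juxtaposed here:

    /* virtio_xmit_pkts_inorder(), single-segment path
     * (still present after v2 patch 1/2): */
    inorder_pkts[nb_inorder_pkts] = txm;
    nb_inorder_pkts++;
    virtio_update_packet_stats(&txvq->stats, txm);        /* first count  */

    /* virtqueue_enqueue_xmit_inorder(), added by v2 patch 1/2: */
    virtio_update_packet_stats(&txvq->stats, cookies[i]); /* second count */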
>
> Thanks,
> Marvin
>
> >
> > >
> > > hdr = (struct virtio_net_hdr *)
> > > rte_pktmbuf_prepend(cookies[i], head_size);
> > > @@ -1083,48 +1126,6 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq,
> > struct rte_mbuf *m)
> > > }
> > > }
> > >
> > > -static inline void
> > > -virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf
> > *mbuf)
> > > -{
> > > - uint32_t s = mbuf->pkt_len;
> > > - struct rte_ether_addr *ea;
> > > -
> > > - stats->bytes += s;
> > > -
> > > - if (s == 64) {
> > > - stats->size_bins[1]++;
> > > - } else if (s > 64 && s < 1024) {
> > > - uint32_t bin;
> > > -
> > > - /* count zeros, and offset into correct bin */
> > > - bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
> > > - stats->size_bins[bin]++;
> > > - } else {
> > > - if (s < 64)
> > > - stats->size_bins[0]++;
> > > - else if (s < 1519)
> > > - stats->size_bins[6]++;
> > > - else
> > > - stats->size_bins[7]++;
> > > - }
> > > -
> > > - ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
> > > - if (rte_is_multicast_ether_addr(ea)) {
> > > - if (rte_is_broadcast_ether_addr(ea))
> > > - stats->broadcast++;
> > > - else
> > > - stats->multicast++;
> > > - }
> > > -}
> > > -
> > > -static inline void
> > > -virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
> > > -{
> > > - VIRTIO_DUMP_PACKET(m, m->data_len);
> > > -
> > > - virtio_update_packet_stats(&rxvq->stats, m);
> > > -}
> > > -
> > > /* Optionally fill offload information in structure */
> > > static inline int
> > > virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
> > > --
> > > 2.17.1
> > >
* [dpdk-dev] [PATCH v3 1/2] net/virtio: update stats when in order xmit done
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 " Marvin Liu
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 2/2] net/virtio: on demand cleanup when doing in order xmit Marvin Liu
2019-09-18 2:34 ` [dpdk-dev] [PATCH v2 1/2] net/virtio: update stats when in order xmit done Tiwei Bie
@ 2019-09-18 17:06 ` Marvin Liu
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
` (2 more replies)
2 siblings, 3 replies; 18+ messages in thread
From: Marvin Liu @ 2019-09-18 17:06 UTC (permalink / raw)
To: maxime.coquelin, tiwei.bie, zhihong.wang; +Cc: dev, stable, Marvin Liu
When doing xmit in-order enqueue, packets are buffered and then flushed
into the avail ring. Buffered packets can be dropped due to insufficient
space. Moving the stats update to just after the successful avail ring
update guarantees correctness.
Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
Cc: stable@dpdk.org
Signed-off-by: Marvin Liu <yong.liu@intel.com>
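The effect of the change can be stated in isolation with a minimal, self-contained sketch (hypothetical names, not the driver code): statistics are bumped only for packets that actually land on the ring, so packets dropped for lack of descriptors are neither sent nor counted.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct toy_stats {
	uint64_t packets;
	uint64_t bytes;
};

static uint16_t
enqueue_inorder(struct toy_stats *st, const uint32_t *pkt_len,
		uint16_t nb_pkts, uint16_t free_slots)
{
	uint16_t n = nb_pkts < free_slots ? nb_pkts : free_slots;
	uint16_t i;

	for (i = 0; i < n; i++) {
		/* descriptor setup would happen here */
		st->packets++;		/* counted only when actually enqueued */
		st->bytes += pkt_len[i];
	}
	return n;			/* packets actually placed on the ring */
}

int
main(void)
{
	struct toy_stats st = { 0, 0 };
	uint32_t lens[4] = { 64, 128, 256, 1500 };
	/* only 3 free descriptors: the 4th packet is dropped and not counted */
	uint16_t sent = enqueue_inorder(&st, lens, 4, 3);

	printf("sent=%u packets=%" PRIu64 " bytes=%" PRIu64 "\n",
	       (unsigned)sent, st.packets, st.bytes);
	return 0;
}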
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 27ead19fb..91df5b1d0 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -106,6 +106,48 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
dxp->next = VQ_RING_DESC_CHAIN_END;
}
+static inline void
+virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
+{
+ uint32_t s = mbuf->pkt_len;
+ struct rte_ether_addr *ea;
+
+ stats->bytes += s;
+
+ if (s == 64) {
+ stats->size_bins[1]++;
+ } else if (s > 64 && s < 1024) {
+ uint32_t bin;
+
+ /* count zeros, and offset into correct bin */
+ bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+ stats->size_bins[bin]++;
+ } else {
+ if (s < 64)
+ stats->size_bins[0]++;
+ else if (s < 1519)
+ stats->size_bins[6]++;
+ else
+ stats->size_bins[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
+ stats->broadcast++;
+ else
+ stats->multicast++;
+ }
+}
+
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+ VIRTIO_DUMP_PACKET(m, m->data_len);
+
+ virtio_update_packet_stats(&rxvq->stats, m);
+}
+
static uint16_t
virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
struct rte_mbuf **rx_pkts,
@@ -317,7 +359,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
}
/* Cleanup from completed inorder transmits. */
-static void
+static __rte_always_inline void
virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
{
uint16_t i, idx = vq->vq_used_cons_idx;
@@ -596,6 +638,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
dxp->cookie = (void *)cookies[i];
dxp->ndescs = 1;
+ virtio_update_packet_stats(&txvq->stats, cookies[i]);
hdr = (struct virtio_net_hdr *)
rte_pktmbuf_prepend(cookies[i], head_size);
@@ -1083,48 +1126,6 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
}
}
-static inline void
-virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
-{
- uint32_t s = mbuf->pkt_len;
- struct rte_ether_addr *ea;
-
- stats->bytes += s;
-
- if (s == 64) {
- stats->size_bins[1]++;
- } else if (s > 64 && s < 1024) {
- uint32_t bin;
-
- /* count zeros, and offset into correct bin */
- bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
- stats->size_bins[bin]++;
- } else {
- if (s < 64)
- stats->size_bins[0]++;
- else if (s < 1519)
- stats->size_bins[6]++;
- else
- stats->size_bins[7]++;
- }
-
- ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
- if (rte_is_multicast_ether_addr(ea)) {
- if (rte_is_broadcast_ether_addr(ea))
- stats->broadcast++;
- else
- stats->multicast++;
- }
-}
-
-static inline void
-virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
-{
- VIRTIO_DUMP_PACKET(m, m->data_len);
-
- virtio_update_packet_stats(&rxvq->stats, m);
-}
-
/* Optionally fill offload information in structure */
static inline int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
@@ -2198,7 +2199,6 @@ virtio_xmit_pkts_inorder(void *tx_queue,
inorder_pkts[nb_inorder_pkts] = txm;
nb_inorder_pkts++;
- virtio_update_packet_stats(&txvq->stats, txm);
continue;
}
--
2.17.1
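A note on the size_bins[] formula in the moved function: for packets of 65 to 1023 bytes, the bin index is derived from the integer log2 of the length, i.e. (sizeof(s) * 8) - __builtin_clz(s) - 5 == log2(s) - 4. The following standalone snippet (an illustration, not part of the patch) reproduces the mapping.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Bucket layout: [0] <64, [1] ==64, [2] 65-127, [3] 128-255,
 * [4] 256-511, [5] 512-1023, [6] 1024-1518, [7] >=1519. */
static unsigned int
size_bin(uint32_t s)
{
	if (s == 64)
		return 1;
	if (s > 64 && s < 1024)
		/* log2(s) - 4: 65..127 -> 2, 128..255 -> 3, ..., 512..1023 -> 5 */
		return (sizeof(s) * 8) - __builtin_clz(s) - 5;
	if (s < 64)
		return 0;
	return s < 1519 ? 6 : 7;
}

int
main(void)
{
	static const uint32_t samples[] = { 60, 64, 65, 128, 512, 1024, 1518, 1519 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("pkt_len %4" PRIu32 " -> size_bins[%u]\n",
		       samples[i], size_bin(samples[i]));
	return 0;
}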
^ permalink raw reply [flat|nested] 18+ messages in thread
* [dpdk-dev] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 " Marvin Liu
@ 2019-09-18 17:06 ` Marvin Liu
2019-09-27 9:02 ` Maxime Coquelin
2019-09-27 9:49 ` Maxime Coquelin
2019-09-27 9:02 ` [dpdk-dev] [PATCH v3 1/2] net/virtio: update stats when in order xmit done Maxime Coquelin
2019-09-27 9:49 ` Maxime Coquelin
2 siblings, 2 replies; 18+ messages in thread
From: Marvin Liu @ 2019-09-18 17:06 UTC (permalink / raw)
To: maxime.coquelin, tiwei.bie, zhihong.wang; +Cc: dev, stable, Marvin Liu
Check whether there is enough space before the burst enqueue operation.
If more space is needed, try to clean up used descriptors on demand.
This gives more chances to free used descriptors and thus helps RFC2544
performance. Also deduct failed xmit packets from the total xmit count.
Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
Cc: stable@dpdk.org
Signed-off-by: Marvin Liu <yong.liu@intel.com>
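The core of the change is a helper that reclaims descriptors only when a flush is about to run short. A toy model of that contract (hypothetical names, not the driver code): compute how many descriptors are missing, reclaim at most the number of completed descriptors, and return the remaining shortfall.

#include <stdint.h>
#include <stdio.h>

struct toy_vq {
	uint16_t free_cnt;	/* descriptors currently free */
	uint16_t used_cnt;	/* completed descriptors reclaimable now */
};

static int
try_cleanup_inorder(struct toy_vq *vq, uint16_t need)
{
	uint16_t nb_clean = need < vq->used_cnt ? need : vq->used_cnt;

	/* stands in for virtio_xmit_cleanup_inorder(vq, nb_clean) */
	vq->free_cnt += nb_clean;
	vq->used_cnt -= nb_clean;
	return (int)need - (int)nb_clean;	/* > 0: still not enough room */
}

int
main(void)
{
	struct toy_vq vq = { .free_cnt = 2, .used_cnt = 5 };
	uint16_t burst = 8;			/* packets waiting to be flushed */
	int need = burst - vq.free_cnt;		/* 6 descriptors missing */

	if (need > 0)
		need = try_cleanup_inorder(&vq, (uint16_t)need);
	if (need > 0)
		printf("still short %d descriptor(s), caller stops the burst\n", need);
	else
		printf("room for all %u packets, free_cnt now %u\n",
		       (unsigned)burst, (unsigned)vq.free_cnt);
	return 0;
}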
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 91df5b1d0..7d5c60532 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -2152,6 +2152,22 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_tx;
}
+static __rte_always_inline int
+virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
+{
+ uint16_t nb_used, nb_clean, nb_descs;
+ struct virtio_hw *hw = vq->hw;
+
+ nb_descs = vq->vq_free_cnt + need;
+ nb_used = VIRTQUEUE_NUSED(vq);
+ virtio_rmb(hw->weak_barriers);
+ nb_clean = RTE_MIN(need, (int)nb_used);
+
+ virtio_xmit_cleanup_inorder(vq, nb_clean);
+
+ return (nb_descs - vq->vq_free_cnt);
+}
+
uint16_t
virtio_xmit_pkts_inorder(void *tx_queue,
struct rte_mbuf **tx_pkts,
@@ -2161,8 +2177,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
struct virtqueue *vq = txvq->vq;
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
- uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+ uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
struct rte_mbuf *inorder_pkts[nb_pkts];
+ int need;
if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
return nb_tx;
@@ -2178,14 +2195,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
virtio_xmit_cleanup_inorder(vq, nb_used);
- if (unlikely(!vq->vq_free_cnt))
- virtio_xmit_cleanup_inorder(vq, nb_used);
-
- nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
-
- for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
struct rte_mbuf *txm = tx_pkts[nb_tx];
- int slots, need;
+ int slots;
/* optimize ring usage */
if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
@@ -2203,6 +2215,17 @@ virtio_xmit_pkts_inorder(void *tx_queue,
}
if (nb_inorder_pkts) {
+ need = nb_inorder_pkts - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ need = virtio_xmit_try_cleanup_inorder(vq,
+ need);
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to "
+ "transmit");
+ break;
+ }
+ }
virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
nb_inorder_pkts);
nb_inorder_pkts = 0;
@@ -2211,13 +2234,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
slots = txm->nb_segs + 1;
need = slots - vq->vq_free_cnt;
if (unlikely(need > 0)) {
- nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb(hw->weak_barriers);
- need = RTE_MIN(need, (int)nb_used);
-
- virtio_xmit_cleanup_inorder(vq, need);
-
- need = slots - vq->vq_free_cnt;
+ need = virtio_xmit_try_cleanup_inorder(vq, slots);
if (unlikely(need > 0)) {
PMD_TX_LOG(ERR,
@@ -2232,9 +2249,22 @@ virtio_xmit_pkts_inorder(void *tx_queue,
}
/* Transmit all inorder packets */
- if (nb_inorder_pkts)
+ if (nb_inorder_pkts) {
+ need = nb_inorder_pkts - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ need = virtio_xmit_try_cleanup_inorder(vq,
+ need);
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ nb_inorder_pkts = vq->vq_free_cnt;
+ nb_tx -= need;
+ }
+ }
+
virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
nb_inorder_pkts);
+ }
txvq->stats.packets += nb_tx;
--
2.17.1
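As a worked example of the final accounting in the last hunk (illustrative numbers only): if nb_tx has reached 10 but 6 packets are still buffered in inorder_pkts, vq_free_cnt is 0 and cleanup can reclaim only 4 used descriptors, then need remains 2 after the retry. nb_inorder_pkts is shrunk to the 4 free descriptors, the last 2 packets are not transmitted, and nb_tx is reduced from 10 to 8 so that txvq->stats.packets reflects only what was actually posted to the ring.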
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/2] net/virtio: update stats when in order xmit done
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 " Marvin Liu
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
@ 2019-09-27 9:02 ` Maxime Coquelin
2019-09-27 9:49 ` Maxime Coquelin
2 siblings, 0 replies; 18+ messages in thread
From: Maxime Coquelin @ 2019-09-27 9:02 UTC (permalink / raw)
To: Marvin Liu, tiwei.bie, zhihong.wang; +Cc: dev, stable
On 9/18/19 7:06 PM, Marvin Liu wrote:
> When doing xmit in-order enqueue, packets are buffered and then flushed
> into the avail ring. Buffered packets can be dropped due to insufficient
> space. Moving the stats update to just after the successful avail ring
> update guarantees correctness.
>
> Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
> Cc: stable@dpdk.org
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [dpdk-dev] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
@ 2019-09-27 9:02 ` Maxime Coquelin
2019-09-27 9:49 ` Maxime Coquelin
1 sibling, 0 replies; 18+ messages in thread
From: Maxime Coquelin @ 2019-09-27 9:02 UTC (permalink / raw)
To: Marvin Liu, tiwei.bie, zhihong.wang; +Cc: dev, stable
On 9/18/19 7:06 PM, Marvin Liu wrote:
> Check whether there is enough space before the burst enqueue operation.
> If more space is needed, try to clean up used descriptors on demand.
> This gives more chances to free used descriptors and thus helps RFC2544
> performance. Also deduct failed xmit packets from the total xmit count.
>
> Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
> Cc: stable@dpdk.org
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/2] net/virtio: update stats when in order xmit done
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 " Marvin Liu
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
2019-09-27 9:02 ` [dpdk-dev] [PATCH v3 1/2] net/virtio: update stats when in order xmit done Maxime Coquelin
@ 2019-09-27 9:49 ` Maxime Coquelin
2 siblings, 0 replies; 18+ messages in thread
From: Maxime Coquelin @ 2019-09-27 9:49 UTC (permalink / raw)
To: Marvin Liu, tiwei.bie, zhihong.wang; +Cc: dev, stable
On 9/18/19 7:06 PM, Marvin Liu wrote:
> When doing xmit in-order enqueue, packets are buffered and then flushed
> into the avail ring. Buffered packets can be dropped due to insufficient
> space. Moving the stats update to just after the successful avail ring
> update guarantees correctness.
>
> Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
> Cc: stable@dpdk.org
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
>
Applied to dpdk-next-virtio/master.
Thanks,
Maxime
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [dpdk-dev] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
2019-09-27 9:02 ` Maxime Coquelin
@ 2019-09-27 9:49 ` Maxime Coquelin
1 sibling, 0 replies; 18+ messages in thread
From: Maxime Coquelin @ 2019-09-27 9:49 UTC (permalink / raw)
To: Marvin Liu, tiwei.bie, zhihong.wang; +Cc: dev, stable
On 9/18/19 7:06 PM, Marvin Liu wrote:
> Check whether there is enough space before the burst enqueue operation.
> If more space is needed, try to clean up used descriptors on demand.
> This gives more chances to free used descriptors and thus helps RFC2544
> performance. Also deduct failed xmit packets from the total xmit count.
>
> Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
> Cc: stable@dpdk.org
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
Applied to dpdk-next-virtio/master.
Thanks,
Maxime
^ permalink raw reply [flat|nested] 18+ messages in thread
end of thread, other threads:[~2019-09-27 9:49 UTC | newest]
Thread overview: 18+ messages
2019-08-27 10:24 [dpdk-dev] [PATCH 1/2] net/virtio: update stats when in order xmit done Marvin Liu
2019-08-27 10:24 ` [dpdk-dev] [PATCH 2/2] net/virtio: on demand cleanup when doing in order xmit Marvin Liu
2019-09-10 6:16 ` Tiwei Bie
2019-09-10 7:44 ` Liu, Yong
2019-09-10 5:45 ` [dpdk-dev] [PATCH 1/2] net/virtio: update stats when in order xmit done Tiwei Bie
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 " Marvin Liu
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 2/2] net/virtio: on demand cleanup when doing in order xmit Marvin Liu
2019-09-18 2:43 ` Tiwei Bie
2019-09-18 3:23 ` Liu, Yong
2019-09-18 2:34 ` [dpdk-dev] [PATCH v2 1/2] net/virtio: update stats when in order xmit done Tiwei Bie
2019-09-18 3:19 ` Liu, Yong
2019-09-18 4:18 ` Tiwei Bie
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 " Marvin Liu
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
2019-09-27 9:02 ` Maxime Coquelin
2019-09-27 9:49 ` Maxime Coquelin
2019-09-27 9:02 ` [dpdk-dev] [PATCH v3 1/2] net/virtio: update stats when in order xmit done Maxime Coquelin
2019-09-27 9:49 ` Maxime Coquelin