From: Tiwei Bie <tiwei.bie@intel.com>
To: Marvin Liu <yong.liu@intel.com>
Cc: dev@dpdk.org, maxime.coquelin@redhat.com, zhihong.wang@intel.com
Subject: Re: [dpdk-dev] [PATCH 2/2] net/virtio: on demand cleanup when doing in order xmit
Date: Tue, 10 Sep 2019 14:16:47 +0800 [thread overview]
Message-ID: <20190910061647.GA13119@___> (raw)
In-Reply-To: <20190827102407.65106-2-yong.liu@intel.com>
On Tue, Aug 27, 2019 at 06:24:07PM +0800, Marvin Liu wrote:
> Check whether freed descriptors are enough before enqueue operation.
> If more space is needed, will try to cleanup used ring on demand. It
> can give more chances to cleanup used ring, thus help RFC2544 perf.
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> ---
> drivers/net/virtio/virtio_rxtx.c | 73 +++++++++++++++++++++++---------
> 1 file changed, 54 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 5d4ed524e..550b0aa62 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -317,7 +317,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
> }
>
> /* Cleanup from completed inorder transmits. */
> -static void
> +static __rte_always_inline void
> virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
> {
> uint16_t i, idx = vq->vq_used_cons_idx;
> @@ -2152,6 +2152,21 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> return nb_tx;
> }
>
> +static __rte_always_inline int
> +virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
> +{
> + uint16_t nb_used;
> + struct virtio_hw *hw = vq->hw;
> +
> + nb_used = VIRTQUEUE_NUSED(vq);
> + virtio_rmb(hw->weak_barriers);
> + need = RTE_MIN(need, (int)nb_used);
> +
> + virtio_xmit_cleanup_inorder(vq, need);
> +
> + return (need - vq->vq_free_cnt);
It's possible that `need` has already been reduced by
need = RTE_MIN(need, (int)nb_used);
so the value no longer reflects the actual need.
Besides, you are passing (nb_inorder_pkts - vq->vq_free_cnt)
as the `need`, so here you can't subtract vq->vq_free_cnt again
to check whether the need has been met.
> +}
> +
> uint16_t
> virtio_xmit_pkts_inorder(void *tx_queue,
> struct rte_mbuf **tx_pkts,
> @@ -2161,8 +2176,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> struct virtqueue *vq = txvq->vq;
> struct virtio_hw *hw = vq->hw;
> uint16_t hdr_size = hw->vtnet_hdr_size;
> - uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
> + uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
> struct rte_mbuf *inorder_pkts[nb_pkts];
> + int need, nb_left;
>
> if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
> return nb_tx;
> @@ -2175,17 +2191,12 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> nb_used = VIRTQUEUE_NUSED(vq);
>
> virtio_rmb(hw->weak_barriers);
> - if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
> - virtio_xmit_cleanup_inorder(vq, nb_used);
> -
> - if (unlikely(!vq->vq_free_cnt))
> + if (likely(nb_used > (vq->vq_nentries - vq->vq_free_thresh)))
> virtio_xmit_cleanup_inorder(vq, nb_used);
>
> - nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
> -
> - for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
> + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
> struct rte_mbuf *txm = tx_pkts[nb_tx];
> - int slots, need;
> + int slots;
>
> /* optimize ring usage */
> if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> @@ -2203,6 +2214,22 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> }
>
> if (nb_inorder_pkts) {
> + need = nb_inorder_pkts - vq->vq_free_cnt;
> +
> +
There is no need to add blank lines here.
> + if (unlikely(need > 0)) {
> + nb_left = virtio_xmit_try_cleanup_inorder(vq,
> + need);
> +
> + if (unlikely(nb_left > 0)) {
> + PMD_TX_LOG(ERR,
> + "No free tx descriptors to "
> + "transmit");
> + nb_inorder_pkts = vq->vq_free_cnt;
You need to handle nb_tx as well.
> + break;
> + }
> + }
> +
> virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
> nb_inorder_pkts);
> nb_inorder_pkts = 0;
> @@ -2211,15 +2238,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> slots = txm->nb_segs + 1;
> need = slots - vq->vq_free_cnt;
> if (unlikely(need > 0)) {
> - nb_used = VIRTQUEUE_NUSED(vq);
> - virtio_rmb(hw->weak_barriers);
> - need = RTE_MIN(need, (int)nb_used);
> + nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
>
> - virtio_xmit_cleanup_inorder(vq, need);
> -
> - need = slots - vq->vq_free_cnt;
> -
> - if (unlikely(need > 0)) {
> + if (unlikely(nb_left > 0)) {
> PMD_TX_LOG(ERR,
> "No free tx descriptors to transmit");
> break;
> @@ -2232,9 +2253,23 @@ virtio_xmit_pkts_inorder(void *tx_queue,
> }
>
> /* Transmit all inorder packets */
> - if (nb_inorder_pkts)
> + if (nb_inorder_pkts) {
> + need = nb_inorder_pkts - vq->vq_free_cnt;
> +
> + if (unlikely(need > 0)) {
> + nb_left = virtio_xmit_try_cleanup_inorder(vq, need);
> +
> + if (unlikely(nb_left > 0)) {
> + PMD_TX_LOG(ERR,
> + "No free tx descriptors to transmit");
> + nb_inorder_pkts = vq->vq_free_cnt;
> + nb_tx -= nb_left;
> + }
> + }
> +
> virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
> nb_inorder_pkts);
> + }
>
> txvq->stats.packets += nb_tx;
>
> --
> 2.17.1
>
next prev parent reply other threads:[~2019-09-10 6:19 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-08-27 10:24 [dpdk-dev] [PATCH 1/2] net/virtio: update stats when in order xmit done Marvin Liu
2019-08-27 10:24 ` [dpdk-dev] [PATCH 2/2] net/virtio: on demand cleanup when doing in order xmit Marvin Liu
2019-09-10 6:16 ` Tiwei Bie [this message]
2019-09-10 7:44 ` Liu, Yong
2019-09-10 5:45 ` [dpdk-dev] [PATCH 1/2] net/virtio: update stats when in order xmit done Tiwei Bie
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 " Marvin Liu
2019-09-10 16:14 ` [dpdk-dev] [PATCH v2 2/2] net/virtio: on demand cleanup when doing in order xmit Marvin Liu
2019-09-18 2:43 ` Tiwei Bie
2019-09-18 3:23 ` Liu, Yong
2019-09-18 2:34 ` [dpdk-dev] [PATCH v2 1/2] net/virtio: update stats when in order xmit done Tiwei Bie
2019-09-18 3:19 ` Liu, Yong
2019-09-18 4:18 ` Tiwei Bie
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 " Marvin Liu
2019-09-18 17:06 ` [dpdk-dev] [PATCH v3 2/2] net/virtio: on demand cleanup when in order xmit Marvin Liu
2019-09-27 9:02 ` Maxime Coquelin
2019-09-27 9:49 ` Maxime Coquelin
2019-09-27 9:02 ` [dpdk-dev] [PATCH v3 1/2] net/virtio: update stats when in order xmit done Maxime Coquelin
2019-09-27 9:49 ` Maxime Coquelin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190910061647.GA13119@___ \
--to=tiwei.bie@intel.com \
--cc=dev@dpdk.org \
--cc=maxime.coquelin@redhat.com \
--cc=yong.liu@intel.com \
--cc=zhihong.wang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).