From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Zhiyong Yang <zhiyong.yang@intel.com>, dev@dpdk.org
Cc: yuanhan.liu@linux.intel.com
Subject: Re: [dpdk-dev] [PATCH v2] net/virtio: support to turn on/off traffic flow
Date: Wed, 26 Apr 2017 09:45:42 +0200 [thread overview]
Message-ID: <09fa9de3-b186-5949-d3a0-93e90064a357@redhat.com> (raw)
In-Reply-To: <1492583361-43601-1-git-send-email-zhiyong.yang@intel.com>
On 04/19/2017 08:29 AM, Zhiyong Yang wrote:
> Currently, virtio_dev_stop only disables the interrupt and marks the link
> down. When it is invoked, tx/rx traffic flows still work, which is strange
> behavior. This patch adds support for switching the traffic flow on/off.
>
> Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
> ---
>
> Changes in V2:
> Use the flag "started" introduced by yuanhan's applied patch, rx/tx
> check the per-device defined var "started".
>
> drivers/net/virtio/virtio_rxtx.c | 21 ++++++++++++++-------
> drivers/net/virtio/virtio_rxtx_simple.c | 5 +++++
> drivers/net/virtio/virtio_rxtx_simple_neon.c | 6 +++++-
> drivers/net/virtio/virtio_rxtx_simple_sse.c | 6 +++++-
> 4 files changed, 29 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index ea0bd9d..fbc96df 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -725,7 +725,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
> {
> struct virtnet_rx *rxvq = rx_queue;
> struct virtqueue *vq = rxvq->vq;
> - struct virtio_hw *hw;
> + struct virtio_hw *hw = vq->hw;
> struct rte_mbuf *rxm, *new_mbuf;
> uint16_t nb_used, num, nb_rx;
> uint32_t len[VIRTIO_MBUF_BURST_SZ];
> @@ -736,6 +736,10 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
> int offload;
> struct virtio_net_hdr *hdr;
>
> + nb_rx = 0;
Only cosmetic, but I would do it as in the Tx path, i.e. set nb_rx to 0
directly at declaration time.
> + if (unlikely(hw->started == 0))
> + return nb_rx;
> +
> nb_used = VIRTQUEUE_NUSED(vq);
>
> virtio_rmb();
> @@ -748,8 +752,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
> num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
> PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
>
> - hw = vq->hw;
> - nb_rx = 0;
> nb_enqueued = 0;
> hdr_size = hw->vtnet_hdr_size;
> offload = rx_offload_enabled(hw);
> @@ -834,7 +836,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
> {
> struct virtnet_rx *rxvq = rx_queue;
> struct virtqueue *vq = rxvq->vq;
> - struct virtio_hw *hw;
> + struct virtio_hw *hw = vq->hw;
> struct rte_mbuf *rxm, *new_mbuf;
> uint16_t nb_used, num, nb_rx;
> uint32_t len[VIRTIO_MBUF_BURST_SZ];
> @@ -848,14 +850,16 @@ virtio_recv_mergeable_pkts(void *rx_queue,
> uint32_t hdr_size;
> int offload;
>
> + nb_rx = 0;
Ditto.
> + if (unlikely(hw->started == 0))
> + return nb_rx;
> +
> nb_used = VIRTQUEUE_NUSED(vq);
>
> virtio_rmb();
>
> PMD_RX_LOG(DEBUG, "used:%d", nb_used);
>
> - hw = vq->hw;
> - nb_rx = 0;
> i = 0;
> nb_enqueued = 0;
> seg_num = 0;
> @@ -1005,9 +1009,12 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> struct virtqueue *vq = txvq->vq;
> struct virtio_hw *hw = vq->hw;
> uint16_t hdr_size = hw->vtnet_hdr_size;
> - uint16_t nb_used, nb_tx;
> + uint16_t nb_used, nb_tx = 0;
> int error;
>
> + if (unlikely(hw->started == 0))
> + return nb_tx;
> +
> if (unlikely(nb_pkts < 1))
> return nb_pkts;
>
> diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
> index b651e53..542cf80 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple.c
> @@ -89,12 +89,17 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
> {
> struct virtnet_tx *txvq = tx_queue;
> struct virtqueue *vq = txvq->vq;
> + struct virtio_hw *hw = vq->hw;
> uint16_t nb_used;
> uint16_t desc_idx;
> struct vring_desc *start_dp;
> uint16_t nb_tail, nb_commit;
> int i;
> uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
> + uint16_t nb_tx = 0;
> +
> + if (unlikely(hw->started == 0))
> + return nb_tx;
Maybe just return 0 directly — is nb_tx really needed here?
>
> nb_used = VIRTQUEUE_NUSED(vq);
> rte_compiler_barrier();
> diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
> index 793eefb..ecc62ad 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
> @@ -72,12 +72,13 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
> {
> struct virtnet_rx *rxvq = rx_queue;
> struct virtqueue *vq = rxvq->vq;
> + struct virtio_hw *hw = vq->hw;
> uint16_t nb_used;
> uint16_t desc_idx;
> struct vring_used_elem *rused;
> struct rte_mbuf **sw_ring;
> struct rte_mbuf **sw_ring_end;
> - uint16_t nb_pkts_received;
> + uint16_t nb_pkts_received = 0;
>
> uint8x16_t shuf_msk1 = {
> 0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
> @@ -106,6 +107,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
> 0, 0
> };
>
> + if (unlikely(hw->started == 0))
> + return nb_pkts_received;
Ditto.
> +
> if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
> return 0;
>
> diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
> index 87bb5c6..7cf0f8b 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
> @@ -74,12 +74,13 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
> {
> struct virtnet_rx *rxvq = rx_queue;
> struct virtqueue *vq = rxvq->vq;
> + struct virtio_hw *hw = vq->hw;
> uint16_t nb_used;
> uint16_t desc_idx;
> struct vring_used_elem *rused;
> struct rte_mbuf **sw_ring;
> struct rte_mbuf **sw_ring_end;
> - uint16_t nb_pkts_received;
> + uint16_t nb_pkts_received = 0;
> __m128i shuf_msk1, shuf_msk2, len_adjust;
>
> shuf_msk1 = _mm_set_epi8(
> @@ -109,6 +110,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
> 0, (uint16_t)-vq->hw->vtnet_hdr_size,
> 0, 0);
>
> + if (unlikely(hw->started == 0))
> + return nb_pkts_received;
Ditto.
> +
> if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
> return 0;
As said, these are only cosmetic comments; otherwise it looks valid to me.
If you fix these, feel free to add my:
Acked-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
next prev parent reply other threads:[~2017-04-26 7:45 UTC|newest]
Thread overview: 13+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-03-31 11:40 [dpdk-dev] [PATCH 0/2] net/virtio: support to turn on/off the " Zhiyong Yang
2017-03-31 11:40 ` [dpdk-dev] [PATCH 1/2] net/virtio: add data elements to turn on/off " Zhiyong Yang
2017-04-19 6:29 ` [dpdk-dev] [PATCH v2] net/virtio: support " Zhiyong Yang
2017-04-26 7:45 ` Maxime Coquelin [this message]
2017-04-26 7:56 ` Yuanhan Liu
2017-04-26 8:02 ` Maxime Coquelin
2017-03-31 11:40 ` [dpdk-dev] [PATCH 2/2] net/virtio: support to turn on/off the " Zhiyong Yang
2017-04-06 3:59 ` [dpdk-dev] [PATCH 0/2] " Yuanhan Liu
2017-04-17 8:50 ` Yang, Zhiyong
2017-04-19 2:03 ` Yuanhan Liu
2017-04-19 2:31 ` Yang, Zhiyong
2017-04-19 2:58 ` Yuanhan Liu
2017-04-19 5:27 ` Yang, Zhiyong
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=09fa9de3-b186-5949-d3a0-93e90064a357@redhat.com \
--to=maxime.coquelin@redhat.com \
--cc=dev@dpdk.org \
--cc=yuanhan.liu@linux.intel.com \
--cc=zhiyong.yang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).