DPDK patches and discussions
From: "Gavin Hu (Arm Technology China)" <Gavin.Hu@arm.com>
To: Jens Freimann <jfreimann@redhat.com>, "dev@dpdk.org" <dev@dpdk.org>
Cc: "tiwei.bie@intel.com" <tiwei.bie@intel.com>,
	"maxime.coquelin@redhat.com" <maxime.coquelin@redhat.com>
Subject: Re: [dpdk-dev] [PATCH v5 08/11] net/virtio: implement receive path for packed queues
Date: Mon, 10 Sep 2018 10:56:47 +0000	[thread overview]
Message-ID: <VI1PR08MB31674AC39345720EB2AAE45F8F050@VI1PR08MB3167.eurprd08.prod.outlook.com> (raw)
In-Reply-To: <20180906181947.20646-9-jfreimann@redhat.com>



> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Jens Freimann
> Sent: Friday, September 7, 2018 2:20 AM
> To: dev@dpdk.org
> Cc: tiwei.bie@intel.com; maxime.coquelin@redhat.com
> Subject: [dpdk-dev] [PATCH v5 08/11] net/virtio: implement receive path for
> packed queues
>
> Implement the receive part.
>
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c |  15 +++-
>  drivers/net/virtio/virtio_ethdev.h |   2 +
>  drivers/net/virtio/virtio_rxtx.c   | 131 +++++++++++++++++++++++++++++
>  3 files changed, 145 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index d2c5755bb..a2bb726ba 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -384,8 +384,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
>  vq->hw = hw;
>  vq->vq_queue_index = vtpci_queue_idx;
>  vq->vq_nentries = vq_size;
> -if (vtpci_packed_queue(hw))
> +if (vtpci_packed_queue(hw)) {
>  vq->vq_ring.avail_wrap_counter = 1;
> +vq->vq_ring.used_wrap_counter = 1;
> +}
>
>  /*
>   * Reserve a memzone for vring elements
> @@ -1320,7 +1322,13 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
>  {
>  struct virtio_hw *hw = eth_dev->data->dev_private;
>
> -if (hw->use_simple_rx) {
> +/*
> + * workaround for packed vqs which don't support
> + * mrg_rxbuf at this point
> + */
> +if (vtpci_packed_queue(hw)) {
> +eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
> +} else if (hw->use_simple_rx) {
>  PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
>  eth_dev->data->port_id);
>  eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
> @@ -1484,7 +1492,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
>
>  /* Setting up rx_header size for the device */
>  if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
> -    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
> +    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
> +    vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
>  hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
>  else
>  hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
> index 04161b461..25eaff224 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -70,6 +70,8 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
>
>  uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  uint16_t nb_pkts);
> +uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
> +uint16_t nb_pkts);
>
>  uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
>  uint16_t nb_pkts);
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 12787070e..3f5fa7366 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -31,6 +31,7 @@
>  #include "virtqueue.h"
>  #include "virtio_rxtx.h"
>  #include "virtio_rxtx_simple.h"
> +#include "virtio_ring.h"
>
>  #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
>  #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
> @@ -710,6 +711,34 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
>
>  PMD_INIT_FUNC_TRACE();
>
> +if (vtpci_packed_queue(hw)) {
> +struct vring_desc_packed *desc;
> +struct vq_desc_extra *dxp;
> +
> +for (desc_idx = 0; desc_idx < vq->vq_nentries;
> +desc_idx++) {
> +m = rte_mbuf_raw_alloc(rxvq->mpool);
> +if (unlikely(m == NULL))
> +return -ENOMEM;
> +
> +dxp = &vq->vq_descx[desc_idx];
> +dxp->cookie = m;
> +dxp->ndescs = 1;
> +
> +desc = &vq->vq_ring.desc_packed[desc_idx];
> +desc->addr = VIRTIO_MBUF_ADDR(m, vq) +
> +RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> +desc->len = m->buf_len - RTE_PKTMBUF_HEADROOM +
> +hw->vtnet_hdr_size;
> +desc->flags |= VRING_DESC_F_WRITE;
> +rte_smp_wmb();
> +set_desc_avail(&vq->vq_ring, desc);
> +}
> +vq->vq_ring.avail_wrap_counter ^= 1;
> +nbufs = desc_idx;
> +goto out;
> +}
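
Just to check my own understanding of what set_desc_avail() has to do here
(a spec-level sketch only, not the actual helper from patch 03/11, and
assuming the VRING_DESC_F_AVAIL/VRING_DESC_F_USED bits from patch 02/11):
making a packed descriptor available means setting its AVAIL bit to the
driver's avail wrap counter and its USED bit to the inverse, once addr/len
are visible (hence the rte_smp_wmb() above):

    /* hypothetical helper, for illustration only */
    static inline void
    mark_desc_avail(struct vring_desc_packed *dp, int avail_wrap_counter)
    {
            uint16_t flags = dp->flags;

            if (avail_wrap_counter) {
                    flags |= VRING_DESC_F_AVAIL;   /* AVAIL = wrap counter */
                    flags &= ~VRING_DESC_F_USED;   /* USED  = inverse      */
            } else {
                    flags &= ~VRING_DESC_F_AVAIL;
                    flags |= VRING_DESC_F_USED;
            }
            dp->flags = flags;                     /* hand over to device  */
    }
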
> +
>  /* Allocate blank mbufs for the each rx descriptor */
>  nbufs = 0;
>
> @@ -773,6 +802,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
>  vq_update_avail_idx(vq);
>  }
>
> +out:
>  PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
>
>  VIRTQUEUE_DUMP(vq);
> @@ -993,6 +1023,107 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
>  return 0;
>  }
>
> +uint16_t
> +virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
> +     uint16_t nb_pkts)
> +{
> +struct virtnet_rx *rxvq = rx_queue;
> +struct virtqueue *vq = rxvq->vq;
> +struct virtio_hw *hw = vq->hw;
> +struct rte_mbuf *rxm, *nmb;
> +uint16_t nb_rx;
> +uint32_t len;
> +uint32_t i;
> +uint32_t hdr_size;
> +struct virtio_net_hdr *hdr;
> +struct vring_desc_packed *descs = vq->vq_ring.desc_packed;
> +struct vring_desc_packed *desc;
> +uint16_t used_idx, id;
> +struct vq_desc_extra *dxp;
> +
> +nb_rx = 0;
> +if (unlikely(hw->started == 0))
> +return nb_rx;
> +
> +hdr_size = hw->vtnet_hdr_size;
> +
> +for (i = 0; i < nb_pkts; i++) {
> +rte_smp_rmb();
> +used_idx = vq->vq_used_cons_idx;
> +desc = &descs[used_idx];
> +id = desc->index;
> +if (!desc_is_used(desc, &vq->vq_ring))
> +break;
> +
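
For what it's worth, this is how I read the ownership test above at the spec
level (a sketch, not the desc_is_used() added in patch 03/11, and again
assuming the flag bits from patch 02/11): a descriptor has been used by the
device once its AVAIL and USED bits are equal and both match the driver's
used wrap counter:

    /* hypothetical check, for illustration only */
    static inline int
    packed_desc_is_used(const struct vring_desc_packed *dp,
                        int used_wrap_counter)
    {
            uint16_t avail = !!(dp->flags & VRING_DESC_F_AVAIL);
            uint16_t used  = !!(dp->flags & VRING_DESC_F_USED);

            return avail == used && used == !!used_wrap_counter;
    }
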
> +nmb = rte_mbuf_raw_alloc(rxvq->mpool);
> +if (unlikely(nmb == NULL)) {
> +struct rte_eth_dev *dev
> += &rte_eth_devices[rxvq->port_id];
> +dev->data->rx_mbuf_alloc_failed++;
> +break;
> +}
> +
> +dxp = &vq->vq_descx[id];
> +len = desc->len;
> +rxm = dxp->cookie;
> +dxp->cookie = nmb;
> +dxp->ndescs = 1;
> +
> +desc->addr = VIRTIO_MBUF_ADDR(nmb, vq) +
> +RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> +desc->len = nmb->buf_len - RTE_PKTMBUF_HEADROOM +
> +hw->vtnet_hdr_size;


Shouldn't there be a wmb here? The flags store below hands the descriptor back to the device, so the addr/len updates above need to be visible before it.

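Something along these lines is what I have in mind (sketch only, reusing the
lines quoted above):

    /* refill the descriptor with the new mbuf ... */
    desc->addr = VIRTIO_MBUF_ADDR(nmb, vq) +
            RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
    desc->len = nmb->buf_len - RTE_PKTMBUF_HEADROOM +
            hw->vtnet_hdr_size;

    /* ... and make addr/len visible before the flags store that hands
     * the descriptor back to the device */
    rte_smp_wmb();
    desc->flags = VRING_DESC_F_WRITE;
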
> +desc->flags = VRING_DESC_F_WRITE;
> +
> +PMD_RX_LOG(DEBUG, "packet len:%d", len);
> +
> +if (unlikely(len < hdr_size + ETHER_HDR_LEN)) {
> +PMD_RX_LOG(ERR, "Packet drop");
> +rte_pktmbuf_free(rxm);
> +rxvq->stats.errors++;
> +continue;
> +}
> +
> +rxm->port = rxvq->port_id;
> +rxm->data_off = RTE_PKTMBUF_HEADROOM;
> +rxm->ol_flags = 0;
> +rxm->vlan_tci = 0;
> +
> +rxm->pkt_len = (uint32_t)(len - hdr_size);
> +rxm->data_len = (uint16_t)(len - hdr_size);
> +
> +hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
> +RTE_PKTMBUF_HEADROOM - hdr_size);
> +
> +if (hw->vlan_strip)
> +rte_vlan_strip(rxm);
> +
> +if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
> +rte_pktmbuf_free(rxm);
> +rxvq->stats.errors++;
> +continue;
> +}
> +
> +VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
> +
> +rxvq->stats.bytes += rxm->pkt_len;
> +virtio_update_packet_stats(&rxvq->stats, rxm);
> +
> +rte_smp_wmb();
What is this wmb for? Everything stored after it (rx_pkts[] and vq_used_cons_idx) looks like driver-private state, so it doesn't seem to order anything the device can observe.
> +
> +rx_pkts[nb_rx++] = rxm;
> +vq->vq_used_cons_idx += dxp->ndescs;
> +if (vq->vq_used_cons_idx >= vq->vq_nentries) {
> +vq->vq_used_cons_idx -= vq->vq_nentries;
> +vq->vq_ring.used_wrap_counter ^= 1;
> +}
> +}
> +
> +rxvq->stats.packets += nb_rx;
> +
> +return nb_rx;
> +}
> +
>  #define VIRTIO_MBUF_BURST_SZ 64
>  #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
>  uint16_t
> --
> 2.17.1


Thread overview: 36+ messages
2018-09-06 18:19 [dpdk-dev] [PATCH v5 00/11] implement packed virtqueues Jens Freimann
2018-09-06 18:19 ` [dpdk-dev] [PATCH v5 01/11] net/virtio: vring init for packed queues Jens Freimann
2018-09-10  5:48   ` Gavin Hu (Arm Technology China)
2018-09-12  8:02   ` Maxime Coquelin
2018-09-12  9:04     ` Jens Freimann
2018-09-06 18:19 ` [dpdk-dev] [PATCH v5 02/11] net/virtio: add virtio 1.1 defines Jens Freimann
2018-09-10  5:22   ` Gavin Hu (Arm Technology China)
2018-09-10  6:07     ` Tiwei Bie
2018-09-11  7:18     ` Jens Freimann
2018-09-06 18:19 ` [dpdk-dev] [PATCH v5 03/11] net/virtio: add packed virtqueue helpers Jens Freimann
2018-09-12  8:25   ` Maxime Coquelin
2018-09-12  9:04     ` Jens Freimann
2018-09-06 18:19 ` [dpdk-dev] [PATCH v5 04/11] net/virtio: flush packed receive virtqueues Jens Freimann
2018-09-12  9:12   ` Maxime Coquelin
2018-09-12  9:49     ` Jens Freimann
2018-09-06 18:19 ` [dpdk-dev] [PATCH v5 05/11] net/virtio: dump packed virtqueue data Jens Freimann
2018-09-10  6:02   ` Gavin Hu (Arm Technology China)
2018-09-10  6:18     ` Tiwei Bie
2018-09-11  7:16       ` Jens Freimann
2018-09-12  9:13   ` Maxime Coquelin
2018-09-06 18:19 ` [dpdk-dev] [PATCH v5 06/11] net/virtio-user: add option to use packed queues Jens Freimann
2018-09-10  6:32   ` Gavin Hu (Arm Technology China)
2018-09-21 10:05     ` Jens Freimann
2018-09-12  9:25   ` Maxime Coquelin
2018-09-06 18:19 ` [dpdk-dev] [PATCH v5 07/11] net/virtio: implement transmit path for " Jens Freimann
2018-09-10  7:13   ` Gavin Hu (Arm Technology China)
2018-09-10  9:39   ` Gavin Hu (Arm Technology China)
2018-09-12 14:58   ` Maxime Coquelin
2018-09-13  9:15   ` Tiwei Bie
2018-09-06 18:19 ` [dpdk-dev] [PATCH v5 08/11] net/virtio: implement receive " Jens Freimann
2018-09-10 10:56   ` Gavin Hu (Arm Technology China) [this message]
2018-09-06 18:19 ` [dpdk-dev] [PATCH v5 09/11] net/virtio: disable ctrl virtqueue for packed rings Jens Freimann
2018-09-14  5:32   ` Tiwei Bie
2018-09-17  9:11     ` Jens Freimann
2018-09-06 18:19 ` [dpdk-dev] [PATCH v5 10/11] net/virtio: add support for mergeable buffers with packed virtqueues Jens Freimann
2018-09-06 18:19 ` [dpdk-dev] [PATCH v5 11/11] net/virtio: add support for event suppression Jens Freimann
