From: Tiwei Bie <tiwei.bie@intel.com>
To: Jens Freimann <jfreimann@redhat.com>
Cc: dev@dpdk.org, maxime.coquelin@redhat.com, zhihong.wang@intel.com
Subject: Re: [dpdk-dev] [PATCH v9 6/8] net/virtio: implement receive path for packed queues
Date: Thu, 25 Oct 2018 17:39:09 +0800
Message-ID: <20181025093909.GF22179@debian>
In-Reply-To: <20181024143236.21271-7-jfreimann@redhat.com>
On Wed, Oct 24, 2018 at 04:32:34PM +0200, Jens Freimann wrote:
> Implement the receive part.
>
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
> drivers/net/virtio/virtio_ethdev.c | 18 +-
> drivers/net/virtio/virtio_ethdev.h | 2 +
> drivers/net/virtio/virtio_rxtx.c | 280 ++++++++++++++++++++++++++---
> drivers/net/virtio/virtqueue.c | 22 +++
> drivers/net/virtio/virtqueue.h | 2 +-
> 5 files changed, 297 insertions(+), 27 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index d2118e6a9..7f81d24aa 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -384,8 +384,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
> vq->hw = hw;
> vq->vq_queue_index = vtpci_queue_idx;
> vq->vq_nentries = vq_size;
> - if (vtpci_packed_queue(hw))
> + if (vtpci_packed_queue(hw)) {
> vq->vq_ring.avail_wrap_counter = 1;
> + vq->vq_ring.used_wrap_counter = 1;
Why is used_wrap_counter only used in the receive path?
> + }
>
> /*
> * Reserve a memzone for vring elements
> @@ -1326,7 +1328,13 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
> {
> struct virtio_hw *hw = eth_dev->data->dev_private;
>
> - if (hw->use_simple_rx) {
> + if (vtpci_packed_queue(hw)) {
> + if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
> + eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
> + } else {
> + eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
The indent is wrong.
> + }
> + } else if (hw->use_simple_rx) {
Please make the structure clearer, e.g.:

if (vtpci_packed_queue(hw)) {
        // packed ring
} else {
        // split ring
}
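With the Rx handlers from this patch filled in, that would look roughly
like this (untested sketch; the split-ring branch is just a placeholder
for the existing selection logic):

if (vtpci_packed_queue(hw)) {
        /* packed ring */
        if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
                eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
        else
                eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
} else {
        /* split ring: keep the existing simple/in-order/mergeable
         * Rx selection here, unchanged */
}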
> PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
> eth_dev->data->port_id);
> eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
> @@ -1490,7 +1498,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
>
> /* Setting up rx_header size for the device */
> if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
> - vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
> + vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
> + vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
> hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
> else
> hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
> @@ -1918,7 +1927,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>
> rte_spinlock_init(&hw->state_lock);
>
> - hw->use_simple_rx = 1;
> + if (!vtpci_packed_queue(hw))
> + hw->use_simple_rx = 1;
>
> if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
> hw->use_inorder_tx = 1;
> diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
> index 05d355180..6c9247639 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -73,6 +73,8 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
>
> uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
> +uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts);
>
> uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> uint16_t nb_pkts);
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 837457243..42702527c 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -31,6 +31,7 @@
> #include "virtqueue.h"
> #include "virtio_rxtx.h"
> #include "virtio_rxtx_simple.h"
> +#include "virtio_ring.h"
>
> #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
> #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
> @@ -89,7 +90,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
> }
>
> void
> -vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
> +vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, uint16_t used_idx)
> {
> struct vring_desc_packed *dp;
> struct vq_desc_extra *dxp = NULL, *dxp_tail = NULL;
> @@ -102,7 +103,6 @@ vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
> if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
> while (dp->flags & VRING_DESC_F_NEXT && i < dxp->ndescs) {
> desc_idx_last = dxp->next;
> - dp = &vq->vq_ring.desc_packed[dxp->next];
Why was the above line added before this patch?
> dxp = &vq->vq_descx[dxp->next];
> i++;
> }
> @@ -124,6 +124,47 @@ vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx, used_idx)
> dxp->next = VQ_RING_DESC_CHAIN_END;
> }
>
> +static uint16_t
> +virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
> + struct rte_mbuf **rx_pkts,
> + uint32_t *len,
> + uint16_t num)
> +{
> + struct rte_mbuf *cookie;
> + uint16_t used_idx;
> + uint16_t id;
> + struct vring_desc_packed *desc;
> + uint16_t i;
> +
> + for (i = 0; i < num; i++) {
> + used_idx = vq->vq_used_cons_idx;
> + desc = (struct vring_desc_packed *) vq->vq_ring.desc_packed;
> + if (!desc_is_used(&desc[used_idx], &vq->vq_ring))
> + return i;
> + len[i] = desc[used_idx].len;
> + id = desc[used_idx].index;
> + cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
> +
> + if (unlikely(cookie == NULL)) {
> + PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
> + vq->vq_used_cons_idx);
> + break;
> + }
> + rte_prefetch0(cookie);
> + rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
> + rx_pkts[i] = cookie;
> + vq_ring_free_chain_packed(vq, id, used_idx);
> +
> + vq->vq_used_cons_idx += vq->vq_descx[id].ndescs;
> + if (vq->vq_used_cons_idx >= vq->vq_nentries) {
> + vq->vq_used_cons_idx -= vq->vq_nentries;
> + vq->vq_ring.used_wrap_counter ^= 1;
> + }
> + }
> +
> + return i;
> +}
> +
> static uint16_t
> virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
> uint32_t *len, uint16_t num)
> @@ -369,6 +410,55 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
> return 0;
> }
>
> +static inline int
> +virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq, struct rte_mbuf *cookie)
> +{
> + struct vq_desc_extra *dxp;
> + struct virtio_hw *hw = vq->hw;
> + struct vring_desc_packed *start_dp;
> + uint16_t needed = 1;
> + uint16_t head_idx, idx;
> + uint16_t flags;
> +
> + if (unlikely(vq->vq_free_cnt == 0))
> + return -ENOSPC;
> + if (unlikely(vq->vq_free_cnt < needed))
> + return -EMSGSIZE;
> +
> + head_idx = vq->vq_desc_head_idx;
> + if (unlikely(head_idx >= vq->vq_nentries))
> + return -EFAULT;
> +
> + idx = head_idx;
> + dxp = &vq->vq_descx[idx];
> + dxp->cookie = (void *)cookie;
> + dxp->ndescs = needed;
> +
> + start_dp = vq->vq_ring.desc_packed;
> + start_dp[idx].addr =
> + VIRTIO_MBUF_ADDR(cookie, vq) +
> + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> + start_dp[idx].len =
> + cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
> + flags = VRING_DESC_F_WRITE;
> + flags |= VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
> + VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);
> + rte_smp_wmb();
> + start_dp[idx].flags = flags;
> + idx = dxp->next;
We should always use the descriptors in the packed ring in ring order
(whereas the elements in vq_descx[] have to be used out of order when
the device processes descriptors out of order).
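I.e. the refill should take the next slot in ring order from
vq_avail_idx instead of walking the vq_desc_head_idx free chain.
A rough, untested sketch (it assumes one vq_descx[] entry per ring
slot; the buffer-id/descx bookkeeping would still need separate
handling for out-of-order completion):

idx = vq->vq_avail_idx;         /* next slot in ring order */
dxp = &vq->vq_descx[idx];
dxp->cookie = (void *)cookie;
dxp->ndescs = 1;

/* fill start_dp[idx].addr/len/flags as above */

vq->vq_free_cnt--;
vq->vq_avail_idx++;
if (vq->vq_avail_idx >= vq->vq_nentries) {
        vq->vq_avail_idx -= vq->vq_nentries;
        vq->vq_ring.avail_wrap_counter ^= 1;
}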
> + vq->vq_desc_head_idx = idx;
> + if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
> + vq->vq_desc_tail_idx = idx;
> + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
> +
> + vq->vq_avail_idx += needed;
> + if (vq->vq_avail_idx >= vq->vq_nentries) {
> + vq->vq_avail_idx = 0;
> + vq->vq_ring.avail_wrap_counter ^= 1;
> + }
> +
> + return 0;
> +}
[...]
> uint16_t
> virtio_recv_mergeable_pkts_inorder(void *rx_queue,
> struct rte_mbuf **rx_pkts,
> @@ -1385,12 +1582,20 @@ virtio_recv_mergeable_pkts(void *rx_queue,
> uint16_t extra_idx;
> uint32_t seg_res;
> uint32_t hdr_size;
> + uint32_t rx_num = 0;
>
> nb_rx = 0;
> if (unlikely(hw->started == 0))
> return nb_rx;
>
> - nb_used = VIRTQUEUE_NUSED(vq);
> + if (vtpci_packed_queue(vq->hw)) {
> + if (!desc_is_used(&vq->vq_ring.desc_packed[vq->vq_used_cons_idx],
> + &vq->vq_ring))
> + return 0;
> + nb_used = VIRTIO_MBUF_BURST_SZ;
> + } else {
> + nb_used = VIRTQUEUE_NUSED(vq);
> + }
>
> virtio_rmb();
>
> @@ -1403,13 +1608,20 @@ virtio_recv_mergeable_pkts(void *rx_queue,
> seg_res = 0;
> hdr_size = hw->vtnet_hdr_size;
>
> +
No need to add the above empty line.
> while (i < nb_used) {
> struct virtio_net_hdr_mrg_rxbuf *header;
>
> if (nb_rx == nb_pkts)
> break;
>
> - num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
> + if (vtpci_packed_queue(vq->hw))
> + num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
> + len, 1);
> + else
> + num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
> + if (num == 0)
> + return nb_rx;
> if (num != 1)
> continue;
>
[...]
Thread overview: 33+ messages
2018-10-24 14:32 [dpdk-dev] [PATCH v9 0/8] implement packed virtqueues Jens Freimann
2018-10-24 14:32 ` [dpdk-dev] [PATCH v9 1/8] net/virtio: vring init for packed queues Jens Freimann
2018-10-25 9:21 ` Tiwei Bie
2018-10-25 13:48 ` Jens Freimann
2018-10-25 13:51 ` Maxime Coquelin
2018-10-25 15:12 ` Jens Freimann
2018-10-26 6:01 ` Tiwei Bie
2018-10-24 14:32 ` [dpdk-dev] [PATCH v9 2/8] net/virtio: add packed virtqueue defines Jens Freimann
2018-10-25 9:22 ` Tiwei Bie
2018-10-25 14:41 ` Jens Freimann
2018-10-24 14:32 ` [dpdk-dev] [PATCH v9 3/8] net/virtio: add packed virtqueue helpers Jens Freimann
2018-10-25 9:23 ` Tiwei Bie
2018-10-25 14:40 ` Jens Freimann
2018-10-24 14:32 ` [dpdk-dev] [PATCH v9 4/8] net/virtio: dump packed virtqueue data Jens Freimann
2018-10-25 9:25 ` Tiwei Bie
2018-10-25 13:54 ` Jens Freimann
2018-10-24 14:32 ` [dpdk-dev] [PATCH v9 5/8] net/virtio: implement transmit path for packed queues Jens Freimann
2018-10-24 16:55 ` Maxime Coquelin
2018-10-25 10:59 ` Jens Freimann
2018-10-25 9:31 ` Tiwei Bie
2018-10-25 13:19 ` Jens Freimann
2018-10-25 10:46 ` Jens Freimann
2018-10-24 14:32 ` [dpdk-dev] [PATCH v9 6/8] net/virtio: implement receive " Jens Freimann
2018-10-25 9:39 ` Tiwei Bie [this message]
2018-10-25 13:54 ` Jens Freimann
2018-10-26 5:43 ` Tiwei Bie
2018-10-31 12:46 ` Jens Freimann
2018-10-25 10:53 ` Jens Freimann
2018-10-24 14:32 ` [dpdk-dev] [PATCH v9 7/8] net/virtio: add virtio send command packed queue support Jens Freimann
2018-10-24 14:32 ` [dpdk-dev] [PATCH v9 8/8] net/virtio: enable packed virtqueues by default Jens Freimann
2018-10-26 6:06 ` Tiwei Bie
2018-11-02 8:58 ` Jens Freimann
2018-10-25 9:44 ` [dpdk-dev] [PATCH v9 0/8] implement packed virtqueues Tiwei Bie