From: Jens Freimann <jfreimann@redhat.com>
To: dev@dpdk.org
Cc: tiwei.bie@intel.com, maxime.coquelin@redhat.com, Gavin.Hu@arm.com
Subject: [dpdk-dev] [PATCH v6 08/11] net/virtio: add support for mergeable buffers with packed virtqueues
Date: Fri, 21 Sep 2018 12:33:05 +0200
Message-ID: <20180921103308.16357-9-jfreimann@redhat.com>
In-Reply-To: <20180921103308.16357-1-jfreimann@redhat.com>
Implement support for receiving mergeable buffers in virtio when packed
virtqueues are enabled.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 8 +-
drivers/net/virtio/virtio_rxtx.c | 117 ++++++++++++++++++++++++++---
drivers/net/virtio/virtqueue.h | 1 +
3 files changed, 113 insertions(+), 13 deletions(-)
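For context: with VIRTIO_NET_F_MRG_RXBUF negotiated, the device may spread a
single packet across several receive buffers and reports how many it used in
the num_buffers field of the virtio-net header that precedes the packet data
in the first buffer. The receive path below reads that field and then dequeues
the remaining segments. A minimal, self-contained sketch of the header layout
and the segment count derived from it (the struct names here are local
stand-ins following the virtio spec, not the driver's own definitions):

#include <stdint.h>

/* Stand-in for the virtio-net header layout per the virtio spec; the driver
 * uses its own struct virtio_net_hdr_mrg_rxbuf. */
struct vnet_hdr {
	uint8_t  flags;
	uint8_t  gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
};

struct vnet_hdr_mrg_rxbuf {
	struct vnet_hdr hdr;
	uint16_t num_buffers;	/* receive buffers used for this packet */
};

/* Number of extra buffers still to be dequeued after the first one. */
static inline uint16_t
remaining_segments(const struct vnet_hdr_mrg_rxbuf *h)
{
	return h->num_buffers > 1 ? (uint16_t)(h->num_buffers - 1) : 0;
}
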
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index f03cf04a9..c4ef095ed 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1322,12 +1322,12 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- /*
- * workarount for packed vqs which don't support
- * mrg_rxbuf at this point
- */
if (vtpci_packed_queue(hw)) {
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ } else {
eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
+ }
} else if (hw->use_simple_rx) {
PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
eth_dev->data->port_id);
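The hunk above replaces the earlier workaround (packed virtqueues always fell
back to the non-mergeable path) with a proper selection: packed plus
VIRTIO_NET_F_MRG_RXBUF now picks the mergeable receive burst. A hedged sketch
of that dispatch in isolation, with illustrative names rather than the
driver's function pointers:

#include <stdbool.h>

enum rx_path {
	RX_SPLIT,		/* existing split-ring paths, selected as before */
	RX_PACKED,		/* packed ring, one buffer per packet */
	RX_PACKED_MERGEABLE	/* packed ring, packets may span buffers */
};

/* Choose the receive path from the negotiated features. */
static enum rx_path
select_rx_path(bool packed_vq, bool mrg_rxbuf)
{
	if (packed_vq)
		return mrg_rxbuf ? RX_PACKED_MERGEABLE : RX_PACKED;
	return RX_SPLIT;
}
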
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 90a0e306f..35c375cd5 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -90,6 +90,80 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
dp->next = VQ_RING_DESC_CHAIN_END;
}
+static void
+virtio_refill_packed(struct virtqueue *vq, uint16_t used_idx,
+ struct virtnet_rx *rxvq)
+{
+ struct vq_desc_extra *dxp;
+ struct vring_desc_packed *descs = vq->vq_ring.desc_packed;
+ struct vring_desc_packed *desc;
+ struct rte_mbuf *nmb;
+
+ nmb = rte_mbuf_raw_alloc(rxvq->mpool);
+ if (unlikely(nmb == NULL)) {
+ struct rte_eth_dev *dev
+ = &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ return;
+ }
+
+ desc = &descs[used_idx];
+
+ dxp = &vq->vq_descx[used_idx];
+
+ dxp->cookie = nmb;
+ dxp->ndescs = 1;
+
+ desc->addr = VIRTIO_MBUF_ADDR(nmb, vq) +
+ RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
+ desc->len = nmb->buf_len - RTE_PKTMBUF_HEADROOM +
+ vq->hw->vtnet_hdr_size;
+ desc->flags |= VRING_DESC_F_WRITE;
+}
+
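virtio_refill_packed() above re-arms a just-consumed descriptor slot with a
fresh mbuf, pointing the descriptor hdr_size bytes before the mbuf's data
offset so the device can write the virtio-net header into the headroom. A
standalone model of that address/length arithmetic (plain values, no DPDK
types; the real code uses VIRTIO_MBUF_ADDR() and RTE_PKTMBUF_HEADROOM):

#include <stdint.h>

struct desc_layout {
	uint64_t addr;	/* buffer address handed to the device */
	uint32_t len;	/* writable length starting at addr */
};

/* headroom is the offset of the packet data inside the mbuf buffer,
 * hdr_size is the negotiated virtio-net header size. */
static struct desc_layout
refill_layout(uint64_t buf_addr, uint32_t buf_len,
	      uint32_t headroom, uint32_t hdr_size)
{
	struct desc_layout d;

	d.addr = buf_addr + headroom - hdr_size; /* header lands in headroom */
	d.len  = buf_len - headroom + hdr_size;  /* header + packet data */
	return d;
}
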
+static uint16_t
+virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
+ struct rte_mbuf **rx_pkts,
+ uint32_t *len,
+ uint16_t num,
+ struct virtnet_rx *rx_queue)
+{
+ struct rte_mbuf *cookie;
+ uint16_t used_idx;
+ uint16_t id;
+ struct vring_desc_packed *desc;
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ used_idx = vq->vq_used_cons_idx;
+ desc = &vq->vq_ring.desc_packed[used_idx];
+ if (!desc_is_used(desc, &vq->vq_ring))
+ return i;
+ len[i] = desc->len;
+ id = desc->index;
+ cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
+
+ if (unlikely(cookie == NULL)) {
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+ rte_prefetch0(cookie);
+ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+ rx_pkts[i] = cookie;
+
+ virtio_refill_packed(vq, used_idx, rx_queue);
+
+ if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx = 0;
+ vq->vq_ring.used_wrap_counter ^= 1;
+ }
+ vq->vq_descx[id].cookie = NULL;
+ }
+
+ return i;
+}
+
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
uint32_t *len, uint16_t num)
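The packed dequeue above decides whether a descriptor has been completed with
desc_is_used() and flips vq_ring.used_wrap_counter each time vq_used_cons_idx
wraps. For reference, a self-contained sketch of that check following the
virtio 1.1 packed-ring layout (AVAIL at flag bit 7, USED at bit 15); the
struct and helper names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>

#define DESC_F_AVAIL	(1u << 7)
#define DESC_F_USED	(1u << 15)

struct packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/* A descriptor is used once the device has set its USED bit (and AVAIL bit)
 * to match the driver's current used wrap counter; the counter flips on each
 * ring wrap, which distinguishes new completions from stale ones. */
static bool
packed_desc_is_used(const struct packed_desc *d, bool used_wrap_counter)
{
	bool avail = !!(d->flags & DESC_F_AVAIL);
	bool used  = !!(d->flags & DESC_F_USED);

	return avail == used && used == used_wrap_counter;
}
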
@@ -1476,12 +1550,16 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint16_t extra_idx;
uint32_t seg_res;
uint32_t hdr_size;
+ uint32_t rx_num = 0;
nb_rx = 0;
if (unlikely(hw->started == 0))
return nb_rx;
- nb_used = VIRTQUEUE_NUSED(vq);
+ if (vtpci_packed_queue(vq->hw))
+ nb_used = VIRTIO_MBUF_BURST_SZ;
+ else
+ nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb();
@@ -1494,13 +1572,21 @@ virtio_recv_mergeable_pkts(void *rx_queue,
seg_res = 0;
hdr_size = hw->vtnet_hdr_size;
+ vq->vq_used_idx = vq->vq_used_cons_idx;
+
while (i < nb_used) {
struct virtio_net_hdr_mrg_rxbuf *header;
if (nb_rx == nb_pkts)
break;
- num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
+ if (vtpci_packed_queue(vq->hw))
+ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
+ len, 1, (struct virtnet_rx *)rx_queue);
+ else
+ num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
+ if (num == 0)
+ return nb_rx;
if (num != 1)
continue;
@@ -1552,12 +1638,15 @@ virtio_recv_mergeable_pkts(void *rx_queue,
*/
uint16_t rcv_cnt =
RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
- if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
- uint32_t rx_num =
- virtqueue_dequeue_burst_rx(vq,
- rcv_pkts, len, rcv_cnt);
- i += rx_num;
- rcv_cnt = rx_num;
+ if (vtpci_packed_queue(vq->hw)) {
+ if (likely(vq->vq_free_cnt >= rcv_cnt)) {
+ rx_num = virtqueue_dequeue_burst_rx_packed(vq,
+ rcv_pkts, len, rcv_cnt,
+ (struct virtnet_rx *)rx_queue);
+ }
+ } else if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
+ rx_num = virtqueue_dequeue_burst_rx(vq,
+ rcv_pkts, len, rcv_cnt);
} else {
PMD_RX_LOG(ERR,
"No enough segments for packet.");
@@ -1566,6 +1655,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxvq->stats.errors++;
break;
}
+ i += rx_num;
+ rcv_cnt = rx_num;
extra_idx = 0;
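When a packet spans several buffers, the loop above dequeues up to seg_res
further descriptors and links their mbufs behind the head segment. The driver
sets nb_segs once from num_buffers; the sketch below instead grows the
counters incrementally, as a simplified stand-alone model of the chaining
step (tiny local struct, not struct rte_mbuf):

#include <stdint.h>
#include <stddef.h>

struct seg {
	struct seg *next;
	uint32_t data_len;	/* bytes in this segment */
	uint32_t pkt_len;	/* total packet bytes (head segment only) */
	uint16_t nb_segs;	/* segment count (head segment only) */
};

/* Link one freshly dequeued buffer of extra_len bytes behind tail and
 * account for it on the head segment; returns the new tail. */
static struct seg *
append_segment(struct seg *head, struct seg *tail,
	       struct seg *extra, uint32_t extra_len)
{
	extra->data_len = extra_len;
	extra->next = NULL;

	tail->next = extra;
	head->pkt_len += extra_len;
	head->nb_segs++;
	return extra;
}
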
@@ -1599,6 +1690,15 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxvq->stats.packets += nb_rx;
+ if (vtpci_packed_queue(vq->hw)) {
+ if (unlikely(virtqueue_kick_prepare_packed(vq)) &&
+ likely(nb_rx)) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ return nb_rx;
+ }
+
/* Allocate new mbuf for the used descriptor */
while (likely(!virtqueue_full(vq))) {
new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
@@ -1618,7 +1718,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
if (likely(nb_enqueued)) {
vq_update_avail_idx(vq);
-
if (unlikely(virtqueue_kick_prepare(vq))) {
virtqueue_notify(vq);
PMD_RX_LOG(DEBUG, "Notified");
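For the packed path the kick at the end is gated on
virtqueue_kick_prepare_packed(), i.e. the device's event suppression state,
instead of the split ring's avail-event logic. A conservative sketch of such
a check using the flag values from the virtio 1.1 spec (the structure here is
a simplified stand-in for the device event suppression area, not the
driver's):

#include <stdbool.h>
#include <stdint.h>

#define RING_EVENT_FLAGS_ENABLE		0x0	/* device wants notifications */
#define RING_EVENT_FLAGS_DISABLE	0x1	/* device asks not to be notified */
#define RING_EVENT_FLAGS_DESC		0x2	/* notify at a specific descriptor */

struct device_event {
	uint16_t desc_event_off_wrap;
	uint16_t desc_event_flags;
};

/* Skip the doorbell only when notifications are explicitly disabled; a full
 * implementation would also honour RING_EVENT_FLAGS_DESC. */
static bool
need_notify(const struct device_event *ev)
{
	return (ev->desc_event_flags & 0x3) != RING_EVENT_FLAGS_DISABLE;
}
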
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index ffa2d8f92..15df6a050 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -171,6 +171,7 @@ struct virtqueue {
* trails vq_ring.used->idx.
*/
uint16_t vq_used_cons_idx;
+ uint16_t vq_used_idx;
uint16_t vq_nentries; /**< vring desc numbers */
uint16_t vq_free_cnt; /**< num of desc available */
uint16_t vq_avail_idx; /**< sync until needed */
--
2.17.1
Thread overview: 18+ messages
2018-09-21 10:32 [dpdk-dev] [PATCH v6 00/11] implement " Jens Freimann
2018-09-21 10:32 ` [dpdk-dev] [PATCH v6 01/11] net/virtio: vring init for packed queues Jens Freimann
2018-09-21 10:32 ` [dpdk-dev] [PATCH v6 02/11] net/virtio: add packed virtqueue defines Jens Freimann
2018-09-21 10:33 ` [dpdk-dev] [PATCH v6 03/11] net/virtio: add packed virtqueue helpers Jens Freimann
2018-09-21 10:33 ` [dpdk-dev] [PATCH v6 04/11] net/virtio: flush packed receive virtqueues Jens Freimann
2018-09-21 10:33 ` [dpdk-dev] [PATCH v6 05/11] net/virtio: dump packed virtqueue data Jens Freimann
2018-09-21 10:33 ` [dpdk-dev] [PATCH v6 06/11] net/virtio: implement transmit path for packed queues Jens Freimann
2018-09-21 12:26 ` Tiwei Bie
2018-09-21 12:37 ` Jens Freimann
2018-09-21 12:49 ` Tiwei Bie
2018-09-21 10:33 ` [dpdk-dev] [PATCH v6 07/11] net/virtio: implement receive " Jens Freimann
2018-09-21 10:33 ` Jens Freimann [this message]
2018-09-21 10:33 ` [dpdk-dev] [PATCH v6 09/11] net/virtio: add virtio send command packed queue support Jens Freimann
2018-09-21 12:37 ` Tiwei Bie
2018-09-21 10:33 ` [dpdk-dev] [PATCH v6 10/11] net/virtio-user: add option to use packed queues Jens Freimann
2018-09-21 10:33 ` [dpdk-dev] [PATCH v6 11/11] net/virtio: enable packed virtqueues by default Jens Freimann
2018-09-21 12:32 ` [dpdk-dev] [PATCH v6 00/11] implement packed virtqueues Tiwei Bie
2018-09-21 14:06 ` Jens Freimann