* [dpdk-dev] [PATCH v10 1/9] net/virtio: vring init for packed queues
2018-11-02 9:07 [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues Jens Freimann
@ 2018-11-02 9:07 ` Jens Freimann
2018-11-02 14:00 ` Tiwei Bie
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 2/9] net/virtio: add packed virtqueue defines Jens Freimann
` (8 subsequent siblings)
9 siblings, 1 reply; 13+ messages in thread
From: Jens Freimann @ 2018-11-02 9:07 UTC (permalink / raw)
To: dev; +Cc: tiwei.bie, maxime.coquelin, Gavin.Hu
Add and initialize the descriptor data structures for packed virtqueues.
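For packed queues the ring is a single array of 16-byte descriptors
followed by two event suppression structures. A worked example of what
the packed branch of vring_size() returns, using hypothetical values
(256 entries, 4096-byte alignment; ALIGN_CEIL is a local stand-in for
RTE_ALIGN_CEIL):
    #include <stddef.h>
    #define ALIGN_CEIL(v, a) (((v) + (a) - 1) & ~((size_t)(a) - 1))
    static size_t packed_ring_bytes(unsigned int num, size_t align)
    {
        size_t size = num * 16; /* sizeof(struct vring_desc_packed) */
        size += 4;              /* driver event (vring_packed_desc_event) */
        size = ALIGN_CEIL(size, align); /* 4100 -> 8192 in this example */
        size += 4;              /* device event */
        return size;            /* 256 descriptors -> 8196 bytes */
    }
virtio_init_queue() then rounds this value up to the ring alignment once
more when reserving the memzone.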
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 22 +++++++----
drivers/net/virtio/virtio_pci.h | 7 ++++
drivers/net/virtio/virtio_ring.h | 60 ++++++++++++++++++++++++++----
drivers/net/virtio/virtqueue.h | 19 +++++++++-
4 files changed, 92 insertions(+), 16 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 27088e3a6..0f5a3b80f 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -299,19 +299,21 @@ virtio_init_vring(struct virtqueue *vq)
PMD_INIT_FUNC_TRACE();
- /*
- * Reinitialise since virtio port might have been stopped and restarted
- */
memset(ring_mem, 0, vq->vq_ring_size);
- vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+
vq->vq_used_cons_idx = 0;
vq->vq_desc_head_idx = 0;
vq->vq_avail_idx = 0;
vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
vq->vq_free_cnt = vq->vq_nentries;
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
-
- vring_desc_init(vr->desc, size);
+ if (vtpci_packed_queue(vq->hw)) {
+ vring_init_packed(&vq->ring_packed, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
+ vring_desc_init_packed(vq, size);
+ } else {
+ vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
+ vring_desc_init_split(vr->desc, size);
+ }
/*
* Disable device(host) interrupting guest
@@ -382,11 +384,15 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
vq->hw = hw;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;
+ if (vtpci_packed_queue(hw)) {
+ vq->avail_wrap_counter = 1;
+ vq->used_wrap_counter = 1;
+ }
/*
* Reserve a memzone for vring elements
*/
- size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN);
vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
size, vq->vq_ring_size);
@@ -489,7 +495,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
for (i = 0; i < vq_size; i++) {
struct vring_desc *start_dp = txr[i].tx_indir;
- vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));
+ vring_desc_init_split(start_dp, RTE_DIM(txr[i].tx_indir));
/* first indirect descriptor is always the tx header */
start_dp->addr = txvq->virtio_net_hdr_mem
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 64ba6ecbc..070f8d119 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -113,6 +113,7 @@ struct virtnet_ctl;
#define VIRTIO_F_VERSION_1 32
#define VIRTIO_F_IOMMU_PLATFORM 33
+#define VIRTIO_F_RING_PACKED 34
/*
* Some VirtIO feature bits (currently bits 28 through 31) are
@@ -313,6 +314,12 @@ vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
return (hw->guest_features & (1ULL << bit)) != 0;
}
+static inline int
+vtpci_packed_queue(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_F_RING_PACKED);
+}
+
/*
* Function declaration from virtio_pci.c
*/
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index 9e3c2a015..69ec84d1e 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -54,9 +54,35 @@ struct vring_used {
struct vring_used_elem ring[0];
};
+/* For support of packed virtqueues in Virtio 1.1 the format of descriptors
+ * looks like this.
+ */
+struct vring_desc_packed {
+ uint64_t addr;
+ uint32_t len;
+ uint16_t index;
+ uint16_t flags;
+} __attribute__ ((aligned(16)));
+
+#define RING_EVENT_FLAGS_ENABLE 0x0
+#define RING_EVENT_FLAGS_DISABLE 0x1
+#define RING_EVENT_FLAGS_DESC 0x2
+struct vring_packed_desc_event {
+ uint16_t desc_event_off_wrap;
+ uint16_t desc_event_flags;
+};
+
+struct vring_packed {
+ unsigned int num;
+ struct vring_desc_packed *desc_packed;
+ struct vring_packed_desc_event *driver_event;
+ struct vring_packed_desc_event *device_event;
+
+};
+
struct vring {
unsigned int num;
- struct vring_desc *desc;
+ struct vring_desc *desc;
struct vring_avail *avail;
struct vring_used *used;
};
@@ -95,10 +121,18 @@ struct vring {
#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
static inline size_t
-vring_size(unsigned int num, unsigned long align)
+vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
{
size_t size;
+ if (vtpci_packed_queue(hw)) {
+ size = num * sizeof(struct vring_desc_packed);
+ size += sizeof(struct vring_packed_desc_event);
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct vring_packed_desc_event);
+ return size;
+ }
+
size = num * sizeof(struct vring_desc);
size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
size = RTE_ALIGN_CEIL(size, align);
@@ -106,17 +140,29 @@ vring_size(unsigned int num, unsigned long align)
(num * sizeof(struct vring_used_elem));
return size;
}
-
static inline void
-vring_init(struct vring *vr, unsigned int num, uint8_t *p,
- unsigned long align)
+vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
+ unsigned int num)
{
vr->num = num;
vr->desc = (struct vring_desc *) p;
vr->avail = (struct vring_avail *) (p +
- num * sizeof(struct vring_desc));
+ vr->num * sizeof(struct vring_desc));
vr->used = (void *)
- RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
+ RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[vr->num]), align);
+}
+
+static inline void
+vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
+ unsigned int num)
+{
+ vr->num = num;
+ vr->desc_packed = (struct vring_desc_packed *)p;
+ vr->driver_event = (struct vring_packed_desc_event *)(p +
+ vr->num * sizeof(struct vring_desc_packed));
+ vr->device_event = (struct vring_packed_desc_event *)
+ RTE_ALIGN_CEIL((uintptr_t)(vr->driver_event +
+ sizeof(struct vring_packed_desc_event)), align);
}
/*
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 26518ed98..90bff7e38 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -161,11 +161,16 @@ struct virtio_pmd_ctrl {
struct vq_desc_extra {
void *cookie;
uint16_t ndescs;
+ uint16_t next;
};
struct virtqueue {
struct virtio_hw *hw; /**< virtio_hw structure pointer. */
struct vring vq_ring; /**< vring keeping desc, used and avail */
+ struct vring_packed ring_packed; /**< vring keeping desc, used and avail */
+ bool avail_wrap_counter;
+ bool used_wrap_counter;
+
/**
* Last consumed descriptor in the used table,
* trails vq_ring.used->idx.
@@ -245,9 +250,21 @@ struct virtio_tx_region {
__attribute__((__aligned__(16)));
};
+static inline void
+vring_desc_init_packed(struct virtqueue *vq, int n)
+{
+ int i;
+ for (i = 0; i < n - 1; i++) {
+ vq->ring_packed.desc_packed[i].index = i;
+ vq->vq_descx[i].next = i + 1;
+ }
+ vq->ring_packed.desc_packed[i].index = i;
+ vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
+}
+
/* Chain all the descriptors in the ring with an END */
static inline void
-vring_desc_init(struct vring_desc *dp, uint16_t n)
+vring_desc_init_split(struct vring_desc *dp, uint16_t n)
{
uint16_t i;
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [dpdk-dev] [PATCH v10 1/9] net/virtio: vring init for packed queues
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 1/9] net/virtio: vring init for packed queues Jens Freimann
@ 2018-11-02 14:00 ` Tiwei Bie
0 siblings, 0 replies; 13+ messages in thread
From: Tiwei Bie @ 2018-11-02 14:00 UTC (permalink / raw)
To: Jens Freimann; +Cc: dev, maxime.coquelin, Gavin.Hu
On Fri, Nov 02, 2018 at 10:07:41AM +0100, Jens Freimann wrote:
> Add and initialize descriptor data structures.
>
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
> drivers/net/virtio/virtio_ethdev.c | 22 +++++++----
> drivers/net/virtio/virtio_pci.h | 7 ++++
> drivers/net/virtio/virtio_ring.h | 60 ++++++++++++++++++++++++++----
> drivers/net/virtio/virtqueue.h | 19 +++++++++-
> 4 files changed, 92 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index 27088e3a6..0f5a3b80f 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -299,19 +299,21 @@ virtio_init_vring(struct virtqueue *vq)
>
> PMD_INIT_FUNC_TRACE();
>
> - /*
> - * Reinitialise since virtio port might have been stopped and restarted
> - */
This comment is supposed to be kept in this version.
> memset(ring_mem, 0, vq->vq_ring_size);
> - vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
> +
> vq->vq_used_cons_idx = 0;
> vq->vq_desc_head_idx = 0;
> vq->vq_avail_idx = 0;
> vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
> vq->vq_free_cnt = vq->vq_nentries;
> memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
> -
> - vring_desc_init(vr->desc, size);
> + if (vtpci_packed_queue(vq->hw)) {
> + vring_init_packed(&vq->ring_packed, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
> + vring_desc_init_packed(vq, size);
> + } else {
> + vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
> + vring_desc_init_split(vr->desc, size);
> + }
>
> /*
> * Disable device(host) interrupting guest
[...]
> diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
> index 9e3c2a015..69ec84d1e 100644
> --- a/drivers/net/virtio/virtio_ring.h
> +++ b/drivers/net/virtio/virtio_ring.h
> @@ -54,9 +54,35 @@ struct vring_used {
> struct vring_used_elem ring[0];
> };
>
> +/* For support of packed virtqueues in Virtio 1.1 the format of descriptors
> + * looks like this.
> + */
> +struct vring_desc_packed {
Please rename this to vring_packed_desc to keep it consistent with
the name used in the vhost code.
> + uint64_t addr;
> + uint32_t len;
> + uint16_t index;
The `index` is supposed to be changed to `id` in this version.
> + uint16_t flags;
> +} __attribute__ ((aligned(16)));
When defining the structure for vring desc, I think the
__attribute__ ((aligned(16))) isn't needed.
> +
> +#define RING_EVENT_FLAGS_ENABLE 0x0
> +#define RING_EVENT_FLAGS_DISABLE 0x1
> +#define RING_EVENT_FLAGS_DESC 0x2
An empty line is supposed to be added here in this version.
> +struct vring_packed_desc_event {
> + uint16_t desc_event_off_wrap;
> + uint16_t desc_event_flags;
> +};
> +
> +struct vring_packed {
> + unsigned int num;
> + struct vring_desc_packed *desc_packed;
> + struct vring_packed_desc_event *driver_event;
> + struct vring_packed_desc_event *device_event;
> +
> +};
> +
> struct vring {
> unsigned int num;
> - struct vring_desc *desc;
> + struct vring_desc *desc;
Above change is unnecessary.
> struct vring_avail *avail;
> struct vring_used *used;
> };
> @@ -95,10 +121,18 @@ struct vring {
> #define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
>
> static inline size_t
> -vring_size(unsigned int num, unsigned long align)
> +vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
> {
> size_t size;
>
> + if (vtpci_packed_queue(hw)) {
> + size = num * sizeof(struct vring_desc_packed);
> + size += sizeof(struct vring_packed_desc_event);
> + size = RTE_ALIGN_CEIL(size, align);
> + size += sizeof(struct vring_packed_desc_event);
> + return size;
> + }
> +
> size = num * sizeof(struct vring_desc);
> size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
> size = RTE_ALIGN_CEIL(size, align);
> @@ -106,17 +140,29 @@ vring_size(unsigned int num, unsigned long align)
> (num * sizeof(struct vring_used_elem));
> return size;
> }
> -
Why is the above empty line removed?
> static inline void
> -vring_init(struct vring *vr, unsigned int num, uint8_t *p,
> - unsigned long align)
> +vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
> + unsigned int num)
The above change to the parameter order is unnecessary.
> {
> vr->num = num;
> vr->desc = (struct vring_desc *) p;
> vr->avail = (struct vring_avail *) (p +
> - num * sizeof(struct vring_desc));
> + vr->num * sizeof(struct vring_desc));
Above change is unnecessary.
> vr->used = (void *)
> - RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
> + RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[vr->num]), align);
Above change is unnecessary.
> +}
> +
> +static inline void
> +vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
> + unsigned int num)
> +{
> + vr->num = num;
> + vr->desc_packed = (struct vring_desc_packed *)p;
> + vr->driver_event = (struct vring_packed_desc_event *)(p +
> + vr->num * sizeof(struct vring_desc_packed));
> + vr->device_event = (struct vring_packed_desc_event *)
> + RTE_ALIGN_CEIL((uintptr_t)(vr->driver_event +
> + sizeof(struct vring_packed_desc_event)), align);
> }
>
> /*
> diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
> index 26518ed98..90bff7e38 100644
> --- a/drivers/net/virtio/virtqueue.h
> +++ b/drivers/net/virtio/virtqueue.h
> @@ -161,11 +161,16 @@ struct virtio_pmd_ctrl {
> struct vq_desc_extra {
> void *cookie;
> uint16_t ndescs;
> + uint16_t next;
> };
>
> struct virtqueue {
> struct virtio_hw *hw; /**< virtio_hw structure pointer. */
> struct vring vq_ring; /**< vring keeping desc, used and avail */
> + struct vring_packed ring_packed; /**< vring keeping desc, used and avail */
The comment should be updated for packed ring.
> + bool avail_wrap_counter;
> + bool used_wrap_counter;
> +
> /**
> * Last consumed descriptor in the used table,
> * trails vq_ring.used->idx.
> @@ -245,9 +250,21 @@ struct virtio_tx_region {
> __attribute__((__aligned__(16)));
> };
>
> +static inline void
> +vring_desc_init_packed(struct virtqueue *vq, int n)
> +{
> + int i;
> + for (i = 0; i < n - 1; i++) {
> + vq->ring_packed.desc_packed[i].index = i;
> + vq->vq_descx[i].next = i + 1;
> + }
> + vq->ring_packed.desc_packed[i].index = i;
> + vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
> +}
> +
> /* Chain all the descriptors in the ring with an END */
> static inline void
> -vring_desc_init(struct vring_desc *dp, uint16_t n)
> +vring_desc_init_split(struct vring_desc *dp, uint16_t n)
> {
> uint16_t i;
>
> --
> 2.17.1
>
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v10 2/9] net/virtio: add packed virtqueue defines
2018-11-02 9:07 [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues Jens Freimann
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 1/9] net/virtio: vring init for packed queues Jens Freimann
@ 2018-11-02 9:07 ` Jens Freimann
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 3/9] net/virtio: add packed virtqueue helpers Jens Freimann
` (7 subsequent siblings)
9 siblings, 0 replies; 13+ messages in thread
From: Jens Freimann @ 2018-11-02 9:07 UTC (permalink / raw)
To: dev; +Cc: tiwei.bie, maxime.coquelin, Gavin.Hu
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ring.h | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index 69ec84d1e..ec67a85a3 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -15,6 +15,10 @@
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4
+/* This flag means the descriptor was made available by the driver */
+#define VRING_DESC_F_AVAIL(b) ((uint16_t)(b) << 7)
+/* This flag means the descriptor was used by the device */
+#define VRING_DESC_F_USED(b) ((uint16_t)(b) << 15)
/* The Host uses this in used->flags to advise the Guest: don't kick me
* when you add a buffer. It's unreliable, so it's simply an
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v10 3/9] net/virtio: add packed virtqueue helpers
2018-11-02 9:07 [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues Jens Freimann
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 1/9] net/virtio: vring init for packed queues Jens Freimann
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 2/9] net/virtio: add packed virtqueue defines Jens Freimann
@ 2018-11-02 9:07 ` Jens Freimann
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 4/9] net/virtio: dump packed virtqueue data Jens Freimann
` (6 subsequent siblings)
9 siblings, 0 replies; 13+ messages in thread
From: Jens Freimann @ 2018-11-02 9:07 UTC (permalink / raw)
To: dev; +Cc: tiwei.bie, maxime.coquelin, Gavin.Hu
Add helper functions to set/clear and check descriptor flags.
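These build on the VRING_DESC_F_AVAIL/USED bits added in the previous
patch (bit 7 and bit 15 of desc->flags). A standalone sketch of the
handshake, mirroring _set_desc_avail() and desc_is_used() below
(illustrative only, not part of the patch):
    #include <stdint.h>
    #define VRING_DESC_F_AVAIL(b) ((uint16_t)(b) << 7)
    #define VRING_DESC_F_USED(b)  ((uint16_t)(b) << 15)
    /* Driver: AVAIL is set to the current wrap counter and USED to its
     * inverse, so the two bits differ while the descriptor is pending. */
    static inline uint16_t avail_flags(uint16_t flags, int avail_wrap)
    {
        return flags | VRING_DESC_F_AVAIL(avail_wrap) |
            VRING_DESC_F_USED(!avail_wrap);
    }
    /* Completion test: a descriptor is used once its USED bit matches
     * the wrap counter value expected for the current lap of the ring. */
    static inline int is_used(uint16_t flags, int used_wrap)
    {
        return !!(flags & VRING_DESC_F_USED(1)) == used_wrap;
    }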
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtqueue.h | 25 +++++++++++++++++++++++++
1 file changed, 25 insertions(+)
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 90bff7e38..f84079ba9 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -250,6 +250,31 @@ struct virtio_tx_region {
__attribute__((__aligned__(16)));
};
+static inline void
+_set_desc_avail(struct vring_desc_packed *desc, int wrap_counter)
+{
+ desc->flags |= VRING_DESC_F_AVAIL(wrap_counter) |
+ VRING_DESC_F_USED(!wrap_counter);
+}
+
+static inline void
+set_desc_avail(struct virtqueue *vq, struct vring_desc_packed *desc)
+{
+ _set_desc_avail(desc, vq->avail_wrap_counter);
+}
+
+static inline int
+desc_is_used(struct vring_desc_packed *desc, struct virtqueue *vq)
+{
+ uint16_t used, flags;
+
+ flags = desc->flags;
+ used = !!(flags & VRING_DESC_F_USED(1));
+
+ return used == vq->used_wrap_counter;
+}
+
+
static inline void
vring_desc_init_packed(struct virtqueue *vq, int n)
{
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v10 4/9] net/virtio: dump packed virtqueue data
2018-11-02 9:07 [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues Jens Freimann
` (2 preceding siblings ...)
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 3/9] net/virtio: add packed virtqueue helpers Jens Freimann
@ 2018-11-02 9:07 ` Jens Freimann
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 5/9] net/virtio: implement transmit path for packed queues Jens Freimann
` (5 subsequent siblings)
9 siblings, 0 replies; 13+ messages in thread
From: Jens Freimann @ 2018-11-02 9:07 UTC (permalink / raw)
To: dev; +Cc: tiwei.bie, maxime.coquelin, Gavin.Hu
Add support for dumping packed virtqueue data via the
VIRTQUEUE_DUMP() macro.
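The early break below works because VIRTQUEUE_DUMP() is a
do { ... } while (0) style macro, so packed queues log their own state
and skip the split-ring dump. Schematically (a sketch of the macro
structure, not the real body):
    #define VIRTQUEUE_DUMP(vq) do { \
        if (vtpci_packed_queue((vq)->hw)) { \
            /* log packed state: indexes and wrap counters */ \
            break; /* leaves the do/while(0) */ \
        } \
        /* log split-ring state: avail/used indexes etc. */ \
    } while (0)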
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
drivers/net/virtio/virtqueue.h | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index f84079ba9..769815aa7 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -397,6 +397,15 @@ virtqueue_notify(struct virtqueue *vq)
uint16_t used_idx, nused; \
used_idx = (vq)->vq_ring.used->idx; \
nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ if (vtpci_packed_queue((vq)->hw)) { \
+ PMD_INIT_LOG(DEBUG, \
+ "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
+ " avail_wrap_counter=%d; used_wrap_counter=%d", \
+ (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
+ (vq)->vq_avail_idx, (vq)->avail_wrap_counter, \
+ (vq)->used_wrap_counter); \
+ break; \
+ } \
PMD_INIT_LOG(DEBUG, \
"VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
" avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v10 5/9] net/virtio: implement transmit path for packed queues
2018-11-02 9:07 [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues Jens Freimann
` (3 preceding siblings ...)
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 4/9] net/virtio: dump packed virtqueue data Jens Freimann
@ 2018-11-02 9:07 ` Jens Freimann
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 6/9] net/virtio: implement receive " Jens Freimann
` (4 subsequent siblings)
9 siblings, 0 replies; 13+ messages in thread
From: Jens Freimann @ 2018-11-02 9:07 UTC (permalink / raw)
To: dev; +Cc: tiwei.bie, maxime.coquelin, Gavin.Hu
Implement the transmit path for devices that support
packed virtqueues.
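The publishing rule in virtqueue_enqueue_xmit_packed() below: all
descriptors after the head get their flags as they are filled, while
the head descriptor's flags, which carry the AVAIL/USED bits the device
polls, are written only after a write barrier. In outline (a simplified
sketch with types assumed from this series, not the actual function):
    #include <rte_atomic.h>
    static void xmit_publish_sketch(struct vring_desc_packed *ring,
                    uint16_t head, uint16_t head_flags)
    {
        /* addr/len of the head and the complete rest of the chain
         * (addr, len and flags) have been written at this point */
        rte_smp_wmb(); /* order chain writes before the publish */
        /* flipping the head's AVAIL/USED bits hands the whole chain
         * to the device in one store */
        ring[head].flags = head_flags;
    }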
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 50 +++--
drivers/net/virtio/virtio_ethdev.h | 2 +
drivers/net/virtio/virtio_rxtx.c | 286 +++++++++++++++++++++++++++++
drivers/net/virtio/virtqueue.h | 16 +-
4 files changed, 334 insertions(+), 20 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 0f5a3b80f..f59f5ea8a 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -494,16 +494,22 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
memset(txr, 0, vq_size * sizeof(*txr));
for (i = 0; i < vq_size; i++) {
struct vring_desc *start_dp = txr[i].tx_indir;
-
- vring_desc_init_split(start_dp, RTE_DIM(txr[i].tx_indir));
-
+ struct vring_desc_packed *start_dp_packed = txr[i].tx_indir_pq;
+
/* first indirect descriptor is always the tx header */
- start_dp->addr = txvq->virtio_net_hdr_mem
- + i * sizeof(*txr)
- + offsetof(struct virtio_tx_region, tx_hdr);
-
- start_dp->len = hw->vtnet_hdr_size;
- start_dp->flags = VRING_DESC_F_NEXT;
+ if (vtpci_packed_queue(hw)) {
+ start_dp_packed->addr = txvq->virtio_net_hdr_mem
+ + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region, tx_hdr);
+ start_dp_packed->len = hw->vtnet_hdr_size;
+ } else {
+ vring_desc_init_split(start_dp, RTE_DIM(txr[i].tx_indir));
+ start_dp->addr = txvq->virtio_net_hdr_mem
+ + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region, tx_hdr);
+ start_dp->len = hw->vtnet_hdr_size;
+ start_dp->flags = VRING_DESC_F_NEXT;
+ }
}
}
@@ -1328,6 +1334,22 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
+ if (vtpci_packed_queue(hw)) {
+ PMD_INIT_LOG(INFO, "virtio: using virtio 1.1 Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+ } else {
+ if (hw->use_inorder_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+ }
+ }
+
if (hw->use_simple_rx) {
PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
eth_dev->data->port_id);
@@ -1348,15 +1370,7 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}
- if (hw->use_inorder_tx) {
- PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
- eth_dev->data->port_id);
- eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
- } else {
- PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
- eth_dev->data->port_id);
- eth_dev->tx_pkt_burst = virtio_xmit_pkts;
- }
+
}
/* Only support 1:1 queue/interrupt mapping so far.
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index e0f80e5a4..05d355180 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -82,6 +82,8 @@ uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index eb891433e..2d13a3ad0 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -88,6 +88,44 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
dp->next = VQ_RING_DESC_CHAIN_END;
}
+void
+vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx)
+{
+ struct vring_desc_packed *dp;
+ struct vq_desc_extra *dxp, *dxp_tail;
+ uint16_t desc_idx_last, desc_idx;
+ int i = 0;
+
+ dp = &vq->ring_packed.desc_packed[used_idx];
+ dxp = &vq->vq_descx[dp->index];
+ desc_idx = desc_idx_last = dp->index;
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+ if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+ while (dxp->next != VQ_RING_DESC_CHAIN_END && i < dxp->ndescs) {
+ desc_idx_last = dxp->next;
+ dxp = &vq->vq_descx[dxp->next];
+ used_idx += dxp->ndescs;
+ dp = &vq->ring_packed.desc_packed[used_idx];
+ i++;
+ }
+ }
+
+ /*
+ * We must append the existing free chain, if any, to the end of
+ * newly freed chain. If the virtqueue was completely used, then
+ * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
+ */
+ if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+ vq->vq_desc_head_idx = desc_idx;
+ } else {
+ dxp_tail = &vq->vq_descx[vq->vq_desc_tail_idx];
+ dxp_tail->next = desc_idx;
+ }
+
+ vq->vq_desc_tail_idx = desc_idx_last;
+ dxp->next = VQ_RING_DESC_CHAIN_END;
+}
+
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
uint32_t *len, uint16_t num)
@@ -165,6 +203,34 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
#endif
/* Cleanup from completed transmits. */
+static void
+virtio_xmit_cleanup_packed(struct virtqueue *vq, int num)
+{
+ uint16_t used_idx, id;
+ uint16_t size = vq->vq_nentries;
+ struct vring_desc_packed *desc = vq->ring_packed.desc_packed;
+ struct vq_desc_extra *dxp;
+
+ used_idx = vq->vq_used_cons_idx;
+ while (num && desc_is_used(&desc[used_idx], vq)) {
+ num--;
+ used_idx = vq->vq_used_cons_idx;
+ id = desc[used_idx].index;
+ dxp = &vq->vq_descx[id];
+ vq->vq_used_cons_idx += dxp->ndescs;
+ if (vq->vq_used_cons_idx >= size) {
+ vq->vq_used_cons_idx -= size;
+ vq->used_wrap_counter ^= 1;
+ }
+ vq_ring_free_chain_packed(vq, used_idx);
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ used_idx = vq->vq_used_cons_idx;
+ }
+}
+
static void
virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
{
@@ -456,6 +522,134 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
}
+static inline void
+virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
+ uint16_t needed, int use_indirect, int can_push,
+ int in_order)
+{
+ struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+ struct vq_desc_extra *dxp, *head_dxp;
+ struct virtqueue *vq = txvq->vq;
+ struct vring_desc_packed *start_dp, *head_dp;
+ uint16_t seg_num = cookie->nb_segs;
+ uint16_t idx, head_id, head_flags;
+ uint16_t head_size = vq->hw->vtnet_hdr_size;
+ struct virtio_net_hdr *hdr;
+ uint16_t prev;
+
+ head_id = vq->vq_desc_head_idx;
+ idx = vq->vq_avail_idx;
+ prev = idx;
+ start_dp = vq->ring_packed.desc_packed;
+ dxp = &vq->vq_descx[idx];
+ dxp->ndescs = needed;
+
+ head_dp = &vq->ring_packed.desc_packed[idx];
+ head_dxp = &vq->vq_descx[head_id];
+ head_dxp->cookie = (void *) cookie;
+ head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+ head_flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+
+ if (can_push) {
+ /* prepend cannot fail, checked by caller */
+ hdr = (struct virtio_net_hdr *)
+ rte_pktmbuf_prepend(cookie, head_size);
+ /* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
+ * which is wrong. Below subtract restores correct pkt size.
+ */
+ cookie->pkt_len -= head_size;
+
+ /* if offload disabled, it is not zeroed below, do it now */
+ if (!vq->hw->has_tx_offload) {
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
+ } else if (use_indirect) {
+ /* setup tx ring slot to point to indirect
+ * descriptor list stored in reserved region.
+ *
+ * the first slot in indirect ring is already preset
+ * to point to the header in reserved region
+ */
+ start_dp[idx].addr = txvq->virtio_net_hdr_mem +
+ RTE_PTR_DIFF(&txr[idx].tx_indir_pq, txr);
+ start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc_packed);
+ start_dp[idx].flags = VRING_DESC_F_INDIRECT;
+ hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+
+ /* loop below will fill in rest of the indirect elements */
+ start_dp = txr[idx].tx_indir_pq;
+ idx = 1;
+ } else {
+ /* setup first tx ring slot to point to header
+ * stored in reserved region.
+ */
+ start_dp[idx].addr = txvq->virtio_net_hdr_mem +
+ RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+ start_dp[idx].len = vq->hw->vtnet_hdr_size;
+ hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+ idx = dxp->next;
+ }
+
+ virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+
+ do {
+ uint16_t flags;
+ if (idx >= vq->vq_nentries) {
+ idx = 0;
+ vq->avail_wrap_counter ^= 1;
+ }
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+ start_dp[idx].len = cookie->data_len;
+ if (likely(idx != head_id)) {
+ flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+ flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ start_dp[idx].flags = flags;
+ }
+ if (use_indirect) {
+ if (++idx >= (seg_num + 1))
+ break;
+ } else {
+ dxp = &vq->vq_descx[idx];
+ if (idx == (vq->vq_nentries - 1) && dxp->next == 0)
+ vq->avail_wrap_counter ^= 1;
+ prev = idx;
+ idx = dxp->next;
+ }
+ } while ((cookie = cookie->next) != NULL);
+
+ start_dp[prev].index = head_id;
+
+ if (use_indirect)
+ idx = vq->ring_packed.desc_packed[head_id].index;
+
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+
+ if (!in_order) {
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = idx;
+ }
+ vq->vq_avail_idx += needed;
+ if (idx == VQ_RING_DESC_CHAIN_END)
+ idx = vq->vq_avail_idx;
+ if (vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ idx = vq->vq_avail_idx;
+ vq->avail_wrap_counter ^= 1;
+ }
+ vq->vq_desc_head_idx = idx;
+
+ rte_smp_wmb();
+ head_dp->flags = head_flags;
+}
+
+
static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t needed, int use_indirect, int can_push,
@@ -1346,6 +1540,98 @@ virtio_recv_mergeable_pkts(void *rx_queue,
return nb_rx;
}
+uint16_t
+virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct virtnet_tx *txvq = tx_queue;
+ struct virtqueue *vq = txvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ uint16_t hdr_size = hw->vtnet_hdr_size;
+ uint16_t nb_tx = 0;
+ int error;
+ int nb_clean;
+
+ if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
+ return nb_tx;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+
+ PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+
+ virtio_rmb();
+ nb_clean = nb_pkts <= vq->vq_free_cnt ? 0 : nb_pkts - vq->vq_free_cnt;
+ if (nb_clean)
+ virtio_xmit_cleanup_packed(vq, nb_clean);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
+ int can_push = 0, use_indirect = 0, slots, need;
+
+ /* Do VLAN tag insertion */
+ if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&txm);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(txm);
+ continue;
+ }
+ }
+
+ /* optimize ring usage */
+ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+ rte_mbuf_refcnt_read(txm) == 1 &&
+ RTE_MBUF_DIRECT(txm) &&
+ txm->nb_segs == 1 &&
+ rte_pktmbuf_headroom(txm) >= hdr_size &&
+ rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
+ __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
+ can_push = 1;
+ else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+ txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
+ use_indirect = 1;
+
+ /* How many main ring entries are needed for this Tx?
+ * any_layout => number of segments
+ * indirect => 1
+ * default => number of segments + 1
+ */
+ slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
+ need = slots - vq->vq_free_cnt;
+
+ /* Positive value indicates it needs free vring descriptors */
+ if (unlikely(need > 0)) {
+ virtio_rmb();
+ need = RTE_MIN(need, (int)nb_pkts);
+ virtio_xmit_cleanup_packed(vq, need);
+ need = slots - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ break;
+ }
+ }
+
+ /* Enqueue Packet buffers */
+ virtqueue_enqueue_xmit_packed(txvq, txm, slots, use_indirect,
+ can_push, 0);
+
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
+ }
+
+ txvq->stats.packets += nb_tx;
+
+ if (likely(nb_tx)) {
+ if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+ virtqueue_notify(vq);
+ PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ }
+ }
+
+ return nb_tx;
+}
+
uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 769815aa7..2432b1f3b 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -246,8 +246,12 @@ struct virtio_net_hdr_mrg_rxbuf {
#define VIRTIO_MAX_TX_INDIRECT 8
struct virtio_tx_region {
struct virtio_net_hdr_mrg_rxbuf tx_hdr;
- struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
- __attribute__((__aligned__(16)));
+ union {
+ struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
+ __attribute__((__aligned__(16)));
+ struct vring_desc_packed tx_indir_pq[VIRTIO_MAX_TX_INDIRECT]
+ __attribute__((__aligned__(16)));
+ };
};
static inline void
@@ -348,6 +352,7 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx);
void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
uint16_t num);
@@ -381,6 +386,13 @@ virtqueue_kick_prepare(struct virtqueue *vq)
return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}
+static inline int
+virtqueue_kick_prepare_packed(struct virtqueue *vq)
+{
+ return vq->ring_packed.device_event->desc_event_flags !=
+ RING_EVENT_FLAGS_DISABLE;
+}
+
static inline void
virtqueue_notify(struct virtqueue *vq)
{
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v10 6/9] net/virtio: implement receive path for packed queues
2018-11-02 9:07 [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues Jens Freimann
` (4 preceding siblings ...)
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 5/9] net/virtio: implement transmit path for packed queues Jens Freimann
@ 2018-11-02 9:07 ` Jens Freimann
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 7/9] net/virtio: add virtio send command packed queue support Jens Freimann
` (3 subsequent siblings)
9 siblings, 0 replies; 13+ messages in thread
From: Jens Freimann @ 2018-11-02 9:07 UTC (permalink / raw)
To: dev; +Cc: tiwei.bie, maxime.coquelin, Gavin.Hu
Implement the receive path for packed virtqueues.
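Refilling follows the same publish rule as transmit (addr/len first,
rte_smp_wmb(), then flags in virtqueue_enqueue_recv_refill_packed()),
and the dequeue side tracks ring wrap-around as sketched here (assuming
the virtqueue fields added earlier in this series):
    /* As in virtqueue_dequeue_burst_rx_packed(): once the consumer
     * index walks past the ring end, the expected parity of the USED
     * bit flips for the next lap. */
    static inline void rx_advance_sketch(struct virtqueue *vq,
                    uint16_t ndescs)
    {
        vq->vq_used_cons_idx += ndescs;
        if (vq->vq_used_cons_idx >= vq->vq_nentries) {
            vq->vq_used_cons_idx -= vq->vq_nentries;
            vq->used_wrap_counter ^= 1;
        }
    }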
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 52 +++---
drivers/net/virtio/virtio_ethdev.h | 2 +
drivers/net/virtio/virtio_rxtx.c | 275 ++++++++++++++++++++++++++---
drivers/net/virtio/virtqueue.c | 22 +++
drivers/net/virtio/virtqueue.h | 2 +-
5 files changed, 310 insertions(+), 43 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index f59f5ea8a..ed3d5bd2c 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1350,27 +1350,33 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
}
}
- if (hw->use_simple_rx) {
- PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
- } else if (hw->use_inorder_rx) {
- PMD_INIT_LOG(INFO,
- "virtio: using inorder mergeable buffer Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;
- } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- PMD_INIT_LOG(INFO,
- "virtio: using mergeable buffer Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
- } else {
- PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ if (vtpci_packed_queue(hw)) {
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ } else {
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
+ }
+ } else {
+ if (hw->use_simple_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (hw->use_inorder_rx) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using inorder mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ }
}
-
-
}
/* Only support 1:1 queue/interrupt mapping so far.
@@ -1502,7 +1508,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
- vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
else
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
@@ -1927,7 +1934,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
rte_spinlock_init(&hw->state_lock);
- hw->use_simple_rx = 1;
+ if (!vtpci_packed_queue(hw))
+ hw->use_simple_rx = 1;
if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
hw->use_inorder_tx = 1;
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 05d355180..6c9247639 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -73,6 +73,8 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 2d13a3ad0..dbeacf776 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -31,6 +31,7 @@
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_rxtx_simple.h"
+#include "virtio_ring.h"
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -126,6 +127,47 @@ vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx)
dxp->next = VQ_RING_DESC_CHAIN_END;
}
+static uint16_t
+virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
+ struct rte_mbuf **rx_pkts,
+ uint32_t *len,
+ uint16_t num)
+{
+ struct rte_mbuf *cookie;
+ uint16_t used_idx;
+ uint16_t id;
+ struct vring_desc_packed *desc;
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ used_idx = vq->vq_used_cons_idx;
+ desc = (struct vring_desc_packed *) vq->ring_packed.desc_packed;
+ if (!desc_is_used(&desc[used_idx], vq))
+ return i;
+ len[i] = desc[used_idx].len;
+ id = desc[used_idx].index;
+ cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
+
+ if (unlikely(cookie == NULL)) {
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+ rte_prefetch0(cookie);
+ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+ rx_pkts[i] = cookie;
+ vq_ring_free_chain_packed(vq, used_idx);
+
+ vq->vq_used_cons_idx += vq->vq_descx[id].ndescs;
+ if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->used_wrap_counter ^= 1;
+ }
+ }
+
+ return i;
+}
+
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
uint32_t *len, uint16_t num)
@@ -372,6 +414,54 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
return 0;
}
+static inline int
+virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq, struct rte_mbuf *cookie)
+{
+ struct vq_desc_extra *dxp;
+ struct virtio_hw *hw = vq->hw;
+ struct vring_desc_packed *start_dp;
+ uint16_t needed = 1;
+ uint16_t idx;
+ uint16_t flags;
+
+ if (unlikely(vq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(vq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+
+ idx = vq->vq_desc_head_idx;
+ if (unlikely(idx >= vq->vq_nentries))
+ return -EFAULT;
+
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookie;
+ dxp->ndescs = needed;
+
+ start_dp = vq->ring_packed.desc_packed;
+ start_dp[vq->vq_avail_idx].addr =
+ VIRTIO_MBUF_ADDR(cookie, vq) +
+ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ start_dp[vq->vq_avail_idx].len =
+ cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+ flags = VRING_DESC_F_WRITE;
+ flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ rte_smp_wmb();
+ start_dp[vq->vq_avail_idx].flags = flags;
+ idx = dxp->next;
+ vq->vq_desc_head_idx = idx;
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = idx;
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+
+ vq->vq_avail_idx += needed;
+ if (vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ }
+
+ return 0;
+}
/* When doing TSO, the IP length is not included in the pseudo header
* checksum of the packet given to the PMD, but for virtio it is
* expected.
@@ -850,7 +940,10 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
break;
/* Enqueue allocated buffers */
- error = virtqueue_enqueue_recv_refill(vq, m);
+ if (vtpci_packed_queue(vq->hw))
+ error = virtqueue_enqueue_recv_refill_packed(vq, m);
+ else
+ error = virtqueue_enqueue_recv_refill(vq, m);
if (error) {
rte_pktmbuf_free(m);
break;
@@ -858,7 +951,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
nbufs++;
}
- vq_update_avail_idx(vq);
+ if (!vtpci_packed_queue(vq->hw))
+ vq_update_avail_idx(vq);
}
PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
@@ -1182,6 +1276,109 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return nb_rx;
}
+uint16_t
+virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ struct rte_mbuf *rxm, *new_mbuf;
+ uint16_t nb_used, num, nb_rx;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ int error;
+ uint32_t i, nb_enqueued;
+ uint32_t hdr_size;
+ struct virtio_net_hdr *hdr;
+
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
+ nb_used = VIRTIO_MBUF_BURST_SZ;
+
+ virtio_rmb();
+
+ num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
+ if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
+ num = VIRTIO_MBUF_BURST_SZ;
+ if (likely(num > DESC_PER_CACHELINE))
+ num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
+
+ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
+ PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
+
+ nb_enqueued = 0;
+ hdr_size = hw->vtnet_hdr_size;
+
+ for (i = 0; i < num ; i++) {
+ rxm = rcv_pkts[i];
+
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
+
+ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ rxm->port = rxvq->port_id;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
+
+ hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
+ RTE_PKTMBUF_HEADROOM - hdr_size);
+
+ if (hw->vlan_strip)
+ rte_vlan_strip(rxm);
+
+ if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ virtio_rx_stats_updated(rxvq, rxm);
+
+ rx_pkts[nb_rx++] = rxm;
+ }
+
+ rxvq->stats.packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+ while (likely(!virtqueue_full(vq))) {
+ new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
+ if (unlikely(new_mbuf == NULL)) {
+ struct rte_eth_dev *dev
+ = &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ error = virtqueue_enqueue_recv_refill_packed(vq, new_mbuf);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(new_mbuf);
+ break;
+ }
+ nb_enqueued++;
+ }
+
+ if (likely(nb_enqueued)) {
+ if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ return nb_rx;
+}
+
+
uint16_t
virtio_recv_mergeable_pkts_inorder(void *rx_queue,
struct rte_mbuf **rx_pkts,
@@ -1388,12 +1585,20 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint16_t extra_idx;
uint32_t seg_res;
uint32_t hdr_size;
+ uint32_t rx_num = 0;
nb_rx = 0;
if (unlikely(hw->started == 0))
return nb_rx;
- nb_used = VIRTQUEUE_NUSED(vq);
+ if (vtpci_packed_queue(vq->hw)) {
+ if (!desc_is_used(&vq->ring_packed.desc_packed[vq->vq_used_cons_idx],
+ vq))
+ return 0;
+ nb_used = VIRTIO_MBUF_BURST_SZ;
+ } else {
+ nb_used = VIRTQUEUE_NUSED(vq);
+ }
virtio_rmb();
@@ -1412,7 +1617,13 @@ virtio_recv_mergeable_pkts(void *rx_queue,
if (nb_rx == nb_pkts)
break;
- num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
+ if (vtpci_packed_queue(vq->hw))
+ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
+ len, 1);
+ else
+ num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
+ if (num == 0)
+ return nb_rx;
if (num != 1)
continue;
@@ -1464,20 +1675,37 @@ virtio_recv_mergeable_pkts(void *rx_queue,
*/
uint16_t rcv_cnt =
RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
- if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
- uint32_t rx_num =
- virtqueue_dequeue_burst_rx(vq,
- rcv_pkts, len, rcv_cnt);
- i += rx_num;
- rcv_cnt = rx_num;
+ if (vtpci_packed_queue(vq->hw)) {
+ if (likely(vq->vq_free_cnt >= rcv_cnt)) {
+ if (!desc_is_used(&vq->ring_packed.desc_packed[vq->vq_used_cons_idx],
+ vq))
+ rx_num = 0;
+ else
+ rx_num = virtqueue_dequeue_burst_rx_packed(vq,
+ rcv_pkts, len, rcv_cnt);
+ } else {
+ PMD_RX_LOG(ERR,
+ "No enough segments for packet.");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ break;
+ }
} else {
- PMD_RX_LOG(ERR,
- "No enough segments for packet.");
- nb_enqueued++;
- virtio_discard_rxbuf(vq, rxm);
- rxvq->stats.errors++;
- break;
+ if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
+ rx_num = virtqueue_dequeue_burst_rx(vq,
+ rcv_pkts, len, rcv_cnt);
+ } else {
+ PMD_RX_LOG(ERR,
+ "No enough segments for packet.");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ break;
+ }
}
+ i += rx_num;
+ rcv_cnt = rx_num;
extra_idx = 0;
@@ -1520,7 +1748,10 @@ virtio_recv_mergeable_pkts(void *rx_queue,
dev->data->rx_mbuf_alloc_failed++;
break;
}
- error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
+ if (vtpci_packed_queue(vq->hw))
+ error = virtqueue_enqueue_recv_refill_packed(vq, new_mbuf);
+ else
+ error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
if (unlikely(error)) {
rte_pktmbuf_free(new_mbuf);
break;
@@ -1529,9 +1760,13 @@ virtio_recv_mergeable_pkts(void *rx_queue,
}
if (likely(nb_enqueued)) {
- vq_update_avail_idx(vq);
-
- if (unlikely(virtqueue_kick_prepare(vq))) {
+ if (likely(!vtpci_packed_queue(vq->hw))) {
+ vq_update_avail_idx(vq);
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ } else if (virtqueue_kick_prepare_packed(vq)) {
virtqueue_notify(vq);
PMD_RX_LOG(DEBUG, "Notified");
}
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 56a77cc71..d24783432 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -58,12 +58,34 @@ virtqueue_detach_unused(struct virtqueue *vq)
void
virtqueue_rxvq_flush(struct virtqueue *vq)
{
+ struct vring_desc_packed *descs = vq->ring_packed.desc_packed;
struct virtnet_rx *rxq = &vq->rxq;
struct virtio_hw *hw = vq->hw;
struct vring_used_elem *uep;
struct vq_desc_extra *dxp;
uint16_t used_idx, desc_idx;
uint16_t nb_used, i;
+ uint16_t size = vq->vq_nentries;
+
+ if (vtpci_packed_queue(vq->hw)) {
+ i = vq->vq_used_cons_idx;
+ if (i > size) {
+ PMD_INIT_LOG(ERR, "vq_used_cons_idx out of range, %d",
+ vq->vq_used_cons_idx);
+ return;
+ }
+ while (desc_is_used(&descs[i], vq) && i < size) {
+ dxp = &vq->vq_descx[descs[i].index];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq_ring_free_chain_packed(vq, i);
+ i = dxp->next;
+ vq->vq_used_cons_idx++;
+ }
+ return;
+ }
nb_used = VIRTQUEUE_NUSED(vq);
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 2432b1f3b..31afc4862 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -352,7 +352,7 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
-void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
uint16_t num);
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* [dpdk-dev] [PATCH v10 7/9] net/virtio: add virtio send command packed queue support
2018-11-02 9:07 [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues Jens Freimann
` (5 preceding siblings ...)
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 6/9] net/virtio: implement receive " Jens Freimann
@ 2018-11-02 9:07 ` Jens Freimann
2018-11-28 17:25 ` Maxime Coquelin
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 8/9] net/virtio: enable packed virtqueues by default Jens Freimann
` (2 subsequent siblings)
9 siblings, 1 reply; 13+ messages in thread
From: Jens Freimann @ 2018-11-02 9:07 UTC (permalink / raw)
To: dev; +Cc: tiwei.bie, maxime.coquelin, Gavin.Hu
Use packed virtqueue format when reading and writing descriptors
to/from the ring.
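One control request occupies 2 + pkt_num descriptors, laid out as in
this sketch of what virtio_pq_send_command() below builds:
    /*
     *   desc[head]       ctrl header (virtio_net_ctrl_hdr),   F_NEXT
     *   desc[head+1..k]  one payload buffer per dlen[] entry, F_NEXT
     *   desc[head+k+1]   1-byte status, device-writable,      F_WRITE
     *
     * The head's AVAIL/USED bits are set only after every other
     * descriptor of the request; the driver then busy-waits with
     * desc_is_used() on the head until the device writes the status
     * back.
     */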
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 90 ++++++++++++++++++++++++++++++
1 file changed, 90 insertions(+)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index ed3d5bd2c..4451dd49b 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -141,6 +141,90 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
+static struct virtio_pmd_ctrl *
+virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
+{
+ struct virtqueue *vq = cvq->vq;
+ int head;
+ struct vring_desc_packed *desc = vq->ring_packed.desc_packed;
+ struct virtio_pmd_ctrl *result;
+ int wrap_counter;
+ int sum = 0;
+ int k;
+
+ /*
+ * Format is enforced in qemu code:
+ * One TX packet for header;
+ * At least one TX packet per argument;
+ * One RX packet for ACK.
+ */
+ head = vq->vq_avail_idx;
+ wrap_counter = vq->avail_wrap_counter;
+ desc[head].flags = VRING_DESC_F_NEXT;
+ desc[head].addr = cvq->virtio_net_hdr_mem;
+ desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_free_cnt--;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ }
+
+ for (k = 0; k < pkt_num; k++) {
+ desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr)
+ + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+ desc[vq->vq_avail_idx].len = dlen[k];
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT;
+ sum += dlen[k];
+ vq->vq_free_cnt--;
+ _set_desc_avail(&desc[vq->vq_avail_idx],
+ vq->avail_wrap_counter);
+ rte_smp_wmb();
+ vq->vq_free_cnt--;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ }
+ }
+
+
+ desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr);
+ desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE;
+ _set_desc_avail(&desc[vq->vq_avail_idx],
+ vq->avail_wrap_counter);
+ _set_desc_avail(&desc[head], wrap_counter);
+ rte_smp_wmb();
+
+ vq->vq_free_cnt--;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ }
+
+ virtqueue_notify(vq);
+
+ /* wait for used descriptors in virtqueue */
+ do {
+ rte_rmb();
+ usleep(100);
+ } while (!desc_is_used(&desc[head], vq));
+
+ /* now get used descriptors */
+ while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
+ vq->vq_free_cnt++;
+ if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->used_wrap_counter ^= 1;
+ }
+ }
+
+ result = cvq->virtio_net_hdr_mz->addr;
+ return result;
+}
+
static int
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
int *dlen, int pkt_num)
@@ -174,6 +258,11 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
sizeof(struct virtio_pmd_ctrl));
+ if (vtpci_packed_queue(vq->hw)) {
+ result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
+ goto out_unlock;
+ }
+
/*
* Format is enforced in qemu code:
* One TX packet for header;
@@ -245,6 +334,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
result = cvq->virtio_net_hdr_mz->addr;
+out_unlock:
rte_spinlock_unlock(&cvq->lock);
return result->status;
}
--
2.17.1
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [dpdk-dev] [PATCH v10 7/9] net/virtio: add virtio send command packed queue support
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 7/9] net/virtio: add virtio send command packed queue support Jens Freimann
@ 2018-11-28 17:25 ` Maxime Coquelin
0 siblings, 0 replies; 13+ messages in thread
From: Maxime Coquelin @ 2018-11-28 17:25 UTC (permalink / raw)
To: Jens Freimann, dev; +Cc: tiwei.bie, Gavin.Hu
On 11/2/18 10:07 AM, Jens Freimann wrote:
> Use packed virtqueue format when reading and writing descriptors
> to/from the ring.
>
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
> drivers/net/virtio/virtio_ethdev.c | 90 ++++++++++++++++++++++++++++++
> 1 file changed, 90 insertions(+)
>
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index ed3d5bd2c..4451dd49b 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -141,6 +141,90 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
>
> struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
>
> +static struct virtio_pmd_ctrl *
> +virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
> + int *dlen, int pkt_num)
> +{
> + struct virtqueue *vq = cvq->vq;
> + int head;
> + struct vring_desc_packed *desc = vq->ring_packed.desc_packed;
> + struct virtio_pmd_ctrl *result;
> + int wrap_counter;
> + int sum = 0;
> + int k;
> +
> + /*
> + * Format is enforced in qemu code:
> + * One TX packet for header;
> + * At least one TX packet per argument;
> + * One RX packet for ACK.
> + */
> + head = vq->vq_avail_idx;
> + wrap_counter = vq->avail_wrap_counter;
> + desc[head].flags = VRING_DESC_F_NEXT;
> + desc[head].addr = cvq->virtio_net_hdr_mem;
> + desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
> + vq->vq_free_cnt--;
> + if (++vq->vq_avail_idx >= vq->vq_nentries) {
> + vq->vq_avail_idx -= vq->vq_nentries;
> + vq->avail_wrap_counter ^= 1;
> + }
> +
> + for (k = 0; k < pkt_num; k++) {
> + desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
> + + sizeof(struct virtio_net_ctrl_hdr)
> + + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
> + desc[vq->vq_avail_idx].len = dlen[k];
> + desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT;
> + sum += dlen[k];
> + vq->vq_free_cnt--;
> + _set_desc_avail(&desc[vq->vq_avail_idx],
> + vq->avail_wrap_counter);
> + rte_smp_wmb();
> + vq->vq_free_cnt--;
> + if (++vq->vq_avail_idx >= vq->vq_nentries) {
> + vq->vq_avail_idx -= vq->vq_nentries;
> + vq->avail_wrap_counter ^= 1;
> + }
> + }
> +
> + desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
> + + sizeof(struct virtio_net_ctrl_hdr);
> + desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
> + desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE;
> + _set_desc_avail(&desc[vq->vq_avail_idx],
> + vq->avail_wrap_counter);
> + rte_smp_wmb();
> + _set_desc_avail(&desc[head], wrap_counter);
> +
> + vq->vq_free_cnt--;
> + if (++vq->vq_avail_idx >= vq->vq_nentries) {
> + vq->vq_avail_idx -= vq->vq_nentries;
> + vq->avail_wrap_counter ^= 1;
> + }
> +
> + virtqueue_notify(vq);
> +
> + /* wait for used descriptors in virtqueue */
> + do {
> + rte_rmb();
> + usleep(100);
> + } while (!desc_is_used(&desc[head], vq));
Maybe better to poll for !desc_is_used(&desc[vq->vq_used_cons_idx], vq)
to make it clear we're safe using used_wrap_counter?
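Something like this, for instance (untested sketch; same loop body as
above, only the polled index changed):

        /* Poll the descriptor at vq_used_cons_idx: that is the index
         * the used wrap counter is advanced with, so the check is
         * explicit about which counter guards it.
         */
        do {
                rte_rmb();
                usleep(100);
        } while (!desc_is_used(&desc[vq->vq_used_cons_idx], vq));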
> +
> + /* now get used descriptors */
> + while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
> + vq->vq_free_cnt++;
> + if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
> + vq->vq_used_cons_idx -= vq->vq_nentries;
> + vq->used_wrap_counter ^= 1;
> + }
> + }
> +
> + result = cvq->virtio_net_hdr_mz->addr;
> + return result;
> +}
> +
> static int
> virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
> int *dlen, int pkt_num)
> @@ -174,6 +256,11 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
> memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
> sizeof(struct virtio_pmd_ctrl));
>
> + if (vtpci_packed_queue(vq->hw)) {
> + result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
> + goto out_unlock;
> + }
> +
> /*
> * Format is enforced in qemu code:
> * One TX packet for header;
> @@ -245,6 +332,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
>
> result = cvq->virtio_net_hdr_mz->addr;
>
> +out_unlock:
> rte_spinlock_unlock(&cvq->lock);
> return result->status;
> }
>
* [dpdk-dev] [PATCH v10 8/9] net/virtio: enable packed virtqueues by default
2018-11-02 9:07 [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues Jens Freimann
` (6 preceding siblings ...)
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 7/9] net/virtio: add virtio send command packed queue support Jens Freimann
@ 2018-11-02 9:07 ` Jens Freimann
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 9/9] net/virtio-user: disable packed virtqueues when CTRL_VQ enabled Jens Freimann
2018-11-02 14:02 ` [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues Tiwei Bie
9 siblings, 0 replies; 13+ messages in thread
From: Jens Freimann @ 2018-11-02 9:07 UTC (permalink / raw)
To: dev; +Cc: tiwei.bie, maxime.coquelin, Gavin.Hu
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.h | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 6c9247639..d9b4feee2 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -34,6 +34,7 @@
1u << VIRTIO_RING_F_INDIRECT_DESC | \
1ULL << VIRTIO_F_VERSION_1 | \
1ULL << VIRTIO_F_IN_ORDER | \
+ 1ULL << VIRTIO_F_RING_PACKED | \
1ULL << VIRTIO_F_IOMMU_PLATFORM)
#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
--
2.17.1
* [dpdk-dev] [PATCH v10 9/9] net/virtio-user: disable packed virtqueues when CTRL_VQ enabled
2018-11-02 9:07 [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues Jens Freimann
` (7 preceding siblings ...)
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 8/9] net/virtio: enable packed virtqueues by default Jens Freimann
@ 2018-11-02 9:07 ` Jens Freimann
2018-11-02 14:02 ` [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues Tiwei Bie
9 siblings, 0 replies; 13+ messages in thread
From: Jens Freimann @ 2018-11-02 9:07 UTC (permalink / raw)
To: dev; +Cc: tiwei.bie, maxime.coquelin, Gavin.Hu
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_user/virtio_user_dev.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 20816c936..3105befb2 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -474,6 +474,7 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
* so if necessary, we just claim to support CQ
*/
dev->frontend_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+ dev->unsupported_features |= (1ULL << VIRTIO_F_RING_PACKED);
} else {
dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
/* Also disable features that depend on VIRTIO_NET_F_CTRL_VQ */
--
2.17.1
* Re: [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues
2018-11-02 9:07 [dpdk-dev] [PATCH v10 0/9] implement packed virtqueues Jens Freimann
` (8 preceding siblings ...)
2018-11-02 9:07 ` [dpdk-dev] [PATCH v10 9/9] net/virtio-user: disable packed virtqueues when CTRL_VQ enabled Jens Freimann
@ 2018-11-02 14:02 ` Tiwei Bie
9 siblings, 0 replies; 13+ messages in thread
From: Tiwei Bie @ 2018-11-02 14:02 UTC (permalink / raw)
To: Jens Freimann; +Cc: dev, maxime.coquelin, Gavin.Hu
On Fri, Nov 02, 2018 at 10:07:40AM +0100, Jens Freimann wrote:
> This is a basic implementation of packed virtqueues as specified in the
> Virtio 1.1 draft. A compiled version of the current draft is available
> at https://github.com/oasis-tcs/virtio-docs.git (or as .pdf at
> https://github.com/oasis-tcs/virtio-docs/blob/master/virtio-v1.1-packed-wd10.pdf).
>
> A packed virtqueue differs from a split virtqueue in that it consists
> of a single descriptor ring that replaces the available and used rings,
> indexes and descriptor pointers.
>
> Each descriptor is readable and writable and has a flags field. These
> flags mark whether a descriptor is available or used. To detect new
> available descriptors even after the ring has wrapped, device and driver
> each keep a single-bit wrap counter that is flipped every time the last
> descriptor in the ring is used or made available.
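The wrap-counter check described above boils down to something like the
following sketch (illustrative only, not code from this series; the flag
bit positions follow the Virtio 1.1 draft, AVAIL = bit 7, USED = bit 15):

        #include <stdint.h>

        #define DESC_F_AVAIL (1u << 7)   /* VIRTQ_DESC_F_AVAIL in the draft */
        #define DESC_F_USED  (1u << 15)  /* VIRTQ_DESC_F_USED in the draft */

        /* A descriptor is seen as used once its AVAIL and USED bits are
         * equal to each other and match the consumer's single-bit used
         * wrap counter.
         */
        static inline int
        sketch_desc_is_used(uint16_t flags, uint16_t used_wrap_counter)
        {
                uint16_t avail = !!(flags & DESC_F_AVAIL);
                uint16_t used = !!(flags & DESC_F_USED);

                return avail == used && used == used_wrap_counter;
        }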
Do you have performance numbers showing the improvements in the
driver and vhost?
Thanks
>
> v9-v10:
> * don't mix index into buffer list and descriptors
> * whitespace and formatting issues
> * remove "VQ:" in dump virtqueue patch
> * add extra packed vring struct to virtqueue and change function
> prototypes and code accordingly
> * move wrap_counters to virtqueue
> * make if-conditions for packed and !packed more clear in
> set_rxtx_funcs()
> * initialize wrap counters in first patch, instead of rx and tx
> implementation patch
> * make virtio-user unsupported with packed virtqueues, to
> be fixed in a separate patch set?
>
> v8-v9:
> * fix virtio_ring_free_chain_packed() to handle descriptors
> correctly in case of out-of-order
> * fix check in virtqueue_xmit_cleanup_packed() to improve performance
>
> v7->v8:
> * move desc_is_used change to correct patch
> * remove trailing newline
> * correct xmit code, flags update and memory barrier
> * move packed desc init to dedicated function, split
> and packed variant
>
>
> Jens Freimann (9):
> net/virtio: vring init for packed queues
> net/virtio: add packed virtqueue defines
> net/virtio: add packed virtqueue helpers
> net/virtio: dump packed virtqueue data
> net/virtio: implement transmit path for packed queues
> net/virtio: implement receive path for packed queues
> net/virtio: add virtio send command packed queue support
> net/virtio: enable packed virtqueues by default
> net/virtio-user: disable packed virtqueues when CTRL_VQ enabled
>
> drivers/net/virtio/virtio_ethdev.c | 202 +++++--
> drivers/net/virtio/virtio_ethdev.h | 5 +
> drivers/net/virtio/virtio_pci.h | 7 +
> drivers/net/virtio/virtio_ring.h | 64 +-
> drivers/net/virtio/virtio_rxtx.c | 561 +++++++++++++++++-
> .../net/virtio/virtio_user/virtio_user_dev.c | 1 +
> drivers/net/virtio/virtqueue.c | 22 +
> drivers/net/virtio/virtqueue.h | 69 ++-
> 8 files changed, 859 insertions(+), 72 deletions(-)
>
> --
> 2.17.1
>