From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, david.marchand@redhat.com,
eperezma@redhat.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH v1 11/21] net/virtio: extract virtqueue init from virtio queue init
Date: Wed, 30 Nov 2022 16:56:29 +0100
Message-ID: <20221130155639.150553-12-maxime.coquelin@redhat.com>
In-Reply-To: <20221130155639.150553-1-maxime.coquelin@redhat.com>
This patch extracts the generic virtqueue allocation and
initialization out of the Virtio ethdev queue initialization,
into new virtqueue_alloc() and virtqueue_free() helpers, as
preliminary work to provide a way for Virtio-user to allocate
its shadow control virtqueue.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
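For context, a minimal sketch of the expected call-site shape for the
new helpers, mirroring the reworked virtio_init_queue() below. This is
illustrative only: example_init_queue() and its include set are
hypothetical, while the helper signatures come from the diff.

/* Illustrative only: example_init_queue() is a hypothetical caller,
 * not part of this patch. Helper signatures match virtqueue.h below.
 */
#include <errno.h>
#include <stdio.h>

#include "virtio.h"	/* assumed include set for VIRTIO_OPS() and struct virtio_hw */
#include "virtqueue.h"

static int
example_init_queue(struct virtio_hw *hw, uint16_t queue_idx,
		uint16_t vq_size, int numa_node)
{
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	struct virtqueue *vq;
	int queue_type = virtio_get_queue_type(hw, queue_idx);

	snprintf(vq_name, sizeof(vq_name), "ex_port0_vq%u", queue_idx);

	/* One call now covers the vq struct, the vring memzone, the
	 * TX/CQ header memzones and, for vectorized RX, the SW ring. */
	vq = virtqueue_alloc(hw, queue_idx, vq_size, queue_type,
			numa_node, vq_name);
	if (vq == NULL)
		return -ENOMEM;

	hw->vqs[queue_idx] = vq;

	/* The real ethdev code also wires vq->cq.notify_queue here. */
	if (queue_type == VTNET_CQ)
		hw->cvq = &vq->cq;

	if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
		/* Teardown is symmetric: one call frees whatever
		 * virtqueue_alloc() reserved for this queue type. */
		if (queue_type == VTNET_CQ)
			hw->cvq = NULL;
		virtqueue_free(vq);
		hw->vqs[queue_idx] = NULL;
		return -EINVAL;
	}

	return 0;
}

This alloc/free pairing is what later lets Virtio-user allocate its
shadow control virtqueue without going through the ethdev queue init
path.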
drivers/net/virtio/virtio_ethdev.c | 261 ++--------------------------
drivers/net/virtio/virtqueue.c | 266 +++++++++++++++++++++++++++++
drivers/net/virtio/virtqueue.h | 5 +
3 files changed, 282 insertions(+), 250 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 46dd5606f6..8f657d2d90 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -221,173 +221,18 @@ virtio_get_nr_vq(struct virtio_hw *hw)
return nr_vq;
}
-static void
-virtio_init_vring(struct virtqueue *vq)
-{
- int size = vq->vq_nentries;
- uint8_t *ring_mem = vq->vq_ring_virt_mem;
-
- PMD_INIT_FUNC_TRACE();
-
- memset(ring_mem, 0, vq->vq_ring_size);
-
- vq->vq_used_cons_idx = 0;
- vq->vq_desc_head_idx = 0;
- vq->vq_avail_idx = 0;
- vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
- vq->vq_free_cnt = vq->vq_nentries;
- memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
- if (virtio_with_packed_queue(vq->hw)) {
- vring_init_packed(&vq->vq_packed.ring, ring_mem,
- VIRTIO_VRING_ALIGN, size);
- vring_desc_init_packed(vq, size);
- } else {
- struct vring *vr = &vq->vq_split.ring;
-
- vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
- vring_desc_init_split(vr->desc, size);
- }
- /*
- * Disable device(host) interrupting guest
- */
- virtqueue_disable_intr(vq);
-}
-
static void
virtio_control_queue_notify(struct virtqueue *vq, __rte_unused void *cookie)
{
virtqueue_notify(vq);
}
-static int
-virtio_alloc_queue_headers(struct virtqueue *vq, int numa_node, const char *name)
-{
- char hdr_name[VIRTQUEUE_MAX_NAME_SZ];
- const struct rte_memzone **hdr_mz;
- rte_iova_t *hdr_mem;
- ssize_t size;
- int queue_type;
-
- queue_type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
- switch (queue_type) {
- case VTNET_TQ:
- /*
- * For each xmit packet, allocate a virtio_net_hdr
- * and indirect ring elements
- */
- size = vq->vq_nentries * sizeof(struct virtio_tx_region);
- hdr_mz = &vq->txq.hdr_mz;
- hdr_mem = &vq->txq.hdr_mem;
- break;
- case VTNET_CQ:
- /* Allocate a page for control vq command, data and status */
- size = rte_mem_page_size();
- hdr_mz = &vq->cq.hdr_mz;
- hdr_mem = &vq->cq.hdr_mem;
- break;
- case VTNET_RQ:
- /* fallthrough */
- default:
- return 0;
- }
-
- snprintf(hdr_name, sizeof(hdr_name), "%s_hdr", name);
- *hdr_mz = rte_memzone_reserve_aligned(hdr_name, size, numa_node,
- RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
- if (*hdr_mz == NULL) {
- if (rte_errno == EEXIST)
- *hdr_mz = rte_memzone_lookup(hdr_name);
- if (*hdr_mz == NULL)
- return -ENOMEM;
- }
-
- memset((*hdr_mz)->addr, 0, size);
-
- if (vq->hw->use_va)
- *hdr_mem = (uintptr_t)(*hdr_mz)->addr;
- else
- *hdr_mem = (uintptr_t)(*hdr_mz)->iova;
-
- return 0;
-}
-
-static void
-virtio_free_queue_headers(struct virtqueue *vq)
-{
- const struct rte_memzone **hdr_mz;
- rte_iova_t *hdr_mem;
- int queue_type;
-
- queue_type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
- switch (queue_type) {
- case VTNET_TQ:
- hdr_mz = &vq->txq.hdr_mz;
- hdr_mem = &vq->txq.hdr_mem;
- break;
- case VTNET_CQ:
- hdr_mz = &vq->cq.hdr_mz;
- hdr_mem = &vq->cq.hdr_mem;
- break;
- case VTNET_RQ:
- /* fallthrough */
- default:
- return;
- }
-
- rte_memzone_free(*hdr_mz);
- *hdr_mz = NULL;
- *hdr_mem = 0;
-}
-
-static int
-virtio_rxq_sw_ring_alloc(struct virtqueue *vq, int numa_node)
-{
- void *sw_ring;
- struct rte_mbuf *mbuf;
- size_t size;
-
- /* SW ring is only used with vectorized datapath */
- if (!vq->hw->use_vec_rx)
- return 0;
-
- size = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq->vq_nentries) * sizeof(vq->rxq.sw_ring[0]);
-
- sw_ring = rte_zmalloc_socket("sw_ring", size, RTE_CACHE_LINE_SIZE, numa_node);
- if (!sw_ring) {
- PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
- return -ENOMEM;
- }
-
- mbuf = rte_zmalloc_socket("sw_ring", sizeof(*mbuf), RTE_CACHE_LINE_SIZE, numa_node);
- if (!mbuf) {
- PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
- rte_free(sw_ring);
- return -ENOMEM;
- }
-
- vq->rxq.sw_ring = sw_ring;
- vq->rxq.fake_mbuf = mbuf;
-
- return 0;
-}
-
-static void
-virtio_rxq_sw_ring_free(struct virtqueue *vq)
-{
- rte_free(vq->rxq.fake_mbuf);
- vq->rxq.fake_mbuf = NULL;
- rte_free(vq->rxq.sw_ring);
- vq->rxq.sw_ring = NULL;
-}
-
static int
virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
{
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
- const struct rte_memzone *mz = NULL;
- unsigned int vq_size, size;
+ unsigned int vq_size;
struct virtio_hw *hw = dev->data->dev_private;
- struct virtnet_ctl *cvq = NULL;
struct virtqueue *vq;
int queue_type = virtio_get_queue_type(hw, queue_idx);
int ret;
@@ -414,87 +259,19 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, queue_idx);
- size = RTE_ALIGN_CEIL(sizeof(*vq) +
- vq_size * sizeof(struct vq_desc_extra),
- RTE_CACHE_LINE_SIZE);
-
-
- vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
- numa_node);
- if (vq == NULL) {
- PMD_INIT_LOG(ERR, "can not allocate vq");
+ vq = virtqueue_alloc(hw, queue_idx, vq_size, queue_type, numa_node, vq_name);
+ if (!vq) {
+ PMD_INIT_LOG(ERR, "virtqueue init failed");
return -ENOMEM;
}
- hw->vqs[queue_idx] = vq;
- vq->hw = hw;
- vq->vq_queue_index = queue_idx;
- vq->vq_nentries = vq_size;
- if (virtio_with_packed_queue(hw)) {
- vq->vq_packed.used_wrap_counter = 1;
- vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
- vq->vq_packed.event_flags_shadow = 0;
- if (queue_type == VTNET_RQ)
- vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
- }
-
- /*
- * Reserve a memzone for vring elements
- */
- size = vring_size(hw, vq_size, VIRTIO_VRING_ALIGN);
- vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_VRING_ALIGN);
- PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
- size, vq->vq_ring_size);
-
- mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
- numa_node, RTE_MEMZONE_IOVA_CONTIG,
- VIRTIO_VRING_ALIGN);
- if (mz == NULL) {
- if (rte_errno == EEXIST)
- mz = rte_memzone_lookup(vq_name);
- if (mz == NULL) {
- ret = -ENOMEM;
- goto free_vq;
- }
- }
-
- memset(mz->addr, 0, mz->len);
-
- vq->mz = mz;
- if (hw->use_va)
- vq->vq_ring_mem = (uintptr_t)mz->addr;
- else
- vq->vq_ring_mem = mz->iova;
-
- vq->vq_ring_virt_mem = mz->addr;
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem);
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: %p", vq->vq_ring_virt_mem);
-
- virtio_init_vring(vq);
+ hw->vqs[queue_idx] = vq;
- ret = virtio_alloc_queue_headers(vq, numa_node, vq_name);
- if (ret) {
- PMD_INIT_LOG(ERR, "Failed to alloc queue headers");
- goto free_mz;
- }
-
- if (queue_type == VTNET_RQ) {
- ret = virtio_rxq_sw_ring_alloc(vq, numa_node);
- if (ret)
- goto free_hdr_mz;
- } else if (queue_type == VTNET_TQ) {
- virtqueue_txq_indirect_headers_init(vq);
- } else if (queue_type == VTNET_CQ) {
- cvq = &vq->cq;
- hw->cvq = cvq;
+ if (queue_type == VTNET_CQ) {
+ hw->cvq = &vq->cq;
vq->cq.notify_queue = &virtio_control_queue_notify;
}
- if (hw->use_va)
- vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
- else
- vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
-
if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
PMD_INIT_LOG(ERR, "setup_queue failed");
ret = -EINVAL;
@@ -504,15 +281,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
return 0;
clean_vq:
- hw->cvq = NULL;
- if (queue_type == VTNET_RQ)
- virtio_rxq_sw_ring_free(vq);
-free_hdr_mz:
- virtio_free_queue_headers(vq);
-free_mz:
- rte_memzone_free(mz);
-free_vq:
- rte_free(vq);
+ if (queue_type == VTNET_CQ)
+ hw->cvq = NULL;
+ virtqueue_free(vq);
hw->vqs[queue_idx] = NULL;
return ret;
@@ -523,7 +294,6 @@ virtio_free_queues(struct virtio_hw *hw)
{
uint16_t nr_vq = virtio_get_nr_vq(hw);
struct virtqueue *vq;
- int queue_type;
uint16_t i;
if (hw->vqs == NULL)
@@ -533,16 +303,7 @@ virtio_free_queues(struct virtio_hw *hw)
vq = hw->vqs[i];
if (!vq)
continue;
-
- queue_type = virtio_get_queue_type(hw, i);
- if (queue_type == VTNET_RQ) {
- rte_free(vq->rxq.fake_mbuf);
- rte_free(vq->rxq.sw_ring);
- }
-
- virtio_free_queue_headers(vq);
- rte_memzone_free(vq->mz);
- rte_free(vq);
+ virtqueue_free(vq);
hw->vqs[i] = NULL;
}
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 7a84796513..1d836f2530 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -2,8 +2,12 @@
* Copyright(c) 2010-2015 Intel Corporation
*/
#include <stdint.h>
+#include <unistd.h>
+#include <rte_eal_paging.h>
+#include <rte_malloc.h>
#include <rte_mbuf.h>
+#include <rte_memzone.h>
#include "virtqueue.h"
#include "virtio_logs.h"
@@ -259,3 +263,265 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
return 0;
}
+
+
+static void
+virtio_init_vring(struct virtqueue *vq)
+{
+ int size = vq->vq_nentries;
+ uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(ring_mem, 0, vq->vq_ring_size);
+
+ vq->vq_used_cons_idx = 0;
+ vq->vq_desc_head_idx = 0;
+ vq->vq_avail_idx = 0;
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+ memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
+ if (virtio_with_packed_queue(vq->hw)) {
+ vring_init_packed(&vq->vq_packed.ring, ring_mem,
+ VIRTIO_VRING_ALIGN, size);
+ vring_desc_init_packed(vq, size);
+ } else {
+ struct vring *vr = &vq->vq_split.ring;
+
+ vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
+ vring_desc_init_split(vr->desc, size);
+ }
+ /*
+ * Disable device(host) interrupting guest
+ */
+ virtqueue_disable_intr(vq);
+}
+
+static int
+virtio_alloc_queue_headers(struct virtqueue *vq, int numa_node, const char *name)
+{
+ char hdr_name[VIRTQUEUE_MAX_NAME_SZ];
+ const struct rte_memzone **hdr_mz;
+ rte_iova_t *hdr_mem;
+ ssize_t size;
+ int queue_type;
+
+ queue_type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
+ switch (queue_type) {
+ case VTNET_TQ:
+ /*
+ * For each xmit packet, allocate a virtio_net_hdr
+ * and indirect ring elements
+ */
+ size = vq->vq_nentries * sizeof(struct virtio_tx_region);
+ hdr_mz = &vq->txq.hdr_mz;
+ hdr_mem = &vq->txq.hdr_mem;
+ break;
+ case VTNET_CQ:
+ /* Allocate a page for control vq command, data and status */
+ size = rte_mem_page_size();
+ hdr_mz = &vq->cq.hdr_mz;
+ hdr_mem = &vq->cq.hdr_mem;
+ break;
+ case VTNET_RQ:
+ /* fallthrough */
+ default:
+ return 0;
+ }
+
+ snprintf(hdr_name, sizeof(hdr_name), "%s_hdr", name);
+ *hdr_mz = rte_memzone_reserve_aligned(hdr_name, size, numa_node,
+ RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
+ if (*hdr_mz == NULL) {
+ if (rte_errno == EEXIST)
+ *hdr_mz = rte_memzone_lookup(hdr_name);
+ if (*hdr_mz == NULL)
+ return -ENOMEM;
+ }
+
+ memset((*hdr_mz)->addr, 0, size);
+
+ if (vq->hw->use_va)
+ *hdr_mem = (uintptr_t)(*hdr_mz)->addr;
+ else
+ *hdr_mem = (uintptr_t)(*hdr_mz)->iova;
+
+ return 0;
+}
+
+static void
+virtio_free_queue_headers(struct virtqueue *vq)
+{
+ const struct rte_memzone **hdr_mz;
+ rte_iova_t *hdr_mem;
+ int queue_type;
+
+ queue_type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
+ switch (queue_type) {
+ case VTNET_TQ:
+ hdr_mz = &vq->txq.hdr_mz;
+ hdr_mem = &vq->txq.hdr_mem;
+ break;
+ case VTNET_CQ:
+ hdr_mz = &vq->cq.hdr_mz;
+ hdr_mem = &vq->cq.hdr_mem;
+ break;
+ case VTNET_RQ:
+ /* fallthrough */
+ default:
+ return;
+ }
+
+ rte_memzone_free(*hdr_mz);
+ *hdr_mz = NULL;
+ *hdr_mem = 0;
+}
+
+static int
+virtio_rxq_sw_ring_alloc(struct virtqueue *vq, int numa_node)
+{
+ void *sw_ring;
+ struct rte_mbuf *mbuf;
+ size_t size;
+
+ /* SW ring is only used with vectorized datapath */
+ if (!vq->hw->use_vec_rx)
+ return 0;
+
+ size = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq->vq_nentries) * sizeof(vq->rxq.sw_ring[0]);
+
+ sw_ring = rte_zmalloc_socket("sw_ring", size, RTE_CACHE_LINE_SIZE, numa_node);
+ if (!sw_ring) {
+ PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
+ return -ENOMEM;
+ }
+
+ mbuf = rte_zmalloc_socket("sw_ring", sizeof(*mbuf), RTE_CACHE_LINE_SIZE, numa_node);
+ if (!mbuf) {
+ PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
+ rte_free(sw_ring);
+ return -ENOMEM;
+ }
+
+ vq->rxq.sw_ring = sw_ring;
+ vq->rxq.fake_mbuf = mbuf;
+
+ return 0;
+}
+
+static void
+virtio_rxq_sw_ring_free(struct virtqueue *vq)
+{
+ rte_free(vq->rxq.fake_mbuf);
+ vq->rxq.fake_mbuf = NULL;
+ rte_free(vq->rxq.sw_ring);
+ vq->rxq.sw_ring = NULL;
+}
+
+struct virtqueue *
+virtqueue_alloc(struct virtio_hw *hw, uint16_t index, uint16_t num, int type,
+ int node, const char *name)
+{
+ struct virtqueue *vq;
+ const struct rte_memzone *mz;
+ unsigned int size;
+
+ size = sizeof(*vq) + num * sizeof(struct vq_desc_extra);
+ size = RTE_ALIGN_CEIL(size, RTE_CACHE_LINE_SIZE);
+
+ vq = rte_zmalloc_socket(name, size, RTE_CACHE_LINE_SIZE, node);
+ if (vq == NULL) {
+ PMD_INIT_LOG(ERR, "can not allocate vq");
+ return NULL;
+ }
+
+ vq->hw = hw;
+ vq->vq_queue_index = index;
+ vq->vq_nentries = num;
+ if (virtio_with_packed_queue(hw)) {
+ vq->vq_packed.used_wrap_counter = 1;
+ vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
+ vq->vq_packed.event_flags_shadow = 0;
+ if (type == VTNET_RQ)
+ vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+ }
+
+ /*
+ * Reserve a memzone for vring elements
+ */
+ size = vring_size(hw, num, VIRTIO_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_VRING_ALIGN);
+ PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size);
+
+ mz = rte_memzone_reserve_aligned(name, vq->vq_ring_size, node,
+ RTE_MEMZONE_IOVA_CONTIG, VIRTIO_VRING_ALIGN);
+ if (mz == NULL) {
+ if (rte_errno == EEXIST)
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL)
+ goto free_vq;
+ }
+
+ memset(mz->addr, 0, mz->len);
+ vq->mz = mz;
+ vq->vq_ring_virt_mem = mz->addr;
+
+ if (hw->use_va) {
+ vq->vq_ring_mem = (uintptr_t)mz->addr;
+ vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
+ } else {
+ vq->vq_ring_mem = mz->iova;
+ vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
+ }
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem);
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: %p", vq->vq_ring_virt_mem);
+
+ virtio_init_vring(vq);
+
+ if (virtio_alloc_queue_headers(vq, node, name)) {
+ PMD_INIT_LOG(ERR, "Failed to alloc queue headers");
+ goto free_mz;
+ }
+
+ switch (type) {
+ case VTNET_RQ:
+ if (virtio_rxq_sw_ring_alloc(vq, node))
+ goto free_hdr_mz;
+ break;
+ case VTNET_TQ:
+ virtqueue_txq_indirect_headers_init(vq);
+ break;
+ }
+
+ return vq;
+
+free_hdr_mz:
+ virtio_free_queue_headers(vq);
+free_mz:
+ rte_memzone_free(mz);
+free_vq:
+ rte_free(vq);
+
+ return NULL;
+}
+
+void
+virtqueue_free(struct virtqueue *vq)
+{
+ int type;
+
+ type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
+ switch (type) {
+ case VTNET_RQ:
+ virtio_rxq_sw_ring_free(vq);
+ break;
+ case VTNET_TQ:
+ case VTNET_CQ:
+ virtio_free_queue_headers(vq);
+ break;
+ }
+
+ rte_memzone_free(vq->mz);
+ rte_free(vq);
+}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index d7f8ee79bb..9d4aba11a3 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -385,6 +385,11 @@ int virtqueue_txvq_reset_packed(struct virtqueue *vq);
void virtqueue_txq_indirect_headers_init(struct virtqueue *vq);
+struct virtqueue *virtqueue_alloc(struct virtio_hw *hw, uint16_t index,
+ uint16_t num, int type, int node, const char *name);
+
+void virtqueue_free(struct virtqueue *vq);
+
static inline int
virtqueue_full(const struct virtqueue *vq)
{
--
2.38.1