From: Ouyang Changchun <changchun.ouyang@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v3 2/9] lib_vhost: Support multiple queues in virtio dev
Date: Mon, 15 Jun 2015 15:56:39 +0800 [thread overview]
Message-ID: <1434355006-30583-3-git-send-email-changchun.ouyang@intel.com> (raw)
In-Reply-To: <1434355006-30583-1-git-send-email-changchun.ouyang@intel.com>
Each virtio device can have multiple queues, say 2 or 4, at most 8.
Enabling this feature allows the virtio device/port on the guest to use
a different vCPU to receive/transmit packets for each queue.
In multiple-queue mode, virtio device readiness means that all queues of
the virtio device are ready; likewise, cleaning up/destroying a virtio
device requires clearing all of the queues that belong to it.
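For reference, the vring index convention used throughout this patch is the
usual virtio one: queue pair N owns one RX vring (even index) and one TX vring
(odd index). The helpers below are illustrative only (they are not added by
this patch) and simply restate the mapping the code uses via
VIRTIO_QNUM/VIRTIO_RXQ/VIRTIO_TXQ:

	/* Illustrative only: qp_idx is a queue pair index in [0, num_virt_queues). */
	static inline uint32_t rx_vring_idx(uint32_t qp_idx)
	{
		return qp_idx * VIRTIO_QNUM + VIRTIO_RXQ; /* even: 0, 2, 4, ... */
	}

	static inline uint32_t tx_vring_idx(uint32_t qp_idx)
	{
		return qp_idx * VIRTIO_QNUM + VIRTIO_TXQ; /* odd: 1, 3, 5, ... */
	}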
Changes in v3:
- fix coding style
- check virtqueue idx validity
Changes in v2:
- remove the q_num_set api
- add the qp_num_get api (illustrated in the usage sketch below)
- determine the queue pair num from qemu message
- rework for reset owner message handler
- dynamically alloc mem for dev virtqueue
- queue pair num could be 0x8000
- fix checkpatch errors
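As a usage illustration of the qp_num_get api above, here is a minimal sketch
of how an application might drive all queue pairs of one device. It is not
part of the patch; it assumes 'dev' comes from the new_device() callback, that
'mbuf_pool' was created elsewhere, and it uses a burst size of 32:

	#include <rte_mbuf.h>
	#include <rte_virtio_net.h>

	static void
	poll_all_queue_pairs(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
	{
		struct rte_mbuf *pkts[32];
		uint16_t qp, i, n;

		for (qp = 0; qp < rte_vhost_qp_num_get(dev); qp++) {
			uint16_t rxq = qp * VIRTIO_QNUM + VIRTIO_RXQ;
			uint16_t txq = qp * VIRTIO_QNUM + VIRTIO_TXQ;

			/* Packets the guest transmitted on this queue pair. */
			n = rte_vhost_dequeue_burst(dev, txq, mbuf_pool, pkts, 32);
			if (n == 0)
				continue;

			/* Demo only: loop them back into the guest RX vring.
			 * Enqueue copies the data, so the mbufs must be freed. */
			rte_vhost_enqueue_burst(dev, rxq, pkts, n);
			for (i = 0; i < n; i++)
				rte_pktmbuf_free(pkts[i]);
		}
	}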
Signed-off-by: Changchun Ouyang <changchun.ouyang@intel.com>
---
lib/librte_vhost/rte_virtio_net.h | 10 +-
lib/librte_vhost/vhost-net.h | 1 +
lib/librte_vhost/vhost_rxtx.c | 49 +++++---
lib/librte_vhost/vhost_user/vhost-net-user.c | 4 +-
lib/librte_vhost/vhost_user/virtio-net-user.c | 76 +++++++++---
lib/librte_vhost/vhost_user/virtio-net-user.h | 2 +
lib/librte_vhost/virtio-net.c | 161 +++++++++++++++++---------
7 files changed, 216 insertions(+), 87 deletions(-)
diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 5d38185..873be3e 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -59,7 +59,6 @@ struct rte_mbuf;
/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1
-
/* Enum for virtqueue management. */
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
@@ -96,13 +95,14 @@ struct vhost_virtqueue {
* Device structure contains all configuration information relating to the device.
*/
struct virtio_net {
- struct vhost_virtqueue *virtqueue[VIRTIO_QNUM]; /**< Contains all virtqueue information. */
struct virtio_memory *mem; /**< QEMU memory and memory region information. */
+ struct vhost_virtqueue **virtqueue; /**< Contains all virtqueue information. */
uint64_t features; /**< Negotiated feature set. */
uint64_t device_fh; /**< device identifier. */
uint32_t flags; /**< Device flags. Only used to check if device is running on data core. */
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
char ifname[IF_NAME_SZ]; /**< Name of the tap device or socket path. */
+ uint32_t num_virt_queues; /**< Number of virtqueue pairs. */
void *priv; /**< private context */
} __rte_cache_aligned;
@@ -220,4 +220,10 @@ uint16_t rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
uint16_t rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
+/**
+ * This function gets the number of queue pairs of one vhost device.
+ * @return
+ *  Number of queue pairs of the specified virtio device.
+ */
+uint16_t rte_vhost_qp_num_get(struct virtio_net *dev);
#endif /* _VIRTIO_NET_H_ */
diff --git a/lib/librte_vhost/vhost-net.h b/lib/librte_vhost/vhost-net.h
index c69b60b..7dff14d 100644
--- a/lib/librte_vhost/vhost-net.h
+++ b/lib/librte_vhost/vhost-net.h
@@ -115,4 +115,5 @@ struct vhost_net_device_ops {
struct vhost_net_device_ops const *get_virtio_net_callbacks(void);
+int alloc_vring_queue_pair(struct virtio_net *dev, uint16_t qp_idx);
#endif /* _VHOST_NET_CDEV_H_ */
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 2da4a02..d2a7143 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -43,6 +43,18 @@
#define MAX_PKT_BURST 32
/**
+ * Check the virtqueue idx for validity:
+ * return 1 if it is valid, otherwise 0.
+ */
+static inline uint8_t __attribute__((always_inline))
+check_virtqueue_idx(uint16_t virtq_idx, uint8_t is_tx, uint32_t virtq_num)
+{
+ if ((is_tx ^ (virtq_idx & 0x1)) || (virtq_idx >= virtq_num))
+ return 0;
+ return 1;
+}
+
+/**
* This function adds buffers to the virtio devices RX virtqueue. Buffers can
* be received from the physical port or from another virtio device. A packet
* count is returned to indicate the number of packets that are successfully
@@ -67,12 +79,15 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
uint8_t success = 0;
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
- if (unlikely(queue_id != VIRTIO_RXQ)) {
- LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+ if (unlikely(check_virtqueue_idx(queue_id, 0,
+ VIRTIO_QNUM * dev->num_virt_queues) == 0)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
+ __func__, dev->device_fh, queue_id);
return 0;
}
- vq = dev->virtqueue[VIRTIO_RXQ];
+ vq = dev->virtqueue[queue_id];
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
/*
@@ -188,8 +203,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
}
static inline uint32_t __attribute__((always_inline))
-copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
- uint16_t res_end_idx, struct rte_mbuf *pkt)
+copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t queue_id,
+ uint16_t res_base_idx, uint16_t res_end_idx,
+ struct rte_mbuf *pkt)
{
uint32_t vec_idx = 0;
uint32_t entry_success = 0;
@@ -217,9 +233,9 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
* Convert from gpa to vva
* (guest physical addr -> vhost virtual addr)
*/
- vq = dev->virtqueue[VIRTIO_RXQ];
+ vq = dev->virtqueue[queue_id];
vb_addr =
gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
vb_hdr_addr = vb_addr;
/* Prefetch buffer address. */
@@ -407,11 +423,15 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
dev->device_fh);
- if (unlikely(queue_id != VIRTIO_RXQ)) {
- LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+ if (unlikely(check_virtqueue_idx(queue_id, 0,
+ VIRTIO_QNUM * dev->num_virt_queues) == 0)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
+ __func__, dev->device_fh, queue_id);
+ return 0;
}
- vq = dev->virtqueue[VIRTIO_RXQ];
+ vq = dev->virtqueue[queue_id];
count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
if (count == 0)
@@ -493,7 +513,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
res_end_idx = res_cur_idx;
- entry_success = copy_from_mbuf_to_vring(dev, res_base_idx,
+ entry_success = copy_from_mbuf_to_vring(dev, queue_id, res_base_idx,
res_end_idx, pkts[pkt_idx]);
rte_compiler_barrier();
@@ -543,12 +563,15 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
uint16_t free_entries, entry_success = 0;
uint16_t avail_idx;
- if (unlikely(queue_id != VIRTIO_TXQ)) {
- LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
+ if (unlikely(check_virtqueue_idx(queue_id, 1,
+ VIRTIO_QNUM * dev->num_virt_queues) == 0)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "%s (%"PRIu64"): virtqueue idx:%d invalid.\n",
+ __func__, dev->device_fh, queue_id);
return 0;
}
- vq = dev->virtqueue[VIRTIO_TXQ];
+ vq = dev->virtqueue[queue_id];
avail_idx = *((volatile uint16_t *)&vq->avail->idx);
/* If there are no available buffers then return. */
diff --git a/lib/librte_vhost/vhost_user/vhost-net-user.c b/lib/librte_vhost/vhost_user/vhost-net-user.c
index 31f1215..b66a653 100644
--- a/lib/librte_vhost/vhost_user/vhost-net-user.c
+++ b/lib/librte_vhost/vhost_user/vhost-net-user.c
@@ -378,7 +378,9 @@ vserver_message_handler(int connfd, void *dat, int *remove)
ops->set_owner(ctx);
break;
case VHOST_USER_RESET_OWNER:
- ops->reset_owner(ctx);
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "(%"PRIu64") VHOST_NET_RESET_OWNER\n", ctx.fh);
+ user_reset_owner(ctx, &msg.payload.state);
break;
case VHOST_USER_SET_MEM_TABLE:
diff --git a/lib/librte_vhost/vhost_user/virtio-net-user.c b/lib/librte_vhost/vhost_user/virtio-net-user.c
index c1ffc38..b4de86d 100644
--- a/lib/librte_vhost/vhost_user/virtio-net-user.c
+++ b/lib/librte_vhost/vhost_user/virtio-net-user.c
@@ -209,30 +209,46 @@ static int
virtio_is_ready(struct virtio_net *dev)
{
struct vhost_virtqueue *rvq, *tvq;
+ uint32_t q_idx;
/* mq support in future.*/
- rvq = dev->virtqueue[VIRTIO_RXQ];
- tvq = dev->virtqueue[VIRTIO_TXQ];
- if (rvq && tvq && rvq->desc && tvq->desc &&
- (rvq->kickfd != (eventfd_t)-1) &&
- (rvq->callfd != (eventfd_t)-1) &&
- (tvq->kickfd != (eventfd_t)-1) &&
- (tvq->callfd != (eventfd_t)-1)) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "virtio is now ready for processing.\n");
- return 1;
+ for (q_idx = 0; q_idx < dev->num_virt_queues; q_idx++) {
+ uint32_t virt_rx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_RXQ;
+ uint32_t virt_tx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+
+ rvq = dev->virtqueue[virt_rx_q_idx];
+ tvq = dev->virtqueue[virt_tx_q_idx];
+ if ((rvq == NULL) || (tvq == NULL) ||
+ (rvq->desc == NULL) || (tvq->desc == NULL) ||
+ (rvq->kickfd == (eventfd_t)-1) ||
+ (rvq->callfd == (eventfd_t)-1) ||
+ (tvq->kickfd == (eventfd_t)-1) ||
+ (tvq->callfd == (eventfd_t)-1)) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "virtio isn't ready for processing.\n");
+ return 0;
+ }
}
RTE_LOG(INFO, VHOST_CONFIG,
- "virtio isn't ready for processing.\n");
- return 0;
+ "virtio is now ready for processing.\n");
+ return 1;
}
void
user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
struct vhost_vring_file file;
+ struct virtio_net *dev = get_device(ctx);
+ uint32_t cur_qp_idx;
file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+ cur_qp_idx = (file.index & (~0x1)) >> 1;
+
+ if (dev->num_virt_queues < cur_qp_idx + 1) {
+ if (alloc_vring_queue_pair(dev, cur_qp_idx) == 0)
+ dev->num_virt_queues = cur_qp_idx + 1;
+ }
+
if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
file.fd = -1;
else
@@ -290,13 +306,37 @@ user_get_vring_base(struct vhost_device_ctx ctx,
* sent and only sent in vhost_vring_stop.
* TODO: cleanup the vring, it isn't usable since here.
*/
- if (((int)dev->virtqueue[VIRTIO_RXQ]->kickfd) >= 0) {
- close(dev->virtqueue[VIRTIO_RXQ]->kickfd);
- dev->virtqueue[VIRTIO_RXQ]->kickfd = (eventfd_t)-1;
+ if (((int)dev->virtqueue[state->index]->kickfd) >= 0) {
+ close(dev->virtqueue[state->index]->kickfd);
+ dev->virtqueue[state->index]->kickfd = (eventfd_t)-1;
}
- if (((int)dev->virtqueue[VIRTIO_TXQ]->kickfd) >= 0) {
- close(dev->virtqueue[VIRTIO_TXQ]->kickfd);
- dev->virtqueue[VIRTIO_TXQ]->kickfd = (eventfd_t)-1;
+
+ return 0;
+}
+
+/*
+ * When virtio is stopped, QEMU will send us the RESET_OWNER message.
+ */
+int
+user_reset_owner(struct vhost_device_ctx ctx,
+ struct vhost_vring_state *state)
+{
+ struct virtio_net *dev = get_device(ctx);
+
+ /* We have to stop the queue (virtio) if it is running. */
+ if (dev->flags & VIRTIO_DEV_RUNNING)
+ notify_ops->destroy_device(dev);
+
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "reset owner --- state idx:%d state num:%d\n", state->index, state->num);
+ /*
+ * Based on current qemu vhost-user implementation, this message is
+ * sent and only sent in vhost_net_stop_one.
+ * TODO: cleanup the vring, it isn't usable since here.
+ */
+ if (((int)dev->virtqueue[state->index]->kickfd) >= 0) {
+ close(dev->virtqueue[state->index]->kickfd);
+ dev->virtqueue[state->index]->kickfd = (eventfd_t)-1;
}
return 0;
diff --git a/lib/librte_vhost/vhost_user/virtio-net-user.h b/lib/librte_vhost/vhost_user/virtio-net-user.h
index df24860..2429836 100644
--- a/lib/librte_vhost/vhost_user/virtio-net-user.h
+++ b/lib/librte_vhost/vhost_user/virtio-net-user.h
@@ -46,4 +46,6 @@ void user_set_vring_kick(struct vhost_device_ctx, struct VhostUserMsg *);
int user_get_vring_base(struct vhost_device_ctx, struct vhost_vring_state *);
void user_destroy_device(struct vhost_device_ctx);
+
+int user_reset_owner(struct vhost_device_ctx ctx, struct vhost_vring_state *state);
#endif
diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
index fced2ab..aaea7d5 100644
--- a/lib/librte_vhost/virtio-net.c
+++ b/lib/librte_vhost/virtio-net.c
@@ -67,10 +67,10 @@ static struct virtio_net_config_ll *ll_root;
#define VHOST_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
(1ULL << VIRTIO_NET_F_CTRL_RX) | \
- (1ULL << VHOST_F_LOG_ALL))
+ (1ULL << VHOST_F_LOG_ALL) | \
+ (1ULL << VIRTIO_NET_F_MQ))
static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
-
/*
* Converts QEMU virtual address to Vhost virtual address. This function is
* used to convert the ring addresses to our address space.
@@ -178,6 +178,8 @@ add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
static void
cleanup_device(struct virtio_net *dev)
{
+ uint32_t qp_idx;
+
/* Unmap QEMU memory file if mapped. */
if (dev->mem) {
munmap((void *)(uintptr_t)dev->mem->mapped_address,
@@ -186,14 +188,18 @@ cleanup_device(struct virtio_net *dev)
}
/* Close any event notifiers opened by device. */
- if ((int)dev->virtqueue[VIRTIO_RXQ]->callfd >= 0)
- close((int)dev->virtqueue[VIRTIO_RXQ]->callfd);
- if ((int)dev->virtqueue[VIRTIO_RXQ]->kickfd >= 0)
- close((int)dev->virtqueue[VIRTIO_RXQ]->kickfd);
- if ((int)dev->virtqueue[VIRTIO_TXQ]->callfd >= 0)
- close((int)dev->virtqueue[VIRTIO_TXQ]->callfd);
- if ((int)dev->virtqueue[VIRTIO_TXQ]->kickfd >= 0)
- close((int)dev->virtqueue[VIRTIO_TXQ]->kickfd);
+ for (qp_idx = 0; qp_idx < dev->num_virt_queues; qp_idx++) {
+ uint32_t virt_rx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_RXQ;
+ uint32_t virt_tx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+ if ((int)dev->virtqueue[virt_rx_q_idx]->callfd >= 0)
+ close((int)dev->virtqueue[virt_rx_q_idx]->callfd);
+ if ((int)dev->virtqueue[virt_rx_q_idx]->kickfd >= 0)
+ close((int)dev->virtqueue[virt_rx_q_idx]->kickfd);
+ if ((int)dev->virtqueue[virt_tx_q_idx]->callfd >= 0)
+ close((int)dev->virtqueue[virt_tx_q_idx]->callfd);
+ if ((int)dev->virtqueue[virt_tx_q_idx]->kickfd >= 0)
+ close((int)dev->virtqueue[virt_tx_q_idx]->kickfd);
+ }
}
/*
@@ -202,9 +208,17 @@ cleanup_device(struct virtio_net *dev)
static void
free_device(struct virtio_net_config_ll *ll_dev)
{
- /* Free any malloc'd memory */
- free(ll_dev->dev.virtqueue[VIRTIO_RXQ]);
- free(ll_dev->dev.virtqueue[VIRTIO_TXQ]);
+ uint32_t qp_idx;
+
+ /* Free any malloc'd memory. */
+ /* Free every queue pair. */
+ for (qp_idx = 0; qp_idx < ll_dev->dev.num_virt_queues; qp_idx++) {
+ uint32_t virt_rx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_RXQ;
+ free(ll_dev->dev.virtqueue[virt_rx_q_idx]);
+ }
+ free(ll_dev->dev.virtqueue);
free(ll_dev);
}
@@ -238,6 +252,27 @@ rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
}
/*
+ * Initialise all variables in vring queue pair.
+ */
+static void
+init_vring_queue_pair(struct virtio_net *dev, uint16_t qp_idx)
+{
+ uint32_t virt_rx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_RXQ;
+ uint32_t virt_tx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+ memset(dev->virtqueue[virt_rx_q_idx], 0, sizeof(struct vhost_virtqueue));
+ memset(dev->virtqueue[virt_tx_q_idx], 0, sizeof(struct vhost_virtqueue));
+
+ dev->virtqueue[virt_rx_q_idx]->kickfd = (eventfd_t)-1;
+ dev->virtqueue[virt_rx_q_idx]->callfd = (eventfd_t)-1;
+ dev->virtqueue[virt_tx_q_idx]->kickfd = (eventfd_t)-1;
+ dev->virtqueue[virt_tx_q_idx]->callfd = (eventfd_t)-1;
+
+ /* Backends are set to -1 indicating an inactive device. */
+ dev->virtqueue[virt_rx_q_idx]->backend = VIRTIO_DEV_STOPPED;
+ dev->virtqueue[virt_tx_q_idx]->backend = VIRTIO_DEV_STOPPED;
+}
+
+/*
* Initialise all variables in device structure.
*/
static void
@@ -254,17 +289,31 @@ init_device(struct virtio_net *dev)
/* Set everything to 0. */
memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
(sizeof(struct virtio_net) - (size_t)vq_offset));
- memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
- memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));
- dev->virtqueue[VIRTIO_RXQ]->kickfd = (eventfd_t)-1;
- dev->virtqueue[VIRTIO_RXQ]->callfd = (eventfd_t)-1;
- dev->virtqueue[VIRTIO_TXQ]->kickfd = (eventfd_t)-1;
- dev->virtqueue[VIRTIO_TXQ]->callfd = (eventfd_t)-1;
+ init_vring_queue_pair(dev, 0);
+ dev->num_virt_queues = 1;
+}
- /* Backends are set to -1 indicating an inactive device. */
- dev->virtqueue[VIRTIO_RXQ]->backend = VIRTIO_DEV_STOPPED;
- dev->virtqueue[VIRTIO_TXQ]->backend = VIRTIO_DEV_STOPPED;
+/*
+ * Alloc mem for vring queue pair.
+ */
+int
+alloc_vring_queue_pair(struct virtio_net *dev, uint16_t qp_idx)
+{
+ struct vhost_virtqueue *virtqueue = NULL;
+ uint32_t virt_rx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_RXQ;
+ uint32_t virt_tx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+
+ virtqueue = malloc(sizeof(struct vhost_virtqueue) * VIRTIO_QNUM);
+ if (virtqueue == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to allocate memory for virt qp:%d.\n", qp_idx);
+ return -1;
+ }
+
+ dev->virtqueue[virt_rx_q_idx] = virtqueue;
+ dev->virtqueue[virt_tx_q_idx] = virtqueue + VIRTIO_TXQ;
+
+ /* Initialise the newly allocated queue pair (fds to -1, backend stopped). */
+ init_vring_queue_pair(dev, qp_idx);
+
+ return 0;
}
/*
@@ -276,7 +325,6 @@ static int
new_device(struct vhost_device_ctx ctx)
{
struct virtio_net_config_ll *new_ll_dev;
- struct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;
/* Setup device and virtqueues. */
new_ll_dev = malloc(sizeof(struct virtio_net_config_ll));
@@ -287,28 +335,22 @@ new_device(struct vhost_device_ctx ctx)
return -1;
}
- virtqueue_rx = malloc(sizeof(struct vhost_virtqueue));
- if (virtqueue_rx == NULL) {
- free(new_ll_dev);
+ new_ll_dev->dev.virtqueue =
+ malloc(VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX * VIRTIO_QNUM * sizeof(struct vhost_virtqueue *));
+ if (new_ll_dev->dev.virtqueue == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to allocate memory for rxq.\n",
+ "(%"PRIu64") Failed to allocate memory for dev.virtqueue.\n",
ctx.fh);
+ free(new_ll_dev);
return -1;
}
- virtqueue_tx = malloc(sizeof(struct vhost_virtqueue));
- if (virtqueue_tx == NULL) {
- free(virtqueue_rx);
+ if (alloc_vring_queue_pair(&new_ll_dev->dev, 0) == -1) {
+ free(new_ll_dev->dev.virtqueue);
free(new_ll_dev);
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%"PRIu64") Failed to allocate memory for txq.\n",
- ctx.fh);
return -1;
}
- new_ll_dev->dev.virtqueue[VIRTIO_RXQ] = virtqueue_rx;
- new_ll_dev->dev.virtqueue[VIRTIO_TXQ] = virtqueue_tx;
-
/* Initialise device and virtqueues. */
init_device(&new_ll_dev->dev);
@@ -392,7 +434,7 @@ set_owner(struct vhost_device_ctx ctx)
* Called from CUSE IOCTL: VHOST_RESET_OWNER
*/
static int
-reset_owner(struct vhost_device_ctx ctx)
+reset_owner(__rte_unused struct vhost_device_ctx ctx)
{
struct virtio_net_config_ll *ll_dev;
@@ -430,6 +472,7 @@ static int
set_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
struct virtio_net *dev;
+ uint32_t q_idx;
dev = get_device(ctx);
if (dev == NULL)
@@ -441,22 +484,26 @@ set_features(struct vhost_device_ctx ctx, uint64_t *pu)
dev->features = *pu;
/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
- if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") Mergeable RX buffers enabled\n",
- dev->device_fh);
- dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
- sizeof(struct virtio_net_hdr_mrg_rxbuf);
- dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
- sizeof(struct virtio_net_hdr_mrg_rxbuf);
- } else {
- LOG_DEBUG(VHOST_CONFIG,
- "(%"PRIu64") Mergeable RX buffers disabled\n",
- dev->device_fh);
- dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
- sizeof(struct virtio_net_hdr);
- dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
- sizeof(struct virtio_net_hdr);
+ for (q_idx = 0; q_idx < dev->num_virt_queues; q_idx++) {
+ uint32_t virt_rx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_RXQ;
+ uint32_t virt_tx_q_idx = q_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+ if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") Mergeable RX buffers enabled\n",
+ dev->device_fh);
+ dev->virtqueue[virt_rx_q_idx]->vhost_hlen =
+ sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ dev->virtqueue[virt_tx_q_idx]->vhost_hlen =
+ sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ } else {
+ LOG_DEBUG(VHOST_CONFIG,
+ "(%"PRIu64") Mergeable RX buffers disabled\n",
+ dev->device_fh);
+ dev->virtqueue[virt_rx_q_idx]->vhost_hlen =
+ sizeof(struct virtio_net_hdr);
+ dev->virtqueue[virt_tx_q_idx]->vhost_hlen =
+ sizeof(struct virtio_net_hdr);
+ }
}
return 0;
}
@@ -737,6 +784,14 @@ int rte_vhost_feature_enable(uint64_t feature_mask)
return -1;
}
+uint16_t rte_vhost_qp_num_get(struct virtio_net *dev)
+{
+ if (dev == NULL)
+ return 0;
+
+ return dev->num_virt_queues;
+}
+
/*
* Register ops so that we can add/remove device to data core.
*/
--
1.8.4.2