From: Huawei Xie <huawei.xie@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v5 06/11] lib/librte_vhost: fixes serious coding style issues
Date: Fri, 26 Sep 2014 17:45:53 +0800
Message-ID: <1411724758-27488-7-git-send-email-huawei.xie@intel.com>
In-Reply-To: <1411724758-27488-1-git-send-email-huawei.xie@intel.com>
Fix coding style issues reported by checkpatch: struct brace placement, do {} while (0) macro termination, pointer-cast spacing, switch/case indentation, and overlong lines.
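For illustration, the two rules that account for most of the churn are shown below on a small, self-contained example (editor's sketch only; SWAP_INT and struct point are invented for this note and do not appear in the patch): multi-statement macros end in "while (0)" with a space before the parenthesis and no stray backslash after the closing line, and the opening brace of a struct definition sits on the same line as the tag. The remaining hunks wrap overlong lines, attach the '*' of pointer casts to the type, and re-indent switch/case labels. The report itself presumably came from the kernel's scripts/checkpatch.pl run over the vhost sources; the exact invocation is not recorded here.

    #include <stdio.h>

    /* Multi-statement macro: body wrapped in do { ... } while (0). */
    #define SWAP_INT(a, b) do { \
            int tmp_ = (a);     \
            (a) = (b);          \
            (b) = tmp_;         \
    } while (0)

    /* Struct definition: opening brace on the same line as the tag. */
    struct point {
            int x;  /* X coordinate. */
            int y;  /* Y coordinate. */
    };

    int main(void)
    {
            struct point p = { 1, 2 };

            SWAP_INT(p.x, p.y);
            printf("%d %d\n", p.x, p.y);    /* prints "2 1" */
            return 0;
    }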
Signed-off-by: Huawei Xie <huawei.xie@intel.com>
---
lib/librte_vhost/rte_virtio_net.h | 55 +++++----
lib/librte_vhost/vhost-net-cdev.c | 236 +++++++++++++++++++-------------------
lib/librte_vhost/vhost-net-cdev.h | 33 +++---
lib/librte_vhost/vhost_rxtx.c | 57 +++++----
lib/librte_vhost/virtio-net.c | 149 ++++++++++++++----------
5 files changed, 281 insertions(+), 249 deletions(-)
diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 99ddfc1..06cbdf7 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -66,19 +66,18 @@ struct buf_vector {
/**
* Structure contains variables relevant to RX/TX virtqueues.
*/
-struct vhost_virtqueue
-{
- struct vring_desc *desc; /* Virtqueue descriptor ring. */
- struct vring_avail *avail; /* Virtqueue available ring. */
- struct vring_used *used; /* Virtqueue used ring. */
- uint32_t size; /* Size of descriptor ring. */
- uint32_t backend; /* Backend value to determine if device should started/stopped. */
- uint16_t vhost_hlen; /* Vhost header length (varies depending on RX merge buffers. */
+struct vhost_virtqueue {
+ struct vring_desc *desc; /* Virtqueue descriptor ring. */
+ struct vring_avail *avail; /* Virtqueue available ring. */
+ struct vring_used *used; /* Virtqueue used ring. */
+ uint32_t size; /* Size of descriptor ring. */
+ uint32_t backend; /* Backend value to determine if device should started/stopped. */
+ uint16_t vhost_hlen; /* Vhost header length (varies depending on RX merge buffers. */
volatile uint16_t last_used_idx; /* Last index used on the available ring */
volatile uint16_t last_used_idx_res; /* Used for multiple devices reserving buffers. */
- eventfd_t callfd; /* Currently unused as polling mode is enabled. */
- eventfd_t kickfd; /* Used to notify the guest (trigger interrupt). */
- struct buf_vector buf_vec[BUF_VECTOR_MAX]; /**< for scatter RX. */
+ eventfd_t callfd; /* Currently unused as polling mode is enabled. */
+ eventfd_t kickfd; /* Used to notify the guest (trigger interrupt). */
+ struct buf_vector buf_vec[BUF_VECTOR_MAX]; /**< for scatter RX. */
} __rte_cache_aligned;
@@ -86,11 +85,11 @@ struct vhost_virtqueue
* Information relating to memory regions including offsets to addresses in QEMUs memory file.
*/
struct virtio_memory_regions {
- uint64_t guest_phys_address; /* Base guest physical address of region. */
+ uint64_t guest_phys_address; /* Base guest physical address of region. */
uint64_t guest_phys_address_end; /* End guest physical address of region. */
- uint64_t memory_size; /* Size of region. */
- uint64_t userspace_address; /* Base userspace address of region. */
- uint64_t address_offset; /* Offset of region for address translation. */
+ uint64_t memory_size; /* Size of region. */
+ uint64_t userspace_address; /* Base userspace address of region. */
+ uint64_t address_offset; /* Offset of region for address translation. */
};
@@ -98,31 +97,31 @@ struct virtio_memory_regions {
* Memory structure includes region and mapping information.
*/
struct virtio_memory {
- uint64_t base_address; /**< Base QEMU userspace address of the memory file. */
- uint64_t mapped_address; /**< Mapped address of memory file base in our applications memory space. */
- uint64_t mapped_size; /**< Total size of memory file. */
- uint32_t nregions; /**< Number of memory regions. */
- struct virtio_memory_regions regions[0]; /**< Memory region information. */
+ uint64_t base_address; /**< Base QEMU userspace address of the memory file. */
+ uint64_t mapped_address; /**< Mapped address of memory file base in our applications memory space. */
+ uint64_t mapped_size; /**< Total size of memory file. */
+ uint32_t nregions; /**< Number of memory regions. */
+ struct virtio_memory_regions regions[0]; /**< Memory region information. */
};
/**
* Device structure contains all configuration information relating to the device.
*/
struct virtio_net {
- struct vhost_virtqueue *virtqueue[VIRTIO_QNUM]; /**< Contains all virtqueue information. */
- struct virtio_memory *mem; /**< QEMU memory and memory region information. */
- uint64_t features; /**< Negotiated feature set. */
- uint64_t device_fh; /**< Device identifier. */
- uint32_t flags; /**< Device flags. Only used to check if device is running on data core. */
- void *priv;
+ struct vhost_virtqueue *virtqueue[VIRTIO_QNUM]; /**< Contains all virtqueue information. */
+ struct virtio_memory *mem; /**< QEMU memory and memory region information. */
+ uint64_t features; /**< Negotiated feature set. */
+ uint64_t device_fh; /**< Device identifier. */
+ uint32_t flags; /**< Device flags. Only used to check if device is running on data core. */
+ void *priv; /**< User context. */
} __rte_cache_aligned;
/**
* Device operations to add/remove device.
*/
struct virtio_net_device_ops {
- int (*new_device)(struct virtio_net *); /**< Add device. */
- void (*destroy_device)(struct virtio_net *); /**< Remove device. */
+ int (*new_device)(struct virtio_net *); /**< Add device. */
+ void (*destroy_device)(struct virtio_net *); /**< Remove device. */
};
diff --git a/lib/librte_vhost/vhost-net-cdev.c b/lib/librte_vhost/vhost-net-cdev.c
index e73bf23..499e9b3 100644
--- a/lib/librte_vhost/vhost-net-cdev.c
+++ b/lib/librte_vhost/vhost-net-cdev.c
@@ -46,9 +46,9 @@
#include "vhost-net-cdev.h"
-#define FUSE_OPT_DUMMY "\0\0"
-#define FUSE_OPT_FORE "-f\0\0"
-#define FUSE_OPT_NOMULTI "-s\0\0"
+#define FUSE_OPT_DUMMY "\0\0"
+#define FUSE_OPT_FORE "-f\0\0"
+#define FUSE_OPT_NOMULTI "-s\0\0"
static const uint32_t default_major = 231;
static const uint32_t default_minor = 1;
@@ -113,61 +113,61 @@ vhost_net_release(fuse_req_t req, struct fuse_file_info *fi)
* Boilerplate code for CUSE IOCTL
* Implicit arguments: ctx, req, result.
*/
-#define VHOST_IOCTL(func) do { \
- result = (func)(ctx); \
- fuse_reply_ioctl(req, result, NULL, 0); \
-} while(0)
+#define VHOST_IOCTL(func) do { \
+ result = (func)(ctx); \
+ fuse_reply_ioctl(req, result, NULL, 0); \
+} while (0)
/*
* Boilerplate IOCTL RETRY
* Implicit arguments: req.
*/
-#define VHOST_IOCTL_RETRY(size_r, size_w) do { \
- struct iovec iov_r = { arg, (size_r) }; \
- struct iovec iov_w = { arg, (size_w) }; \
+#define VHOST_IOCTL_RETRY(size_r, size_w) do { \
+ struct iovec iov_r = { arg, (size_r) }; \
+ struct iovec iov_w = { arg, (size_w) }; \
fuse_reply_ioctl_retry(req, &iov_r, (size_r)?1:0, &iov_w, (size_w)?1:0); \
-} while(0) \
+} while (0)
/*
* Boilerplate code for CUSE Read IOCTL
* Implicit arguments: ctx, req, result, in_bufsz, in_buf.
*/
-#define VHOST_IOCTL_R(type, var, func) do { \
- if (!in_bufsz) { \
- VHOST_IOCTL_RETRY(sizeof(type), 0); \
- } else { \
- (var) = *(const type * ) in_buf; \
- result = func(ctx, &(var)); \
- fuse_reply_ioctl(req, result, NULL, 0); \
- } \
-} while(0) \
+#define VHOST_IOCTL_R(type, var, func) do { \
+ if (!in_bufsz) { \
+ VHOST_IOCTL_RETRY(sizeof(type), 0); \
+ } else { \
+ (var) = *(const type*) in_buf; \
+ result = func(ctx, &(var)); \
+ fuse_reply_ioctl(req, result, NULL, 0); \
+ } \
+} while (0)
/*
* Boilerplate code for CUSE Write IOCTL
* Implicit arguments: ctx, req, result, out_bufsz.
*/
-#define VHOST_IOCTL_W(type, var, func) do { \
- if (!out_bufsz) { \
- VHOST_IOCTL_RETRY(0, sizeof(type)); \
- } else { \
- result = (func)(ctx, &(var)); \
+#define VHOST_IOCTL_W(type, var, func) do { \
+ if (!out_bufsz) { \
+ VHOST_IOCTL_RETRY(0, sizeof(type)); \
+ } else { \
+ result = (func)(ctx, &(var)); \
fuse_reply_ioctl(req, result, &(var), sizeof(type)); \
- } \
-} while(0) \
+ } \
+} while (0)
/*
* Boilerplate code for CUSE Read/Write IOCTL
* Implicit arguments: ctx, req, result, in_bufsz, in_buf.
*/
-#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do { \
- if (!in_bufsz) { \
- VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2)); \
- } else { \
- (var1) = *(const type1* ) (in_buf); \
- result = (func)(ctx, (var1), &(var2)); \
- fuse_reply_ioctl(req, result, &(var2), sizeof(type2)); \
- } \
-} while(0) \
+#define VHOST_IOCTL_RW(type1, var1, type2, var2, func) do { \
+ if (!in_bufsz) { \
+ VHOST_IOCTL_RETRY(sizeof(type1), sizeof(type2)); \
+ } else { \
+ (var1) = *(const type1*) (in_buf); \
+ result = (func)(ctx, (var1), &(var2)); \
+ fuse_reply_ioctl(req, result, &(var2), sizeof(type2)); \
+ } \
+} while (0)
/*
* The IOCTLs are handled using CUSE/FUSE in userspace. Depending on
@@ -187,106 +187,104 @@ vhost_net_ioctl(fuse_req_t req, int cmd, void *arg,
uint32_t index;
int result = 0;
- switch(cmd)
- {
- case VHOST_NET_SET_BACKEND:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
- break;
+ switch (cmd) {
+ case VHOST_NET_SET_BACKEND:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
+ break;
- case VHOST_GET_FEATURES:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
- VHOST_IOCTL_W(uint64_t, features, ops->get_features);
- break;
+ case VHOST_GET_FEATURES:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
+ VHOST_IOCTL_W(uint64_t, features, ops->get_features);
+ break;
- case VHOST_SET_FEATURES:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
- VHOST_IOCTL_R(uint64_t, features, ops->set_features);
+ case VHOST_SET_FEATURES:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
+ VHOST_IOCTL_R(uint64_t, features, ops->set_features);
break;
- case VHOST_RESET_OWNER:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
- VHOST_IOCTL(ops->reset_owner);
- break;
+ case VHOST_RESET_OWNER:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
+ VHOST_IOCTL(ops->reset_owner);
+ break;
- case VHOST_SET_OWNER:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
- VHOST_IOCTL(ops->set_owner);
- break;
+ case VHOST_SET_OWNER:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
+ VHOST_IOCTL(ops->set_owner);
+ break;
- case VHOST_SET_MEM_TABLE:
- /*TODO fix race condition.*/
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
- static struct vhost_memory mem_temp;
-
- switch(in_bufsz){
- case 0:
- VHOST_IOCTL_RETRY(sizeof(struct vhost_memory), 0);
- break;
-
- case sizeof(struct vhost_memory):
- mem_temp = *(const struct vhost_memory *) in_buf;
-
- if (mem_temp.nregions > 0) {
- VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) + (sizeof(struct vhost_memory_region) * mem_temp.nregions), 0);
- } else {
- result = -1;
- fuse_reply_ioctl(req, result, NULL, 0);
- }
- break;
-
- default:
- result = ops->set_mem_table(ctx, in_buf, mem_temp.nregions);
- if (result)
- fuse_reply_err(req, EINVAL);
- else
- fuse_reply_ioctl(req, result, NULL, 0);
+ case VHOST_SET_MEM_TABLE:
+ /*TODO fix race condition.*/
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
+ static struct vhost_memory mem_temp;
- }
-
- break;
-
- case VHOST_SET_VRING_NUM:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_num);
+ switch (in_bufsz) {
+ case 0:
+ VHOST_IOCTL_RETRY(sizeof(struct vhost_memory), 0);
break;
- case VHOST_SET_VRING_BASE:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_base);
- break;
+ case sizeof(struct vhost_memory):
+ mem_temp = *(const struct vhost_memory *) in_buf;
- case VHOST_GET_VRING_BASE:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
- VHOST_IOCTL_RW(uint32_t, index, struct vhost_vring_state, state, ops->get_vring_base);
- break;
-
- case VHOST_SET_VRING_ADDR:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_addr, addr, ops->set_vring_addr);
- break;
-
- case VHOST_SET_VRING_KICK:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);
- break;
-
- case VHOST_SET_VRING_CALL:
- LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
- VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);
+ if (mem_temp.nregions > 0) {
+ VHOST_IOCTL_RETRY(sizeof(struct vhost_memory) + (sizeof(struct vhost_memory_region) * mem_temp.nregions), 0);
+ } else {
+ result = -1;
+ fuse_reply_ioctl(req, result, NULL, 0);
+ }
break;
default:
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
- result = -1;
- fuse_reply_ioctl(req, result, NULL, 0);
+ result = ops->set_mem_table(ctx, in_buf, mem_temp.nregions);
+ if (result)
+ fuse_reply_err(req, EINVAL);
+ else
+ fuse_reply_ioctl(req, result, NULL, 0);
+
+ }
+
+ break;
+
+ case VHOST_SET_VRING_NUM:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_num);
+ break;
+
+ case VHOST_SET_VRING_BASE:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_base);
+ break;
+
+ case VHOST_GET_VRING_BASE:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
+ VHOST_IOCTL_RW(uint32_t, index, struct vhost_vring_state, state, ops->get_vring_base);
+ break;
+
+ case VHOST_SET_VRING_ADDR:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_addr, addr, ops->set_vring_addr);
+ break;
+
+ case VHOST_SET_VRING_KICK:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);
+ break;
+
+ case VHOST_SET_VRING_CALL:
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
+ VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);
+ break;
+
+ default:
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
+ result = -1;
+ fuse_reply_ioctl(req, result, NULL, 0);
}
- if (result < 0) {
+ if (result < 0)
LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
- } else {
+ else
LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
- }
}
/*
diff --git a/lib/librte_vhost/vhost-net-cdev.h b/lib/librte_vhost/vhost-net-cdev.h
index d9a5a9a..f685f2b 100644
--- a/lib/librte_vhost/vhost-net-cdev.h
+++ b/lib/librte_vhost/vhost-net-cdev.h
@@ -76,10 +76,9 @@
/*
* Structure used to identify device context.
*/
-struct vhost_device_ctx
-{
+struct vhost_device_ctx {
pid_t pid; /* PID of process calling the IOCTL. */
- uint64_t fh; /* Populated with fi->fh to track the device index. */
+ uint64_t fh; /* Populated with fi->fh to track the device index. */
};
/*
@@ -87,26 +86,26 @@ struct vhost_device_ctx
* functions are called in CUSE context and are used to configure devices.
*/
struct vhost_net_device_ops {
- int (* new_device) (struct vhost_device_ctx);
- void (* destroy_device) (struct vhost_device_ctx);
+ int (*new_device)(struct vhost_device_ctx);
+ void (*destroy_device)(struct vhost_device_ctx);
- int (* get_features) (struct vhost_device_ctx, uint64_t *);
- int (* set_features) (struct vhost_device_ctx, uint64_t *);
+ int (*get_features)(struct vhost_device_ctx, uint64_t *);
+ int (*set_features)(struct vhost_device_ctx, uint64_t *);
- int (* set_mem_table) (struct vhost_device_ctx, const void *, uint32_t);
+ int (*set_mem_table)(struct vhost_device_ctx, const void *, uint32_t);
- int (* set_vring_num) (struct vhost_device_ctx, struct vhost_vring_state *);
- int (* set_vring_addr) (struct vhost_device_ctx, struct vhost_vring_addr *);
- int (* set_vring_base) (struct vhost_device_ctx, struct vhost_vring_state *);
- int (* get_vring_base) (struct vhost_device_ctx, uint32_t, struct vhost_vring_state *);
+ int (*set_vring_num)(struct vhost_device_ctx, struct vhost_vring_state *);
+ int (*set_vring_addr)(struct vhost_device_ctx, struct vhost_vring_addr *);
+ int (*set_vring_base)(struct vhost_device_ctx, struct vhost_vring_state *);
+ int (*get_vring_base)(struct vhost_device_ctx, uint32_t, struct vhost_vring_state *);
- int (* set_vring_kick) (struct vhost_device_ctx, struct vhost_vring_file *);
- int (* set_vring_call) (struct vhost_device_ctx, struct vhost_vring_file *);
+ int (*set_vring_kick)(struct vhost_device_ctx, struct vhost_vring_file *);
+ int (*set_vring_call)(struct vhost_device_ctx, struct vhost_vring_file *);
- int (* set_backend) (struct vhost_device_ctx, struct vhost_vring_file *);
+ int (*set_backend)(struct vhost_device_ctx, struct vhost_vring_file *);
- int (* set_owner) (struct vhost_device_ctx);
- int (* reset_owner) (struct vhost_device_ctx);
+ int (*set_owner)(struct vhost_device_ctx);
+ int (*reset_owner)(struct vhost_device_ctx);
};
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 688e661..7c2c47a 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -50,13 +50,14 @@
* added to the RX queue. This function works when mergeable is disabled.
*/
static inline uint32_t __attribute__((always_inline))
-virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count)
+virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
{
struct vhost_virtqueue *vq;
struct vring_desc *desc;
struct rte_mbuf *buff;
/* The virtio_hdr is initialised to 0. */
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0,0,0,0,0,0},0};
+ struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
uint64_t buff_addr = 0;
uint64_t buff_hdr_addr = 0;
uint32_t head[VHOST_MAX_PKT_BURST], packet_len = 0;
@@ -74,7 +75,10 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
vq = dev->virtqueue[VIRTIO_RXQ];
count = (count > VHOST_MAX_PKT_BURST) ? VHOST_MAX_PKT_BURST : count;
- /* As many data cores may want access to available buffers, they need to be reserved. */
+ /*
+ * As many data cores may want access to available buffers,
+ * they need to be reserved.
+ */
do {
res_base_idx = vq->last_used_idx_res;
avail_idx = *((volatile uint16_t *)&vq->avail->idx);
@@ -89,20 +93,22 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
res_end_idx = res_base_idx + count;
/* vq->last_used_idx_res is atomically updated. */
- /* TODO: Allow to disable cmpset if no concurrency in application */
+ /* TODO: Allow to disable cmpset if no concurrency in application. */
success = rte_atomic16_cmpset(&vq->last_used_idx_res,
res_base_idx, res_end_idx);
/* If there is contention here and failed, try again. */
} while (unlikely(success == 0));
res_cur_idx = res_base_idx;
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
+ dev->device_fh, res_cur_idx, res_end_idx);
/* Prefetch available ring to retrieve indexes. */
rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (head_idx = 0; head_idx < count; head_idx++)
- head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];
+ head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) &
+ (vq->size - 1)];
/*Prefetch descriptor index. */
rte_prefetch0(&vq->desc[head[packet_success]]);
@@ -113,18 +119,18 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
buff = pkts[packet_success];
- /* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
+ /* gpa to vva (guest physical addr -> vhost virtual addr) */
buff_addr = gpa_to_vva(dev, desc->addr);
/* Prefetch buffer address. */
- rte_prefetch0((void*)(uintptr_t)buff_addr);
+ rte_prefetch0((void *)(uintptr_t)buff_addr);
/* Copy virtio_hdr to packet and increment buffer address */
buff_hdr_addr = buff_addr;
packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;
/*
- * If the descriptors are chained the header and data are placed in
- * separate buffers.
+ * If the descriptors are chained the header and data are
+ * placed in separate buffers.
*/
if (desc->flags & VRING_DESC_F_NEXT) {
desc->len = vq->vhost_hlen;
@@ -143,7 +149,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
/* Copy mbuf data to buffer */
- /* TODO fixme for sg mbuf and the case that desc couldn't hold the mbuf data */
+ /* FIXME for sg mbuf and the case that desc couldn't hold the mbuf data */
rte_memcpy((void *)(uintptr_t)buff_addr,
rte_pktmbuf_mtod(buff, const void *),
rte_pktmbuf_data_len(buff));
@@ -152,10 +158,11 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
res_cur_idx++;
packet_success++;
-
- rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void *)&virtio_hdr,
- vq->vhost_hlen);
- VHOST_PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);
+
+ rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
+ (const void *)&virtio_hdr, vq->vhost_hlen);
+ VHOST_PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr,
+ vq->vhost_hlen, 1);
if (res_cur_idx < res_end_idx) {
/* Prefetch descriptor index. */
rte_prefetch0(&vq->desc[head[packet_success]]);
@@ -386,8 +393,8 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
* This function works for mergeable RX.
*/
static inline uint32_t __attribute__((always_inline))
-virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
- uint32_t count)
+virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
{
struct vhost_virtqueue *vq;
uint32_t pkt_idx = 0, entry_success = 0;
@@ -507,7 +514,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf *
}
uint32_t
-rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count)
+rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
{
if (unlikely(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)))
return virtio_dev_merge_rx(dev, queue_id, pkts, count);
@@ -518,7 +526,8 @@ rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mb
uint32_t
-rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint32_t count)
+rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint32_t count)
{
struct rte_mbuf *m, *prev;
struct vhost_virtqueue *vq;
@@ -555,7 +564,8 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_me
/* Limit to MAX_PKT_BURST. */
free_entries = RTE_MIN(free_entries, VHOST_MAX_PKT_BURST);
- LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n",
+ dev->device_fh, free_entries);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < free_entries; i++)
head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
@@ -596,7 +606,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_me
vb_offset = 0;
vb_avail = desc->len;
-
+
seg_avail = 0;
/* Allocate an mbuf and populate the structure. */
m = rte_pktmbuf_alloc(mbuf_pool);
@@ -611,7 +621,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_me
VHOST_PRINT_PACKET(dev, (uintptr_t)vb_addr, desc->len, 0);
-
+
seg_num++;
cur = m;
prev = m;
@@ -681,7 +691,8 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_me
prev->next = cur;
prev = cur;
seg_offset = 0;
- seg_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
+ seg_avail = cur->buf_len -
+ RTE_PKTMBUF_HEADROOM;
}
desc = &vq->desc[desc->next];
diff --git a/lib/librte_vhost/virtio-net.c b/lib/librte_vhost/virtio-net.c
index 9852961..616af5f 100644
--- a/lib/librte_vhost/virtio-net.c
+++ b/lib/librte_vhost/virtio-net.c
@@ -56,16 +56,16 @@
* Device linked list structure for configuration.
*/
struct virtio_net_config_ll {
- struct virtio_net dev; /* Virtio device. */
- struct virtio_net_config_ll *next; /* Next entry on linked list. */
+ struct virtio_net dev; /* Virtio device.*/
+ struct virtio_net_config_ll *next; /* Next entry on linked list.*/
};
static const char eventfd_cdev[] = "/dev/eventfd-link";
/* device ops to add/remove device to data core. */
-static struct virtio_net_device_ops const * notify_ops;
+static struct virtio_net_device_ops const *notify_ops;
/* Root address of the linked list in the configuration core. */
-static struct virtio_net_config_ll *ll_root = NULL;
+static struct virtio_net_config_ll *ll_root;
/* Features supported by this library. */
#define VHOST_SUPPORTED_FEATURES (1ULL << VIRTIO_NET_F_MRG_RXBUF)
@@ -81,16 +81,15 @@ static const uint32_t BUFSIZE = PATH_MAX;
#define PROCMAP_SZ 8
/* Structure containing information gathered from maps file. */
-struct procmap
-{
- uint64_t va_start; /* Start virtual address in file. */
- uint64_t len; /* Size of file. */
- uint64_t pgoff; /* Not used. */
- uint32_t maj; /* Not used. */
- uint32_t min; /* Not used. */
- uint32_t ino; /* Not used. */
- char prot[PROT_SZ]; /* Not used. */
- char fname[PATH_MAX]; /* File name. */
+struct procmap {
+ uint64_t va_start; /* Start virtual address in file. */
+ uint64_t len; /* Size of file. */
+ uint64_t pgoff; /* Not used. */
+ uint32_t maj; /* Not used. */
+ uint32_t min; /* Not used. */
+ uint32_t ino; /* Not used. */
+ char prot[PROT_SZ]; /* Not used. */
+ char fname[PATH_MAX];/* File name. */
};
/*
@@ -109,8 +108,9 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
region = &dev->mem->regions[regionidx];
if ((qemu_va >= region->userspace_address) &&
(qemu_va <= region->userspace_address +
- region->memory_size)) {
- vhost_va = dev->mem->mapped_address + qemu_va - dev->mem->base_address;
+ region->memory_size)) {
+ vhost_va = dev->mem->mapped_address + qemu_va -
+ dev->mem->base_address;
break;
}
}
@@ -121,7 +121,8 @@ qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
* Locate the file containing QEMU's memory space and map it to our address space.
*/
static int
-host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, uint64_t addr)
+host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
+ pid_t pid, uint64_t addr)
{
struct dirent *dptr = NULL;
struct procmap procmap;
@@ -134,20 +135,22 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
char resolved_path[PATH_MAX];
FILE *fmap;
void *map;
- uint8_t found = 0;
- char line[BUFSIZE];
+ uint8_t found = 0;
+ char line[BUFSIZE];
char dlm[] = "- : ";
char *str, *sp, *in[PROCMAP_SZ];
char *end = NULL;
/* Path where mem files are located. */
- snprintf (procdir, PATH_MAX, "/proc/%u/fd/", pid);
+ snprintf(procdir, PATH_MAX, "/proc/%u/fd/", pid);
/* Maps file used to locate mem file. */
- snprintf (mapfile, PATH_MAX, "/proc/%u/maps", pid);
+ snprintf(mapfile, PATH_MAX, "/proc/%u/maps", pid);
fmap = fopen(mapfile, "r");
if (fmap == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to open maps file for pid %d\n", dev->device_fh, pid);
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to open maps file for pid %d\n",
+ dev->device_fh, pid);
return -1;
}
@@ -157,7 +160,8 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
errno = 0;
/* Split line in to fields. */
for (i = 0; i < PROCMAP_SZ; i++) {
- if (((in[i] = strtok_r(str, &dlm[i], &sp)) == NULL) || (errno != 0)) {
+ in[i] = strtok_r(str, &dlm[i], &sp);
+ if ((in[i] == NULL) || (errno != 0)) {
fclose(fmap);
return -1;
}
@@ -220,7 +224,7 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
/* Find the guest memory file among the process fds. */
dp = opendir(procdir);
if (dp == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory \n", dev->device_fh, pid);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory\n", dev->device_fh, pid);
return -1;
}
@@ -229,7 +233,8 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
/* Read the fd directory contents. */
while (NULL != (dptr = readdir(dp))) {
- snprintf (memfile, PATH_MAX, "/proc/%u/fd/%s", pid, dptr->d_name);
+ snprintf(memfile, PATH_MAX, "/proc/%u/fd/%s",
+ pid, dptr->d_name);
realpath(memfile, resolved_path);
if (resolved_path == NULL) {
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to resolve fd directory\n", dev->device_fh);
@@ -257,8 +262,9 @@ host_memory_map (struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, u
return -1;
}
- map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE , MAP_POPULATE|MAP_SHARED, fd, 0);
- close (fd);
+ map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE ,
+ MAP_POPULATE|MAP_SHARED, fd, 0);
+ close(fd);
if (map == MAP_FAILED) {
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Error mapping the file %s for pid %d\n", dev->device_fh, memfile, pid);
@@ -304,9 +310,8 @@ get_device(struct vhost_device_ctx ctx)
ll_dev = get_config_ll_entry(ctx);
/* If a matching entry is found in the linked list, return the device in that entry. */
- if (ll_dev) {
+ if (ll_dev)
return &ll_dev->dev;
- }
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
return NULL;
@@ -351,7 +356,8 @@ cleanup_device(struct virtio_net *dev)
{
/* Unmap QEMU memory file if mapped. */
if (dev->mem) {
- munmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
+ munmap((void *)(uintptr_t)dev->mem->mapped_address,
+ (size_t)dev->mem->mapped_size);
free(dev->mem);
}
@@ -381,7 +387,8 @@ free_device(struct virtio_net_config_ll *ll_dev)
* Remove an entry from the device configuration linked list.
*/
static struct virtio_net_config_ll *
-rm_config_ll_entry(struct virtio_net_config_ll *ll_dev, struct virtio_net_config_ll *ll_dev_last)
+rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
+ struct virtio_net_config_ll *ll_dev_last)
{
/* First remove the device and then clean it up. */
if (ll_dev == ll_root) {
@@ -398,7 +405,8 @@ rm_config_ll_entry(struct virtio_net_config_ll *ll_dev, struct virtio_net_config
} else {
cleanup_device(&ll_dev->dev);
free_device(ll_dev);
- RTE_LOG(ERR, VHOST_CONFIG, "Remove entry from config_ll failed\n");
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Remove entry from config_ll failed\n");
return NULL;
}
}
@@ -416,7 +424,7 @@ init_device(struct virtio_net *dev)
vq_offset = offsetof(struct virtio_net, mem);
/* Set everything to 0. */
- memset((void*)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
+ memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
(sizeof(struct virtio_net) - (size_t)vq_offset));
memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));
@@ -440,14 +448,18 @@ new_device(struct vhost_device_ctx ctx)
/* Setup device and virtqueues. */
new_ll_dev = malloc(sizeof(struct virtio_net_config_ll));
if (new_ll_dev == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev.\n", ctx.fh);
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to allocate memory for dev.\n",
+ ctx.fh);
return -1;
}
virtqueue_rx = malloc(sizeof(struct vhost_virtqueue));
if (virtqueue_rx == NULL) {
free(new_ll_dev);
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for virtqueue_rx.\n", ctx.fh);
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to allocate memory for rxq.\n",
+ ctx.fh);
return -1;
}
@@ -455,7 +467,9 @@ new_device(struct vhost_device_ctx ctx)
if (virtqueue_tx == NULL) {
free(virtqueue_rx);
free(new_ll_dev);
- RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for virtqueue_tx.\n", ctx.fh);
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%"PRIu64") Failed to allocate memory for txq.\n",
+ ctx.fh);
return -1;
}
@@ -573,12 +587,16 @@ set_features(struct vhost_device_ctx ctx, uint64_t *pu)
/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers enabled\n", dev->device_fh);
- dev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
- dev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
+ sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
+ sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else {
LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers disabled\n", dev->device_fh);
- dev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);
- dev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);
+ dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
+ sizeof(struct virtio_net_hdr);
+ dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
+ sizeof(struct virtio_net_hdr);
}
return 0;
}
@@ -590,7 +608,8 @@ set_features(struct vhost_device_ctx ctx, uint64_t *pu)
* storing offsets used to translate buffer addresses.
*/
static int
-set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_t nregions)
+set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
+ uint32_t nregions)
{
struct virtio_net *dev;
struct vhost_memory_region *mem_regions;
@@ -603,12 +622,14 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
return -1;
if (dev->mem) {
- munmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
+ munmap((void *)(uintptr_t)dev->mem->mapped_address,
+ (size_t)dev->mem->mapped_size);
free(dev->mem);
}
/* Malloc the memory structure depending on the number of regions. */
- mem = calloc(1, sizeof(struct virtio_memory) + (sizeof(struct virtio_memory_regions) * nregions));
+ mem = calloc(1, sizeof(struct virtio_memory) +
+ (sizeof(struct virtio_memory_regions) * nregions));
if (mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev->mem.\n", dev->device_fh);
return -1;
@@ -616,19 +637,24 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
mem->nregions = nregions;
- mem_regions = (void*)(uintptr_t)((uint64_t)(uintptr_t)mem_regions_addr + size);
+ mem_regions = (void *)(uintptr_t)
+ ((uint64_t)(uintptr_t)mem_regions_addr + size);
for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
/* Populate the region structure for each region. */
- mem->regions[regionidx].guest_phys_address = mem_regions[regionidx].guest_phys_addr;
- mem->regions[regionidx].guest_phys_address_end = mem->regions[regionidx].guest_phys_address +
+ mem->regions[regionidx].guest_phys_address =
+ mem_regions[regionidx].guest_phys_addr;
+ mem->regions[regionidx].guest_phys_address_end =
+ mem->regions[regionidx].guest_phys_address +
mem_regions[regionidx].memory_size;
- mem->regions[regionidx].memory_size = mem_regions[regionidx].memory_size;
- mem->regions[regionidx].userspace_address = mem_regions[regionidx].userspace_addr;
+ mem->regions[regionidx].memory_size =
+ mem_regions[regionidx].memory_size;
+ mem->regions[regionidx].userspace_address =
+ mem_regions[regionidx].userspace_addr;
LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
- regionidx, (void*)(uintptr_t)mem->regions[regionidx].guest_phys_address,
- (void*)(uintptr_t)mem->regions[regionidx].userspace_address,
+ regionidx, (void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
+ (void *)(uintptr_t)mem->regions[regionidx].userspace_address,
mem->regions[regionidx].memory_size);
/*set the base address mapping*/
@@ -728,19 +754,19 @@ set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
vq = dev->virtqueue[addr->index];
/* The addresses are converted from QEMU virtual to Vhost virtual. */
- vq->desc = (struct vring_desc*)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
+ vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
if (vq->desc == 0) {
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find descriptor ring address.\n", dev->device_fh);
return -1;
}
- vq->avail = (struct vring_avail*)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
+ vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
if (vq->avail == 0) {
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find available ring address.\n", dev->device_fh);
return -1;
}
- vq->used = (struct vring_used*)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
+ vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
if (vq->used == 0) {
RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find used ring address.\n", dev->device_fh);
return -1;
@@ -778,7 +804,8 @@ set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
* We send the virtio device our available ring last used index.
*/
static int
-get_vring_base(struct vhost_device_ctx ctx, uint32_t index, struct vhost_vring_state *state)
+get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
+ struct vhost_vring_state *state)
{
struct virtio_net *dev;
@@ -885,7 +912,7 @@ set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
eventfd_call.target_pid = ctx.pid;
if (eventfd_copy(dev, &eventfd_call))
- return -1;
+ return -1;
return 0;
}
@@ -903,9 +930,8 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
struct virtio_net *dev;
dev = get_device(ctx);
- if (dev == NULL) {
+ if (dev == NULL)
return -1;
- }
/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
dev->virtqueue[file->index]->backend = file->fd;
@@ -917,9 +943,8 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
return notify_ops->new_device(dev);
/* Otherwise we remove it. */
} else
- if (file->fd == VIRTIO_DEV_STOPPED) {
+ if (file->fd == VIRTIO_DEV_STOPPED)
notify_ops->destroy_device(dev);
- }
return 0;
}
@@ -927,8 +952,7 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
* Function pointers are set for the device operations to allow CUSE to call functions
* when an IOCTL, device_add or device_release is received.
*/
-static const struct vhost_net_device_ops vhost_device_ops =
-{
+static const struct vhost_net_device_ops vhost_device_ops = {
.new_device = new_device,
.destroy_device = destroy_device,
@@ -960,7 +984,8 @@ get_virtio_net_callbacks(void)
return &vhost_device_ops;
}
-int rte_vhost_enable_guest_notification(struct virtio_net *dev, uint16_t queue_id, int enable)
+int rte_vhost_enable_guest_notification(struct virtio_net *dev,
+ uint16_t queue_id, int enable)
{
if (enable) {
RTE_LOG(ERR, VHOST_CONFIG, "guest notification isn't supported.\n");
--
1.8.1.4
Thread overview: 23+ messages
2014-09-26 9:45 [dpdk-dev] [PATCH v5 00/11] user space vhost library and vhost example Huawei Xie
2014-09-26 9:45 ` [dpdk-dev] [PATCH v5 01/11] lib/librte_vhost: move src files in vhost example to vhost lib directory Huawei Xie
2014-09-29 19:41 ` Thomas Monjalon
2014-09-30 2:07 ` Xie, Huawei
2014-09-30 4:35 ` Thomas Monjalon
2014-09-26 9:45 ` [dpdk-dev] [PATCH v5 02/11] lib/librte_vhost: refactor vhost lib for subsequent transform Huawei Xie
2014-09-29 19:55 ` Thomas Monjalon
2014-09-29 20:00 ` Thomas Monjalon
2014-09-26 9:45 ` [dpdk-dev] [PATCH v5 03/11] lib/librte_vhost: vhost lib transform Huawei Xie
2014-09-29 19:51 ` Thomas Monjalon
2014-09-26 9:45 ` [dpdk-dev] [PATCH v5 04/11] lib/librte_vhost: merge vhost merge-able rx. merge vhost tx fix Huawei Xie
2014-09-26 9:45 ` [dpdk-dev] [PATCH v5 05/11] lib/librte_vhost: merge Oliver's mbuf change Huawei Xie
2014-09-29 19:44 ` Thomas Monjalon
[not found] ` <C37D651A908B024F974696C65296B57B0F2AF2C0@SHSMSX101.ccr.corp.intel.com>
2014-09-30 3:41 ` Ouyang, Changchun
2014-09-30 4:46 ` Thomas Monjalon
2014-09-30 6:43 ` Xie, Huawei
2014-09-30 8:06 ` Thomas Monjalon
2014-09-26 9:45 ` Huawei Xie [this message]
2014-09-26 9:45 ` [dpdk-dev] [PATCH v5 07/11] lib/librte_vhost: add vhost support in DPDK makefile Huawei Xie
2014-09-26 9:45 ` [dpdk-dev] [PATCH v5 08/11] examples/vhost: copy old vhost example src file Huawei Xie
2014-09-26 9:45 ` [dpdk-dev] [PATCH v5 09/11] examples/vhost: vhost example based on vhost lib API Huawei Xie
2014-09-26 9:45 ` [dpdk-dev] [PATCH v5 10/11] examples/vhost: merge oliver's mbuf changes to vhost example Huawei Xie
2014-09-26 9:45 ` [dpdk-dev] [PATCH v5 11/11] examples/vhost: add vhost example Makefile Huawei Xie