* [dpdk-dev] [PATCH v4 1/5] vhost: unify struct VhostUserMsg usage
From: Nikolay Nikolaev @ 2018-09-22 21:16 UTC (permalink / raw)
To: maxime.coquelin, tiwei.bie, zhihong.wang; +Cc: dev
Do not use the typedef version of struct VhostUserMsg; use the struct form
consistently. Also unify the related parameter name to msg (some handlers
used pmsg).
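For illustration only (the handler name below is hypothetical), this is the
convention applied throughout the file:

	/* before: typedef form, parameter named pmsg */
	static int
	vhost_user_handle_example(struct virtio_net *dev, VhostUserMsg *pmsg);

	/* after: struct form, parameter named msg */
	static int
	vhost_user_handle_example(struct virtio_net *dev, struct VhostUserMsg *msg);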
Signed-off-by: Nikolay Nikolaev <nicknickolaev@gmail.com>
---
lib/librte_vhost/vhost_user.c | 41 +++++++++++++++++++++--------------------
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 63d145b2d..505db3bfc 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -250,7 +250,7 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
*/
static int
vhost_user_set_vring_num(struct virtio_net *dev,
- VhostUserMsg *msg)
+ struct VhostUserMsg *msg)
{
struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
@@ -611,7 +611,7 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
* This function then converts these to our address space.
*/
static int
-vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
+vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
struct vhost_virtqueue *vq;
struct vhost_vring_addr *addr = &msg->payload.addr;
@@ -648,7 +648,7 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
*/
static int
vhost_user_set_vring_base(struct virtio_net *dev,
- VhostUserMsg *msg)
+ struct VhostUserMsg *msg)
{
dev->virtqueue[msg->payload.state.index]->last_used_idx =
msg->payload.state.num;
@@ -780,10 +780,10 @@ vhost_memory_changed(struct VhostUserMemory *new,
}
static int
-vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
+vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
struct virtio_net *dev = *pdev;
- struct VhostUserMemory memory = pmsg->payload.memory;
+ struct VhostUserMemory memory = msg->payload.memory;
struct rte_vhost_mem_region *reg;
void *mmap_addr;
uint64_t mmap_size;
@@ -804,7 +804,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
"(%d) memory regions not changed\n", dev->vid);
for (i = 0; i < memory.nregions; i++)
- close(pmsg->fds[i]);
+ close(msg->fds[i]);
return 0;
}
@@ -845,7 +845,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
dev->mem->nregions = memory.nregions;
for (i = 0; i < memory.nregions; i++) {
- fd = pmsg->fds[i];
+ fd = msg->fds[i];
reg = &dev->mem->regions[i];
reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
@@ -994,16 +994,16 @@ virtio_is_ready(struct virtio_net *dev)
}
static void
-vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
+vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *msg)
{
struct vhost_vring_file file;
struct vhost_virtqueue *vq;
- file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
- if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
+ file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+ if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
file.fd = VIRTIO_INVALID_EVENTFD;
else
- file.fd = pmsg->fds[0];
+ file.fd = msg->fds[0];
RTE_LOG(INFO, VHOST_CONFIG,
"vring call idx:%d file:%d\n", file.index, file.fd);
@@ -1015,17 +1015,17 @@ vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
}
static int
-vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
+vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
struct vhost_vring_file file;
struct vhost_virtqueue *vq;
struct virtio_net *dev = *pdev;
- file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
- if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
+ file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+ if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
file.fd = VIRTIO_INVALID_EVENTFD;
else
- file.fd = pmsg->fds[0];
+ file.fd = msg->fds[0];
RTE_LOG(INFO, VHOST_CONFIG,
"vring kick idx:%d file:%d\n", file.index, file.fd);
@@ -1073,7 +1073,7 @@ free_zmbufs(struct vhost_virtqueue *vq)
*/
static int
vhost_user_get_vring_base(struct virtio_net *dev,
- VhostUserMsg *msg)
+ struct VhostUserMsg *msg)
{
struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
@@ -1126,7 +1126,7 @@ vhost_user_get_vring_base(struct virtio_net *dev,
*/
static int
vhost_user_set_vring_enable(struct virtio_net *dev,
- VhostUserMsg *msg)
+ struct VhostUserMsg *msg)
{
int enable = (int)msg->payload.state.num;
int index = (int)msg->payload.state.index;
@@ -1485,7 +1485,8 @@ send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg,
* Allocate a queue pair if it hasn't been allocated yet
*/
static int
-vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
+vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
+ struct VhostUserMsg *msg)
{
uint16_t vring_idx;
@@ -1818,9 +1819,9 @@ vhost_user_msg_handler(int vid, int fd)
}
static int process_slave_message_reply(struct virtio_net *dev,
- const VhostUserMsg *msg)
+ const struct VhostUserMsg *msg)
{
- VhostUserMsg msg_reply;
+ struct VhostUserMsg msg_reply;
int ret;
if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
* [dpdk-dev] [PATCH v4 4/5] vhost: unify message handling function signature
From: Nikolay Nikolaev @ 2018-09-22 21:16 UTC (permalink / raw)
To: maxime.coquelin, tiwei.bie, zhihong.wang; +Cc: dev
Each vhost-user message handling function now returns an int result,
described by the new enum vh_result: error, OK, or reply. All handlers
take the same two arguments: a virtio_net double pointer and a
VhostUserMsg pointer.
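A rough sketch of the unified shape (the handler and helper names here are
made up; the real handlers are in the diff below):

	static int
	vhost_user_handle_example(struct virtio_net **pdev, struct VhostUserMsg *msg)
	{
		struct virtio_net *dev = *pdev;

		if (!example_request_is_valid(msg))	/* hypothetical check */
			return VH_RESULT_ERR;	/* error is propagated to the caller */

		/* ... act on dev using msg->payload ... */

		msg->payload.u64 = example_result;	/* only when a reply is needed */
		msg->size = sizeof(msg->payload.u64);
		return VH_RESULT_REPLY;	/* otherwise return VH_RESULT_OK */
	}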
Signed-off-by: Nikolay Nikolaev <nicknickolaev@gmail.com>
---
lib/librte_vhost/vhost_user.c | 213 ++++++++++++++++++++++++-----------------
1 file changed, 126 insertions(+), 87 deletions(-)
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 1627d594e..051740477 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -71,6 +71,16 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
[VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
};
+/* The possible results of a message handling function */
+enum vh_result {
+ /* Message handling failed */
+ VH_RESULT_ERR = -1,
+ /* Message handling successful */
+ VH_RESULT_OK = 0,
+ /* Message handling successful and reply prepared */
+ VH_RESULT_REPLY = 1,
+};
+
static uint64_t
get_blk_size(int fd)
{
@@ -127,27 +137,31 @@ vhost_backend_cleanup(struct virtio_net *dev)
* the device hasn't been initialised.
*/
static int
-vhost_user_set_owner(void)
+vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
+ VhostUserMsg * msg __rte_unused)
{
- return 0;
+ return VH_RESULT_OK;
}
static int
-vhost_user_reset_owner(struct virtio_net *dev)
+vhost_user_reset_owner(struct virtio_net **pdev,
+ VhostUserMsg * msg __rte_unused)
{
+ struct virtio_net *dev = *pdev;
vhost_destroy_device_notify(dev);
cleanup_device(dev, 0);
reset_device(dev);
- return 0;
+ return VH_RESULT_OK;
}
/*
* The features that we support are requested.
*/
-static uint64_t
-vhost_user_get_features(struct virtio_net *dev, VhostUserMsg *msg)
+static int
+vhost_user_get_features(struct virtio_net **pdev, VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
uint64_t features = 0;
rte_vhost_driver_get_features(dev->ifname, &features);
@@ -155,15 +169,16 @@ vhost_user_get_features(struct virtio_net *dev, VhostUserMsg *msg)
msg->payload.u64 = features;
msg->size = sizeof(msg->payload.u64);
- return features;
+ return VH_RESULT_REPLY;
}
/*
* The queue number that we support are requested.
*/
-static uint32_t
-vhost_user_get_queue_num(struct virtio_net *dev, VhostUserMsg *msg)
+static int
+vhost_user_get_queue_num(struct virtio_net **pdev, VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
uint32_t queue_num = 0;
rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
@@ -171,15 +186,17 @@ vhost_user_get_queue_num(struct virtio_net *dev, VhostUserMsg *msg)
msg->payload.u64 = (uint64_t)queue_num;
msg->size = sizeof(msg->payload.u64);
- return queue_num;
+ return VH_RESULT_REPLY;
}
/*
* We receive the negotiated features supported by us and the virtio device.
*/
static int
-vhost_user_set_features(struct virtio_net *dev, uint64_t features)
+vhost_user_set_features(struct virtio_net **pdev, VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
+ uint64_t features = msg->payload.u64;
uint64_t vhost_features = 0;
struct rte_vdpa_device *vdpa_dev;
int did = -1;
@@ -189,12 +206,12 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) received invalid negotiated features.\n",
dev->vid);
- return -1;
+ return VH_RESULT_ERR;
}
if (dev->flags & VIRTIO_DEV_RUNNING) {
if (dev->features == features)
- return 0;
+ return VH_RESULT_OK;
/*
* Error out if master tries to change features while device is
@@ -205,7 +222,7 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) features changed while device is running.\n",
dev->vid);
- return -1;
+ return VH_RESULT_ERR;
}
if (dev->notify_ops->features_changed)
@@ -250,16 +267,17 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
if (vdpa_dev && vdpa_dev->ops->set_features)
vdpa_dev->ops->set_features(dev->vid);
- return 0;
+ return VH_RESULT_OK;
}
/*
* The virtio device sends us the size of the descriptor ring.
*/
static int
-vhost_user_set_vring_num(struct virtio_net *dev,
+vhost_user_set_vring_num(struct virtio_net **pdev,
struct VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
vq->size = msg->payload.state.num;
@@ -272,7 +290,7 @@ vhost_user_set_vring_num(struct virtio_net *dev,
if ((vq->size & (vq->size - 1)) || vq->size > 32768) {
RTE_LOG(ERR, VHOST_CONFIG,
"invalid virtqueue size %u\n", vq->size);
- return -1;
+ return VH_RESULT_ERR;
}
if (dev->dequeue_zero_copy) {
@@ -298,7 +316,7 @@ vhost_user_set_vring_num(struct virtio_net *dev,
if (!vq->shadow_used_packed) {
RTE_LOG(ERR, VHOST_CONFIG,
"failed to allocate memory for shadow used ring.\n");
- return -1;
+ return VH_RESULT_ERR;
}
} else {
@@ -308,7 +326,7 @@ vhost_user_set_vring_num(struct virtio_net *dev,
if (!vq->shadow_used_split) {
RTE_LOG(ERR, VHOST_CONFIG,
"failed to allocate memory for shadow used ring.\n");
- return -1;
+ return VH_RESULT_ERR;
}
}
@@ -318,10 +336,10 @@ vhost_user_set_vring_num(struct virtio_net *dev,
if (!vq->batch_copy_elems) {
RTE_LOG(ERR, VHOST_CONFIG,
"failed to allocate memory for batching copy.\n");
- return -1;
+ return VH_RESULT_ERR;
}
- return 0;
+ return VH_RESULT_OK;
}
/*
@@ -621,12 +639,12 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
struct vhost_virtqueue *vq;
struct vhost_vring_addr *addr = &msg->payload.addr;
- struct virtio_net *dev = *pdev;
if (dev->mem == NULL)
- return -1;
+ return VH_RESULT_ERR;
/* addr->index refers to the queue index. The txq 1, rxq is 0. */
vq = dev->virtqueue[msg->payload.addr.index];
@@ -643,27 +661,28 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg)
(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
dev = translate_ring_addresses(dev, msg->payload.addr.index);
if (!dev)
- return -1;
+ return VH_RESULT_ERR;
*pdev = dev;
}
- return 0;
+ return VH_RESULT_OK;
}
/*
* The virtio device sends us the available ring last used index.
*/
static int
-vhost_user_set_vring_base(struct virtio_net *dev,
+vhost_user_set_vring_base(struct virtio_net **pdev,
struct VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
dev->virtqueue[msg->payload.state.index]->last_used_idx =
msg->payload.state.num;
dev->virtqueue[msg->payload.state.index]->last_avail_idx =
msg->payload.state.num;
- return 0;
+ return VH_RESULT_OK;
}
static int
@@ -804,7 +823,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg)
if (memory.nregions > VHOST_MEMORY_MAX_NREGIONS) {
RTE_LOG(ERR, VHOST_CONFIG,
"too many memory regions (%u)\n", memory.nregions);
- return -1;
+ return VH_RESULT_ERR;
}
if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
@@ -814,7 +833,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg)
for (i = 0; i < memory.nregions; i++)
close(msg->fds[i]);
- return 0;
+ return VH_RESULT_OK;
}
if (dev->mem) {
@@ -838,7 +857,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg)
"(%d) failed to allocate memory "
"for dev->guest_pages\n",
dev->vid);
- return -1;
+ return VH_RESULT_ERR;
}
}
@@ -848,7 +867,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg)
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory for dev->mem\n",
dev->vid);
- return -1;
+ return VH_RESULT_ERR;
}
dev->mem->nregions = memory.nregions;
@@ -945,7 +964,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg)
dev = translate_ring_addresses(dev, i);
if (!dev)
- return -1;
+ return VH_RESULT_ERR;
*pdev = dev;
}
@@ -953,13 +972,13 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg)
dump_guest_pages(dev);
- return 0;
+ return VH_RESULT_OK;
err_mmap:
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
- return -1;
+ return VH_RESULT_ERR;
}
static bool
@@ -1001,9 +1020,10 @@ virtio_is_ready(struct virtio_net *dev)
return 1;
}
-static void
-vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *msg)
+static int
+vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
struct vhost_vring_file file;
struct vhost_virtqueue *vq;
@@ -1020,22 +1040,26 @@ vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *msg)
close(vq->callfd);
vq->callfd = file.fd;
+
+ return VH_RESULT_OK;
}
-static void vhost_user_set_vring_err(struct virtio_net *dev __rte_unused,
+static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
VhostUserMsg *msg)
{
if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
close(msg->fds[0]);
RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
+
+ return VH_RESULT_OK;
}
static int
vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
struct vhost_vring_file file;
struct vhost_virtqueue *vq;
- struct virtio_net *dev = *pdev;
file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
@@ -1048,7 +1072,7 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg)
/* Interpret ring addresses only when ring is started. */
dev = translate_ring_addresses(dev, file.index);
if (!dev)
- return -1;
+ return VH_RESULT_ERR;
*pdev = dev;
@@ -1065,7 +1089,8 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg)
if (vq->kickfd >= 0)
close(vq->kickfd);
vq->kickfd = file.fd;
- return 0;
+
+ return VH_RESULT_OK;
}
static void
@@ -1088,9 +1113,10 @@ free_zmbufs(struct vhost_virtqueue *vq)
* when virtio is stopped, qemu will send us the GET_VRING_BASE message.
*/
static int
-vhost_user_get_vring_base(struct virtio_net *dev,
+vhost_user_get_vring_base(struct virtio_net **pdev,
struct VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
/* We have to stop the queue (virtio) if it is running. */
@@ -1135,7 +1161,7 @@ vhost_user_get_vring_base(struct virtio_net *dev,
msg->size = sizeof(msg->payload.state);
- return 0;
+ return VH_RESULT_OK;
}
/*
@@ -1143,9 +1169,10 @@ vhost_user_get_vring_base(struct virtio_net *dev,
* enable the virtio queue pair.
*/
static int
-vhost_user_set_vring_enable(struct virtio_net *dev,
+vhost_user_set_vring_enable(struct virtio_net **pdev,
struct VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
int enable = (int)msg->payload.state.num;
int index = (int)msg->payload.state.index;
struct rte_vdpa_device *vdpa_dev;
@@ -1166,13 +1193,14 @@ vhost_user_set_vring_enable(struct virtio_net *dev,
dev->virtqueue[index]->enabled = enable;
- return 0;
+ return VH_RESULT_OK;
}
-static void
-vhost_user_get_protocol_features(struct virtio_net *dev,
+static int
+vhost_user_get_protocol_features(struct virtio_net **pdev,
struct VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
uint64_t features, protocol_features;
rte_vhost_driver_get_features(dev->ifname, &features);
@@ -1189,40 +1217,46 @@ vhost_user_get_protocol_features(struct virtio_net *dev,
msg->payload.u64 = protocol_features;
msg->size = sizeof(msg->payload.u64);
+
+ return VH_RESULT_OK;
}
static int
-vhost_user_set_protocol_features(struct virtio_net *dev,
- uint64_t protocol_features)
+vhost_user_set_protocol_features(struct virtio_net **pdev,
+ VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
+ uint64_t protocol_features = msg->payload.u64;
if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) received invalid protocol features.\n",
dev->vid);
- return -1;
+ return VH_RESULT_ERR;
}
dev->protocol_features = protocol_features;
- return 0;
+
+ return VH_RESULT_OK;
}
static int
-vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
+vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
int fd = msg->fds[0];
uint64_t size, off;
void *addr;
if (fd < 0) {
RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
- return -1;
+ return VH_RESULT_ERR;
}
if (msg->size != sizeof(VhostUserLog)) {
RTE_LOG(ERR, VHOST_CONFIG,
"invalid log base msg size: %"PRId32" != %d\n",
msg->size, (int)sizeof(VhostUserLog));
- return -1;
+ return VH_RESULT_ERR;
}
size = msg->payload.log.mmap_size;
@@ -1233,7 +1267,7 @@ vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
RTE_LOG(ERR, VHOST_CONFIG,
"log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
off, size);
- return -1;
+ return VH_RESULT_ERR;
}
RTE_LOG(INFO, VHOST_CONFIG,
@@ -1248,7 +1282,7 @@ vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
close(fd);
if (addr == MAP_FAILED) {
RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
- return -1;
+ return VH_RESULT_ERR;
}
/*
@@ -1264,14 +1298,16 @@ vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
msg->size = sizeof(msg->payload.u64);
- return 0;
+ return VH_RESULT_OK;
}
-static void
-vhost_user_set_log_fd(struct virtio_net *dev __rte_unused, VhostUserMsg *msg)
+static int
+vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused, VhostUserMsg *msg)
{
close(msg->fds[0]);
RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
+
+ return VH_RESULT_OK;
}
/*
@@ -1283,8 +1319,9 @@ vhost_user_set_log_fd(struct virtio_net *dev __rte_unused, VhostUserMsg *msg)
* a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it.
*/
static int
-vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
+vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
uint8_t *mac = (uint8_t *)&msg->payload.u64;
struct rte_vdpa_device *vdpa_dev;
int did = -1;
@@ -1308,40 +1345,42 @@ vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
if (vdpa_dev && vdpa_dev->ops->migration_done)
vdpa_dev->ops->migration_done(dev->vid);
- return 0;
+ return VH_RESULT_OK;
}
static int
-vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
+vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
if (msg->payload.u64 < VIRTIO_MIN_MTU ||
msg->payload.u64 > VIRTIO_MAX_MTU) {
RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
msg->payload.u64);
- return -1;
+ return VH_RESULT_ERR;
}
dev->mtu = msg->payload.u64;
- return 0;
+ return VH_RESULT_OK;
}
static int
-vhost_user_set_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
+vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
+ struct virtio_net *dev = *pdev;
int fd = msg->fds[0];
if (fd < 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"Invalid file descriptor for slave channel (%d)\n",
fd);
- return -1;
+ return VH_RESULT_ERR;
}
dev->slave_req_fd = fd;
- return 0;
+ return VH_RESULT_OK;
}
static int
@@ -1406,7 +1445,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
len = imsg->size;
vva = qva_to_vva(dev, imsg->uaddr, &len);
if (!vva)
- return -1;
+ return VH_RESULT_ERR;
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
@@ -1432,10 +1471,10 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
default:
RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
imsg->type);
- return -1;
+ return VH_RESULT_ERR;
}
- return 0;
+ return VH_RESULT_OK;
}
/* return bytes# of read on success or negative val on failure. */
@@ -1685,26 +1724,26 @@ vhost_user_msg_handler(int vid, int fd)
switch (msg.request.master) {
case VHOST_USER_GET_FEATURES:
- vhost_user_get_features(dev, &msg);
+ ret = vhost_user_get_features(&dev, &msg);
send_vhost_reply(fd, &msg);
break;
case VHOST_USER_SET_FEATURES:
- ret = vhost_user_set_features(dev, msg.payload.u64);
+ ret = vhost_user_set_features(&dev, &msg);
break;
case VHOST_USER_GET_PROTOCOL_FEATURES:
- vhost_user_get_protocol_features(dev, &msg);
+ ret = vhost_user_get_protocol_features(&dev, &msg);
send_vhost_reply(fd, &msg);
break;
case VHOST_USER_SET_PROTOCOL_FEATURES:
- ret = vhost_user_set_protocol_features(dev, msg.payload.u64);
+ ret = vhost_user_set_protocol_features(&dev, &msg);
break;
case VHOST_USER_SET_OWNER:
- ret = vhost_user_set_owner();
+ ret = vhost_user_set_owner(&dev, &msg);
break;
case VHOST_USER_RESET_OWNER:
- ret = vhost_user_reset_owner(dev);
+ ret = vhost_user_reset_owner(&dev, &msg);
break;
case VHOST_USER_SET_MEM_TABLE:
@@ -1712,28 +1751,28 @@ vhost_user_msg_handler(int vid, int fd)
break;
case VHOST_USER_SET_LOG_BASE:
- ret = vhost_user_set_log_base(dev, &msg);
+ ret = vhost_user_set_log_base(&dev, &msg);
if (ret)
goto skip_to_reply;
/* it needs a reply */
send_vhost_reply(fd, &msg);
break;
case VHOST_USER_SET_LOG_FD:
- vhost_user_set_log_fd(dev, &msg);
+ ret = vhost_user_set_log_fd(&dev, &msg);
break;
case VHOST_USER_SET_VRING_NUM:
- ret = vhost_user_set_vring_num(dev, &msg);
+ ret = vhost_user_set_vring_num(&dev, &msg);
break;
case VHOST_USER_SET_VRING_ADDR:
ret = vhost_user_set_vring_addr(&dev, &msg);
break;
case VHOST_USER_SET_VRING_BASE:
- ret = vhost_user_set_vring_base(dev, &msg);
+ ret = vhost_user_set_vring_base(&dev, &msg);
break;
case VHOST_USER_GET_VRING_BASE:
- ret = vhost_user_get_vring_base(dev, &msg);
+ ret = vhost_user_get_vring_base(&dev, &msg);
if (ret)
goto skip_to_reply;
send_vhost_reply(fd, &msg);
@@ -1743,31 +1782,31 @@ vhost_user_msg_handler(int vid, int fd)
ret = vhost_user_set_vring_kick(&dev, &msg);
break;
case VHOST_USER_SET_VRING_CALL:
- vhost_user_set_vring_call(dev, &msg);
+ ret = vhost_user_set_vring_call(&dev, &msg);
break;
case VHOST_USER_SET_VRING_ERR:
- vhost_user_set_vring_err(dev, &msg);
+ ret = vhost_user_set_vring_err(&dev, &msg);
break;
case VHOST_USER_GET_QUEUE_NUM:
- vhost_user_get_queue_num(dev, &msg);
+ ret = vhost_user_get_queue_num(&dev, &msg);
send_vhost_reply(fd, &msg);
break;
case VHOST_USER_SET_VRING_ENABLE:
- ret = vhost_user_set_vring_enable(dev, &msg);
+ ret = vhost_user_set_vring_enable(&dev, &msg);
break;
case VHOST_USER_SEND_RARP:
- ret = vhost_user_send_rarp(dev, &msg);
+ ret = vhost_user_send_rarp(&dev, &msg);
break;
case VHOST_USER_NET_SET_MTU:
- ret = vhost_user_net_set_mtu(dev, &msg);
+ ret = vhost_user_net_set_mtu(&dev, &msg);
break;
case VHOST_USER_SET_SLAVE_REQ_FD:
- ret = vhost_user_set_req_fd(dev, &msg);
+ ret = vhost_user_set_req_fd(&dev, &msg);
break;
case VHOST_USER_IOTLB_MSG:
* [dpdk-dev] [PATCH v4 5/5] vhost: message handling implemented as a callback array
From: Nikolay Nikolaev @ 2018-09-22 21:16 UTC (permalink / raw)
To: maxime.coquelin, tiwei.bie, zhihong.wang; +Cc: dev
Introduce vhost_message_handlers, an array that maps each message request
type to its handling function, and replace the switch construct with a
lookup and call through that array.
A failure in vhost_user_set_features is fatal: all processing must stop
immediately and the error must be propagated to the upper layers. Change
the code accordingly to reflect that.
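The dispatch itself becomes an array lookup. A condensed sketch of what the
diff below does (the real code also logs the outcome per vh_result value and
skips requests whose table entry is NULL):

	int request = msg.request.master;

	if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
			vhost_message_handlers[request])
		ret = vhost_message_handlers[request](&dev, &msg);
	else
		ret = VH_RESULT_ERR;	/* unknown message type */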
Signed-off-by: Nikolay Nikolaev <nicknickolaev@gmail.com>
---
lib/librte_vhost/vhost_user.c | 149 +++++++++++++++--------------------------
1 file changed, 56 insertions(+), 93 deletions(-)
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 051740477..0c2faa6af 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -1477,6 +1477,34 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
return VH_RESULT_OK;
}
+typedef int (*vhost_message_handler_t)(struct virtio_net **pdev, VhostUserMsg * msg);
+static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
+ [VHOST_USER_NONE] = NULL,
+ [VHOST_USER_GET_FEATURES] = vhost_user_get_features,
+ [VHOST_USER_SET_FEATURES] = vhost_user_set_features,
+ [VHOST_USER_SET_OWNER] = vhost_user_set_owner,
+ [VHOST_USER_RESET_OWNER] = vhost_user_reset_owner,
+ [VHOST_USER_SET_MEM_TABLE] = vhost_user_set_mem_table,
+ [VHOST_USER_SET_LOG_BASE] = vhost_user_set_log_base,
+ [VHOST_USER_SET_LOG_FD] = vhost_user_set_log_fd,
+ [VHOST_USER_SET_VRING_NUM] = vhost_user_set_vring_num,
+ [VHOST_USER_SET_VRING_ADDR] = vhost_user_set_vring_addr,
+ [VHOST_USER_SET_VRING_BASE] = vhost_user_set_vring_base,
+ [VHOST_USER_GET_VRING_BASE] = vhost_user_get_vring_base,
+ [VHOST_USER_SET_VRING_KICK] = vhost_user_set_vring_kick,
+ [VHOST_USER_SET_VRING_CALL] = vhost_user_set_vring_call,
+ [VHOST_USER_SET_VRING_ERR] = vhost_user_set_vring_err,
+ [VHOST_USER_GET_PROTOCOL_FEATURES] = vhost_user_get_protocol_features,
+ [VHOST_USER_SET_PROTOCOL_FEATURES] = vhost_user_set_protocol_features,
+ [VHOST_USER_GET_QUEUE_NUM] = vhost_user_get_queue_num,
+ [VHOST_USER_SET_VRING_ENABLE] = vhost_user_set_vring_enable,
+ [VHOST_USER_SEND_RARP] = vhost_user_send_rarp,
+ [VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu,
+ [VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd,
+ [VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg,
+};
+
+
/* return bytes# of read on success or negative val on failure. */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
@@ -1630,6 +1658,7 @@ vhost_user_msg_handler(int vid, int fd)
int ret;
int unlock_required = 0;
uint32_t skip_master = 0;
+ int request;
dev = get_device(vid);
if (dev == NULL)
@@ -1722,100 +1751,34 @@ vhost_user_msg_handler(int vid, int fd)
goto skip_to_post_handle;
}
- switch (msg.request.master) {
- case VHOST_USER_GET_FEATURES:
- ret = vhost_user_get_features(&dev, &msg);
- send_vhost_reply(fd, &msg);
- break;
- case VHOST_USER_SET_FEATURES:
- ret = vhost_user_set_features(&dev, &msg);
- break;
-
- case VHOST_USER_GET_PROTOCOL_FEATURES:
- ret = vhost_user_get_protocol_features(&dev, &msg);
- send_vhost_reply(fd, &msg);
- break;
- case VHOST_USER_SET_PROTOCOL_FEATURES:
- ret = vhost_user_set_protocol_features(&dev, &msg);
- break;
-
- case VHOST_USER_SET_OWNER:
- ret = vhost_user_set_owner(&dev, &msg);
- break;
- case VHOST_USER_RESET_OWNER:
- ret = vhost_user_reset_owner(&dev, &msg);
- break;
-
- case VHOST_USER_SET_MEM_TABLE:
- ret = vhost_user_set_mem_table(&dev, &msg);
- break;
-
- case VHOST_USER_SET_LOG_BASE:
- ret = vhost_user_set_log_base(&dev, &msg);
- if (ret)
- goto skip_to_reply;
- /* it needs a reply */
- send_vhost_reply(fd, &msg);
- break;
- case VHOST_USER_SET_LOG_FD:
- ret = vhost_user_set_log_fd(&dev, &msg);
- break;
-
- case VHOST_USER_SET_VRING_NUM:
- ret = vhost_user_set_vring_num(&dev, &msg);
- break;
- case VHOST_USER_SET_VRING_ADDR:
- ret = vhost_user_set_vring_addr(&dev, &msg);
- break;
- case VHOST_USER_SET_VRING_BASE:
- ret = vhost_user_set_vring_base(&dev, &msg);
- break;
-
- case VHOST_USER_GET_VRING_BASE:
- ret = vhost_user_get_vring_base(&dev, &msg);
- if (ret)
- goto skip_to_reply;
- send_vhost_reply(fd, &msg);
- break;
-
- case VHOST_USER_SET_VRING_KICK:
- ret = vhost_user_set_vring_kick(&dev, &msg);
- break;
- case VHOST_USER_SET_VRING_CALL:
- ret = vhost_user_set_vring_call(&dev, &msg);
- break;
-
- case VHOST_USER_SET_VRING_ERR:
- ret = vhost_user_set_vring_err(&dev, &msg);
- break;
-
- case VHOST_USER_GET_QUEUE_NUM:
- ret = vhost_user_get_queue_num(&dev, &msg);
- send_vhost_reply(fd, &msg);
- break;
-
- case VHOST_USER_SET_VRING_ENABLE:
- ret = vhost_user_set_vring_enable(&dev, &msg);
- break;
- case VHOST_USER_SEND_RARP:
- ret = vhost_user_send_rarp(&dev, &msg);
- break;
-
- case VHOST_USER_NET_SET_MTU:
- ret = vhost_user_net_set_mtu(&dev, &msg);
- break;
-
- case VHOST_USER_SET_SLAVE_REQ_FD:
- ret = vhost_user_set_req_fd(&dev, &msg);
- break;
-
- case VHOST_USER_IOTLB_MSG:
- ret = vhost_user_iotlb_msg(&dev, &msg);
- break;
+ request = msg.request.master;
+ if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
+ if (!vhost_message_handlers[request])
+ goto skip_to_post_handle;
+ ret = vhost_message_handlers[request](&dev, &msg);
- default:
- ret = -1;
- break;
+ switch (ret) {
+ case VH_RESULT_ERR:
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Processing %s failed.\n",
+ vhost_message_str[request]);
+ break;
+ case VH_RESULT_OK:
+ RTE_LOG(DEBUG, VHOST_CONFIG,
+ "Processing %s succeeded.\n",
+ vhost_message_str[request]);
+ break;
+ case VH_RESULT_REPLY:
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "Processing %s succeeded and needs reply.\n",
+ vhost_message_str[request]);
+ send_vhost_reply(fd, &msg);
+ break;
+ }
+ } else {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Requested invalid message type %d.\n", request);
+ ret = VH_RESULT_ERR;
}
skip_to_post_handle: