* [PATCH] vhost: use another variable to store vhost msg result code
@ 2022-07-18 2:06 Andy Pei
2022-09-22 13:26 ` Xia, Chenbo
2022-09-23 2:32 ` [PATCH v2] vhost: use dedicated variable for vhost message " Andy Pei
0 siblings, 2 replies; 7+ messages in thread
From: Andy Pei @ 2022-07-18 2:06 UTC (permalink / raw)
To: dev; +Cc: maxime.coquelin, Chenbo.Xia, WenwuX.Ma
Currently, in function vhost_user_msg_handler, variable ret is used to
store both the vhost msg result code and function call return values.
After this patch, variable ret is used only to store function call
return values; a new variable msg_result is used to store the vhost msg
result. This improves readability.
Signed-off-by: Andy Pei <andy.pei@intel.com>
---
lib/vhost/vhost_user.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 4ad28ba..dac06c9 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -2969,6 +2969,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
struct vhu_msg_context ctx;
vhost_message_handler_t *msg_handler;
struct rte_vdpa_device *vdpa_dev;
+ int msg_result = RTE_VHOST_MSG_RESULT_OK;
int ret;
int unlock_required = 0;
bool handled;
@@ -3061,8 +3062,8 @@ static int is_vring_iotlb(struct virtio_net *dev,
handled = false;
if (dev->extern_ops.pre_msg_handle) {
RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
- ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
- switch (ret) {
+ msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
+ switch (msg_result) {
case RTE_VHOST_MSG_RESULT_REPLY:
send_vhost_reply(dev, fd, &ctx);
/* Fall-through */
@@ -3080,12 +3081,12 @@ static int is_vring_iotlb(struct virtio_net *dev,
goto skip_to_post_handle;
if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0) {
- ret = RTE_VHOST_MSG_RESULT_ERR;
+ msg_result = RTE_VHOST_MSG_RESULT_ERR;
} else {
- ret = msg_handler->callback(&dev, &ctx, fd);
+ msg_result = msg_handler->callback(&dev, &ctx, fd);
}
- switch (ret) {
+ switch (msg_result) {
case RTE_VHOST_MSG_RESULT_ERR:
VHOST_LOG_CONFIG(dev->ifname, ERR,
"processing %s failed.\n",
@@ -3110,11 +3111,11 @@ static int is_vring_iotlb(struct virtio_net *dev,
}
skip_to_post_handle:
- if (ret != RTE_VHOST_MSG_RESULT_ERR &&
+ if (msg_result != RTE_VHOST_MSG_RESULT_ERR &&
dev->extern_ops.post_msg_handle) {
RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
- ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
- switch (ret) {
+ msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
+ switch (msg_result) {
case RTE_VHOST_MSG_RESULT_REPLY:
send_vhost_reply(dev, fd, &ctx);
/* Fall-through */
@@ -3133,7 +3134,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
"vhost message (req: %d) was not handled.\n",
request);
close_msg_fds(&ctx);
- ret = RTE_VHOST_MSG_RESULT_ERR;
+ msg_result = RTE_VHOST_MSG_RESULT_ERR;
}
/*
@@ -3142,17 +3143,16 @@ static int is_vring_iotlb(struct virtio_net *dev,
* VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
*/
if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
- ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
+ ctx.msg.payload.u64 = msg_result == RTE_VHOST_MSG_RESULT_ERR;
ctx.msg.size = sizeof(ctx.msg.payload.u64);
ctx.fd_num = 0;
send_vhost_reply(dev, fd, &ctx);
- } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
+ } else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message handling failed.\n");
ret = -1;
goto unlock;
}
- ret = 0;
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
bool cur_ready = vq_is_ready(dev, vq);
--
1.8.3.1
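In essence, the patch above keeps the protocol-level message result and the
function's own 0/-1 return status in separate variables instead of reusing a
single ret for both. Below is a minimal, self-contained sketch of that pattern;
it is only an illustration under assumed names (MSG_RESULT_*, handle_request,
msg_handler_sketch are made up for this sketch), not the actual DPDK code, and
only the roles of msg_result and ret mirror the variables touched by the patch.

#include <stdio.h>

/* Illustrative stand-ins for message result codes; values are arbitrary. */
enum msg_result {
	MSG_RESULT_ERR,
	MSG_RESULT_OK,
	MSG_RESULT_REPLY,
};

/* Hypothetical per-message callback, playing the role of msg_handler->callback(). */
static enum msg_result handle_request(int request)
{
	return request >= 0 ? MSG_RESULT_OK : MSG_RESULT_ERR;
}

/*
 * The message outcome lives in msg_result; ret only ever holds the value
 * this function returns to its caller, as in the patched handler.
 */
static int msg_handler_sketch(int request)
{
	enum msg_result msg_result = MSG_RESULT_OK;
	int ret = 0;

	msg_result = handle_request(request);
	if (msg_result == MSG_RESULT_ERR) {
		fprintf(stderr, "message handling failed\n");
		ret = -1;
	}

	return ret;
}

int main(void)
{
	printf("good request -> %d, bad request -> %d\n",
	       msg_handler_sketch(1), msg_handler_sketch(-1));
	return 0;
}

Keeping the two concerns in separate variables means message result codes can
no longer leak into ret, which is presumably what lets the patch also drop the
trailing "ret = 0;" reset.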
* RE: [PATCH] vhost: use another variable to store vhost msg result code
2022-07-18 2:06 [PATCH] vhost: use another variable to store vhost msg result code Andy Pei
@ 2022-09-22 13:26 ` Xia, Chenbo
2022-09-23 2:29 ` Pei, Andy
2022-09-23 2:32 ` [PATCH v2] vhost: use dedicated variable for vhost message " Andy Pei
1 sibling, 1 reply; 7+ messages in thread
From: Xia, Chenbo @ 2022-09-22 13:26 UTC (permalink / raw)
To: Pei, Andy, dev; +Cc: maxime.coquelin, Ma, WenwuX
> -----Original Message-----
> From: Pei, Andy <andy.pei@intel.com>
> Sent: Monday, July 18, 2022 10:07 AM
> To: dev@dpdk.org
> Cc: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>; Ma,
> WenwuX <wenwux.ma@intel.com>
> Subject: [PATCH] vhost: use another variable to store vhost msg result
> code
Patch looks good. I suggest using the title:
vhost: use dedicated variable for vhost message result code
Thanks,
Chenbo
>
> Currently in function vhost_user_msg_handler, variable ret is used to
> store both vhost msg result code and function call return value.
> After this patch, variable ret is used only to store function call
> return value, a new variable msg_result is used to store vhost msg
> result. This can improve readability.
>
> Signed-off-by: Andy Pei <andy.pei@intel.com>
> ---
> lib/vhost/vhost_user.c | 24 ++++++++++++------------
> 1 file changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 4ad28ba..dac06c9 100644
> --- a/lib/vhost/vhost_user.c
> +++ b/lib/vhost/vhost_user.c
> @@ -2969,6 +2969,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> struct vhu_msg_context ctx;
> vhost_message_handler_t *msg_handler;
> struct rte_vdpa_device *vdpa_dev;
> + int msg_result = RTE_VHOST_MSG_RESULT_OK;
> int ret;
> int unlock_required = 0;
> bool handled;
> @@ -3061,8 +3062,8 @@ static int is_vring_iotlb(struct virtio_net *dev,
> handled = false;
> if (dev->extern_ops.pre_msg_handle) {
> RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
> - ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> - switch (ret) {
> + msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> + switch (msg_result) {
> case RTE_VHOST_MSG_RESULT_REPLY:
> send_vhost_reply(dev, fd, &ctx);
> /* Fall-through */
> @@ -3080,12 +3081,12 @@ static int is_vring_iotlb(struct virtio_net *dev,
> goto skip_to_post_handle;
>
> if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0)
> {
> - ret = RTE_VHOST_MSG_RESULT_ERR;
> + msg_result = RTE_VHOST_MSG_RESULT_ERR;
> } else {
> - ret = msg_handler->callback(&dev, &ctx, fd);
> + msg_result = msg_handler->callback(&dev, &ctx, fd);
> }
>
> - switch (ret) {
> + switch (msg_result) {
> case RTE_VHOST_MSG_RESULT_ERR:
> VHOST_LOG_CONFIG(dev->ifname, ERR,
> "processing %s failed.\n",
> @@ -3110,11 +3111,11 @@ static int is_vring_iotlb(struct virtio_net *dev,
> }
>
> skip_to_post_handle:
> - if (ret != RTE_VHOST_MSG_RESULT_ERR &&
> + if (msg_result != RTE_VHOST_MSG_RESULT_ERR &&
> dev->extern_ops.post_msg_handle) {
> RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
> - ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
> - switch (ret) {
> + msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid,
> &ctx);
> + switch (msg_result) {
> case RTE_VHOST_MSG_RESULT_REPLY:
> send_vhost_reply(dev, fd, &ctx);
> /* Fall-through */
> @@ -3133,7 +3134,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> "vhost message (req: %d) was not handled.\n",
> request);
> close_msg_fds(&ctx);
> - ret = RTE_VHOST_MSG_RESULT_ERR;
> + msg_result = RTE_VHOST_MSG_RESULT_ERR;
> }
>
> /*
> @@ -3142,17 +3143,16 @@ static int is_vring_iotlb(struct virtio_net *dev,
> * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
> */
> if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
> - ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
> + ctx.msg.payload.u64 = msg_result == RTE_VHOST_MSG_RESULT_ERR;
> ctx.msg.size = sizeof(ctx.msg.payload.u64);
> ctx.fd_num = 0;
> send_vhost_reply(dev, fd, &ctx);
> - } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
> + } else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
> VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message handling
> failed.\n");
> ret = -1;
> goto unlock;
> }
>
> - ret = 0;
> for (i = 0; i < dev->nr_vring; i++) {
> struct vhost_virtqueue *vq = dev->virtqueue[i];
> bool cur_ready = vq_is_ready(dev, vq);
> --
> 1.8.3.1
* RE: [PATCH] vhost: use another variable to store vhost msg result code
2022-09-22 13:26 ` Xia, Chenbo
@ 2022-09-23 2:29 ` Pei, Andy
0 siblings, 0 replies; 7+ messages in thread
From: Pei, Andy @ 2022-09-23 2:29 UTC (permalink / raw)
To: Xia, Chenbo, dev; +Cc: maxime.coquelin, Ma, WenwuX
Hi Chenbo,
Thanks for your reply.
I think your suggestion is good, and I will send a V2 patch to address this.
> -----Original Message-----
> From: Xia, Chenbo <chenbo.xia@intel.com>
> Sent: Thursday, September 22, 2022 9:27 PM
> To: Pei, Andy <andy.pei@intel.com>; dev@dpdk.org
> Cc: maxime.coquelin@redhat.com; Ma, WenwuX <WenwuX.Ma@intel.com>
> Subject: RE: [PATCH] vhost: use another variable to store vhost msg result code
>
> > -----Original Message-----
> > From: Pei, Andy <andy.pei@intel.com>
> > Sent: Monday, July 18, 2022 10:07 AM
> > To: dev@dpdk.org
> > Cc: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>;
> > Ma, WenwuX <wenwux.ma@intel.com>
> > Subject: [PATCH] vhost: use another variable to store vhost msg result
> > code
>
> Patch looks good. I suggest using the title:
> vhost: use dedicated variable for vhost message result code
>
> Thanks,
> Chenbo
>
> >
> > Currently in function vhost_user_msg_handler, variable ret is used to
> > store both vhost msg result code and function call return value.
> > After this patch, variable ret is used only to store function call
> > return value, a new variable msg_result is used to store vhost msg
> > result. This can improve readability.
> >
> > Signed-off-by: Andy Pei <andy.pei@intel.com>
> > ---
> > lib/vhost/vhost_user.c | 24 ++++++++++++------------
> > 1 file changed, 12 insertions(+), 12 deletions(-)
> >
> > diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c index
> > 4ad28ba..dac06c9 100644
> > --- a/lib/vhost/vhost_user.c
> > +++ b/lib/vhost/vhost_user.c
> > @@ -2969,6 +2969,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > struct vhu_msg_context ctx;
> > vhost_message_handler_t *msg_handler;
> > struct rte_vdpa_device *vdpa_dev;
> > + int msg_result = RTE_VHOST_MSG_RESULT_OK;
> > int ret;
> > int unlock_required = 0;
> > bool handled;
> > @@ -3061,8 +3062,8 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > handled = false;
> > if (dev->extern_ops.pre_msg_handle) {
> > RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) !=
> 0);
> > - ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> > - switch (ret) {
> > + msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid,
> &ctx);
> > + switch (msg_result) {
> > case RTE_VHOST_MSG_RESULT_REPLY:
> > send_vhost_reply(dev, fd, &ctx);
> > /* Fall-through */
> > @@ -3080,12 +3081,12 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > goto skip_to_post_handle;
> >
> > if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0)
> > {
> > - ret = RTE_VHOST_MSG_RESULT_ERR;
> > + msg_result = RTE_VHOST_MSG_RESULT_ERR;
> > } else {
> > - ret = msg_handler->callback(&dev, &ctx, fd);
> > + msg_result = msg_handler->callback(&dev, &ctx, fd);
> > }
> >
> > - switch (ret) {
> > + switch (msg_result) {
> > case RTE_VHOST_MSG_RESULT_ERR:
> > VHOST_LOG_CONFIG(dev->ifname, ERR,
> > "processing %s failed.\n",
> > @@ -3110,11 +3111,11 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > }
> >
> > skip_to_post_handle:
> > - if (ret != RTE_VHOST_MSG_RESULT_ERR &&
> > + if (msg_result != RTE_VHOST_MSG_RESULT_ERR &&
> > dev->extern_ops.post_msg_handle) {
> > RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) !=
> 0);
> > - ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
> > - switch (ret) {
> > + msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid,
> > &ctx);
> > + switch (msg_result) {
> > case RTE_VHOST_MSG_RESULT_REPLY:
> > send_vhost_reply(dev, fd, &ctx);
> > /* Fall-through */
> > @@ -3133,7 +3134,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > "vhost message (req: %d) was not handled.\n",
> > request);
> > close_msg_fds(&ctx);
> > - ret = RTE_VHOST_MSG_RESULT_ERR;
> > + msg_result = RTE_VHOST_MSG_RESULT_ERR;
> > }
> >
> > /*
> > @@ -3142,17 +3143,16 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
> > */
> > if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
> > - ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
> > + ctx.msg.payload.u64 = msg_result ==
> RTE_VHOST_MSG_RESULT_ERR;
> > ctx.msg.size = sizeof(ctx.msg.payload.u64);
> > ctx.fd_num = 0;
> > send_vhost_reply(dev, fd, &ctx);
> > - } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
> > + } else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
> > VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message
> handling
> > failed.\n");
> > ret = -1;
> > goto unlock;
> > }
> >
> > - ret = 0;
> > for (i = 0; i < dev->nr_vring; i++) {
> > struct vhost_virtqueue *vq = dev->virtqueue[i];
> > bool cur_ready = vq_is_ready(dev, vq);
> > --
> > 1.8.3.1
* [PATCH v2] vhost: use dedicated variable for vhost message result code
2022-07-18 2:06 [PATCH] vhost: use another variable to store vhost msg result code Andy Pei
2022-09-22 13:26 ` Xia, Chenbo
@ 2022-09-23 2:32 ` Andy Pei
2022-09-26 6:57 ` Xia, Chenbo
1 sibling, 1 reply; 7+ messages in thread
From: Andy Pei @ 2022-09-23 2:32 UTC (permalink / raw)
To: dev; +Cc: chenbo.xia, maxime.coquelin
Currently, in function vhost_user_msg_handler, variable ret is used to
store both the vhost msg result code and function call return values.
After this patch, variable ret is used only to store function call
return values; a new dedicated variable msg_result is used to store
the vhost msg result. This improves readability.
Signed-off-by: Andy Pei <andy.pei@intel.com>
---
lib/vhost/vhost_user.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 0182090..6d93495 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -2954,6 +2954,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
struct vhu_msg_context ctx;
vhost_message_handler_t *msg_handler;
struct rte_vdpa_device *vdpa_dev;
+ int msg_result = RTE_VHOST_MSG_RESULT_OK;
int ret;
int unlock_required = 0;
bool handled;
@@ -3046,8 +3047,8 @@ static int is_vring_iotlb(struct virtio_net *dev,
handled = false;
if (dev->extern_ops.pre_msg_handle) {
RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
- ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
- switch (ret) {
+ msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
+ switch (msg_result) {
case RTE_VHOST_MSG_RESULT_REPLY:
send_vhost_reply(dev, fd, &ctx);
/* Fall-through */
@@ -3065,12 +3066,12 @@ static int is_vring_iotlb(struct virtio_net *dev,
goto skip_to_post_handle;
if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0) {
- ret = RTE_VHOST_MSG_RESULT_ERR;
+ msg_result = RTE_VHOST_MSG_RESULT_ERR;
} else {
- ret = msg_handler->callback(&dev, &ctx, fd);
+ msg_result = msg_handler->callback(&dev, &ctx, fd);
}
- switch (ret) {
+ switch (msg_result) {
case RTE_VHOST_MSG_RESULT_ERR:
VHOST_LOG_CONFIG(dev->ifname, ERR,
"processing %s failed.\n",
@@ -3095,11 +3096,11 @@ static int is_vring_iotlb(struct virtio_net *dev,
}
skip_to_post_handle:
- if (ret != RTE_VHOST_MSG_RESULT_ERR &&
+ if (msg_result != RTE_VHOST_MSG_RESULT_ERR &&
dev->extern_ops.post_msg_handle) {
RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
- ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
- switch (ret) {
+ msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
+ switch (msg_result) {
case RTE_VHOST_MSG_RESULT_REPLY:
send_vhost_reply(dev, fd, &ctx);
/* Fall-through */
@@ -3118,7 +3119,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
"vhost message (req: %d) was not handled.\n",
request);
close_msg_fds(&ctx);
- ret = RTE_VHOST_MSG_RESULT_ERR;
+ msg_result = RTE_VHOST_MSG_RESULT_ERR;
}
/*
@@ -3127,17 +3128,16 @@ static int is_vring_iotlb(struct virtio_net *dev,
* VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
*/
if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
- ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
+ ctx.msg.payload.u64 = msg_result == RTE_VHOST_MSG_RESULT_ERR;
ctx.msg.size = sizeof(ctx.msg.payload.u64);
ctx.fd_num = 0;
send_vhost_reply(dev, fd, &ctx);
- } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
+ } else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message handling failed.\n");
ret = -1;
goto unlock;
}
- ret = 0;
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
bool cur_ready = vq_is_ready(dev, vq);
--
1.8.3.1
* RE: [PATCH v2] vhost: use dedicated variable for vhost message result code
2022-09-23 2:32 ` [PATCH v2] vhost: use dedicated variable for vhost message " Andy Pei
@ 2022-09-26 6:57 ` Xia, Chenbo
2022-09-29 8:38 ` Xia, Chenbo
0 siblings, 1 reply; 7+ messages in thread
From: Xia, Chenbo @ 2022-09-26 6:57 UTC (permalink / raw)
To: Pei, Andy, dev; +Cc: maxime.coquelin
> -----Original Message-----
> From: Pei, Andy <andy.pei@intel.com>
> Sent: Friday, September 23, 2022 10:33 AM
> To: dev@dpdk.org
> Cc: Xia, Chenbo <chenbo.xia@intel.com>; maxime.coquelin@redhat.com
> Subject: [PATCH v2] vhost: use dedicated variable for vhost message result
> code
>
> Currently in function vhost_user_msg_handler, variable ret is used to
> store both vhost msg result code and function call return value.
> After this patch, variable ret is used only to store function call
> return value, a new dedicated variable msg_result is used to
> store vhost msg result. This can improve readability.
>
> Signed-off-by: Andy Pei <andy.pei@intel.com>
> ---
> lib/vhost/vhost_user.c | 24 ++++++++++++------------
> 1 file changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 0182090..6d93495 100644
> --- a/lib/vhost/vhost_user.c
> +++ b/lib/vhost/vhost_user.c
> @@ -2954,6 +2954,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> struct vhu_msg_context ctx;
> vhost_message_handler_t *msg_handler;
> struct rte_vdpa_device *vdpa_dev;
> + int msg_result = RTE_VHOST_MSG_RESULT_OK;
> int ret;
> int unlock_required = 0;
> bool handled;
> @@ -3046,8 +3047,8 @@ static int is_vring_iotlb(struct virtio_net *dev,
> handled = false;
> if (dev->extern_ops.pre_msg_handle) {
> RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
> - ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> - switch (ret) {
> + msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> + switch (msg_result) {
> case RTE_VHOST_MSG_RESULT_REPLY:
> send_vhost_reply(dev, fd, &ctx);
> /* Fall-through */
> @@ -3065,12 +3066,12 @@ static int is_vring_iotlb(struct virtio_net *dev,
> goto skip_to_post_handle;
>
> if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0)
> {
> - ret = RTE_VHOST_MSG_RESULT_ERR;
> + msg_result = RTE_VHOST_MSG_RESULT_ERR;
> } else {
> - ret = msg_handler->callback(&dev, &ctx, fd);
> + msg_result = msg_handler->callback(&dev, &ctx, fd);
> }
>
> - switch (ret) {
> + switch (msg_result) {
> case RTE_VHOST_MSG_RESULT_ERR:
> VHOST_LOG_CONFIG(dev->ifname, ERR,
> "processing %s failed.\n",
> @@ -3095,11 +3096,11 @@ static int is_vring_iotlb(struct virtio_net *dev,
> }
>
> skip_to_post_handle:
> - if (ret != RTE_VHOST_MSG_RESULT_ERR &&
> + if (msg_result != RTE_VHOST_MSG_RESULT_ERR &&
> dev->extern_ops.post_msg_handle) {
> RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
> - ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
> - switch (ret) {
> + msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid,
> &ctx);
> + switch (msg_result) {
> case RTE_VHOST_MSG_RESULT_REPLY:
> send_vhost_reply(dev, fd, &ctx);
> /* Fall-through */
> @@ -3118,7 +3119,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> "vhost message (req: %d) was not handled.\n",
> request);
> close_msg_fds(&ctx);
> - ret = RTE_VHOST_MSG_RESULT_ERR;
> + msg_result = RTE_VHOST_MSG_RESULT_ERR;
> }
>
> /*
> @@ -3127,17 +3128,16 @@ static int is_vring_iotlb(struct virtio_net *dev,
> * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
> */
> if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
> - ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
> + ctx.msg.payload.u64 = msg_result == RTE_VHOST_MSG_RESULT_ERR;
> ctx.msg.size = sizeof(ctx.msg.payload.u64);
> ctx.fd_num = 0;
> send_vhost_reply(dev, fd, &ctx);
> - } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
> + } else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
> VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message handling
> failed.\n");
> ret = -1;
> goto unlock;
> }
>
> - ret = 0;
> for (i = 0; i < dev->nr_vring; i++) {
> struct vhost_virtqueue *vq = dev->virtqueue[i];
> bool cur_ready = vq_is_ready(dev, vq);
> --
> 1.8.3.1
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
* RE: [PATCH v2] vhost: use dedicated variable for vhost message result code
2022-09-26 6:57 ` Xia, Chenbo
@ 2022-09-29 8:38 ` Xia, Chenbo
2022-09-29 13:49 ` Pei, Andy
0 siblings, 1 reply; 7+ messages in thread
From: Xia, Chenbo @ 2022-09-29 8:38 UTC (permalink / raw)
To: Pei, Andy, dev; +Cc: maxime.coquelin
> -----Original Message-----
> From: Xia, Chenbo <chenbo.xia@intel.com>
> Sent: Monday, September 26, 2022 2:58 PM
> To: Pei, Andy <andy.pei@intel.com>; dev@dpdk.org
> Cc: maxime.coquelin@redhat.com
> Subject: RE: [PATCH v2] vhost: use dedicated variable for vhost message
> result code
>
> > -----Original Message-----
> > From: Pei, Andy <andy.pei@intel.com>
> > Sent: Friday, September 23, 2022 10:33 AM
> > To: dev@dpdk.org
> > Cc: Xia, Chenbo <chenbo.xia@intel.com>; maxime.coquelin@redhat.com
> > Subject: [PATCH v2] vhost: use dedicated variable for vhost message
> result
> > code
> >
> > Currently in function vhost_user_msg_handler, variable ret is used to
> > store both vhost msg result code and function call return value.
> > After this patch, variable ret is used only to store function call
> > return value, a new dedicated variable msg_result is used to
> > store vhost msg result. This can improve readability.
> >
> > Signed-off-by: Andy Pei <andy.pei@intel.com>
> > ---
> > lib/vhost/vhost_user.c | 24 ++++++++++++------------
> > 1 file changed, 12 insertions(+), 12 deletions(-)
> >
> > diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> > index 0182090..6d93495 100644
> > --- a/lib/vhost/vhost_user.c
> > +++ b/lib/vhost/vhost_user.c
> > @@ -2954,6 +2954,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > struct vhu_msg_context ctx;
> > vhost_message_handler_t *msg_handler;
> > struct rte_vdpa_device *vdpa_dev;
> > + int msg_result = RTE_VHOST_MSG_RESULT_OK;
> > int ret;
> > int unlock_required = 0;
> > bool handled;
> > @@ -3046,8 +3047,8 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > handled = false;
> > if (dev->extern_ops.pre_msg_handle) {
> > RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
> > - ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> > - switch (ret) {
> > + msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> > + switch (msg_result) {
> > case RTE_VHOST_MSG_RESULT_REPLY:
> > send_vhost_reply(dev, fd, &ctx);
> > /* Fall-through */
> > @@ -3065,12 +3066,12 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > goto skip_to_post_handle;
> >
> > if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) != 0)
> > {
> > - ret = RTE_VHOST_MSG_RESULT_ERR;
> > + msg_result = RTE_VHOST_MSG_RESULT_ERR;
> > } else {
> > - ret = msg_handler->callback(&dev, &ctx, fd);
> > + msg_result = msg_handler->callback(&dev, &ctx, fd);
> > }
> >
> > - switch (ret) {
> > + switch (msg_result) {
> > case RTE_VHOST_MSG_RESULT_ERR:
> > VHOST_LOG_CONFIG(dev->ifname, ERR,
> > "processing %s failed.\n",
> > @@ -3095,11 +3096,11 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > }
> >
> > skip_to_post_handle:
> > - if (ret != RTE_VHOST_MSG_RESULT_ERR &&
> > + if (msg_result != RTE_VHOST_MSG_RESULT_ERR &&
> > dev->extern_ops.post_msg_handle) {
> > RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0);
> > - ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
> > - switch (ret) {
> > + msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid,
> > &ctx);
> > + switch (msg_result) {
> > case RTE_VHOST_MSG_RESULT_REPLY:
> > send_vhost_reply(dev, fd, &ctx);
> > /* Fall-through */
> > @@ -3118,7 +3119,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > "vhost message (req: %d) was not handled.\n",
> > request);
> > close_msg_fds(&ctx);
> > - ret = RTE_VHOST_MSG_RESULT_ERR;
> > + msg_result = RTE_VHOST_MSG_RESULT_ERR;
> > }
> >
> > /*
> > @@ -3127,17 +3128,16 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
> > */
> > if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
> > - ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
> > + ctx.msg.payload.u64 = msg_result == RTE_VHOST_MSG_RESULT_ERR;
> > ctx.msg.size = sizeof(ctx.msg.payload.u64);
> > ctx.fd_num = 0;
> > send_vhost_reply(dev, fd, &ctx);
> > - } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
> > + } else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
> > VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message handling
> > failed.\n");
> > ret = -1;
> > goto unlock;
> > }
> >
> > - ret = 0;
> > for (i = 0; i < dev->nr_vring; i++) {
> > struct vhost_virtqueue *vq = dev->virtqueue[i];
> > bool cur_ready = vq_is_ready(dev, vq);
> > --
> > 1.8.3.1
>
> Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
Applied to next-virtio/main, thanks
* RE: [PATCH v2] vhost: use dedicated variable for vhost message result code
2022-09-29 8:38 ` Xia, Chenbo
@ 2022-09-29 13:49 ` Pei, Andy
0 siblings, 0 replies; 7+ messages in thread
From: Pei, Andy @ 2022-09-29 13:49 UTC (permalink / raw)
To: Xia, Chenbo, dev; +Cc: maxime.coquelin
Hi Chenbo,
Thanks for your efforts.
> -----Original Message-----
> From: Xia, Chenbo <chenbo.xia@intel.com>
> Sent: Thursday, September 29, 2022 4:38 PM
> To: Pei, Andy <andy.pei@intel.com>; dev@dpdk.org
> Cc: maxime.coquelin@redhat.com
> Subject: RE: [PATCH v2] vhost: use dedicated variable for vhost message
> result code
>
> > -----Original Message-----
> > From: Xia, Chenbo <chenbo.xia@intel.com>
> > Sent: Monday, September 26, 2022 2:58 PM
> > To: Pei, Andy <andy.pei@intel.com>; dev@dpdk.org
> > Cc: maxime.coquelin@redhat.com
> > Subject: RE: [PATCH v2] vhost: use dedicated variable for vhost
> > message result code
> >
> > > -----Original Message-----
> > > From: Pei, Andy <andy.pei@intel.com>
> > > Sent: Friday, September 23, 2022 10:33 AM
> > > To: dev@dpdk.org
> > > Cc: Xia, Chenbo <chenbo.xia@intel.com>; maxime.coquelin@redhat.com
> > > Subject: [PATCH v2] vhost: use dedicated variable for vhost message
> > result
> > > code
> > >
> > > Currently in function vhost_user_msg_handler, variable ret is used
> > > to store both vhost msg result code and function call return value.
> > > After this patch, variable ret is used only to store function call
> > > return value, a new dedicated variable msg_result is used to store
> > > vhost msg result. This can improve readability.
> > >
> > > Signed-off-by: Andy Pei <andy.pei@intel.com>
> > > ---
> > > lib/vhost/vhost_user.c | 24 ++++++++++++------------
> > > 1 file changed, 12 insertions(+), 12 deletions(-)
> > >
> > > diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c index
> > > 0182090..6d93495 100644
> > > --- a/lib/vhost/vhost_user.c
> > > +++ b/lib/vhost/vhost_user.c
> > > @@ -2954,6 +2954,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > > struct vhu_msg_context ctx;
> > > vhost_message_handler_t *msg_handler;
> > > struct rte_vdpa_device *vdpa_dev;
> > > + int msg_result = RTE_VHOST_MSG_RESULT_OK;
> > > int ret;
> > > int unlock_required = 0;
> > > bool handled;
> > > @@ -3046,8 +3047,8 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > > handled = false;
> > > if (dev->extern_ops.pre_msg_handle) {
> > > RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context,
> msg) != 0);
> > > - ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx);
> > > - switch (ret) {
> > > + msg_result = (*dev->extern_ops.pre_msg_handle)(dev->vid,
> &ctx);
> > > + switch (msg_result) {
> > > case RTE_VHOST_MSG_RESULT_REPLY:
> > > send_vhost_reply(dev, fd, &ctx);
> > > /* Fall-through */
> > > @@ -3065,12 +3066,12 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > > goto skip_to_post_handle;
> > >
> > > if (!msg_handler->accepts_fd && validate_msg_fds(dev, &ctx, 0) !=
> > > 0) {
> > > - ret = RTE_VHOST_MSG_RESULT_ERR;
> > > + msg_result = RTE_VHOST_MSG_RESULT_ERR;
> > > } else {
> > > - ret = msg_handler->callback(&dev, &ctx, fd);
> > > + msg_result = msg_handler->callback(&dev, &ctx, fd);
> > > }
> > >
> > > - switch (ret) {
> > > + switch (msg_result) {
> > > case RTE_VHOST_MSG_RESULT_ERR:
> > > VHOST_LOG_CONFIG(dev->ifname, ERR,
> > > "processing %s failed.\n",
> > > @@ -3095,11 +3096,11 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > > }
> > >
> > > skip_to_post_handle:
> > > - if (ret != RTE_VHOST_MSG_RESULT_ERR &&
> > > + if (msg_result != RTE_VHOST_MSG_RESULT_ERR &&
> > > dev->extern_ops.post_msg_handle) {
> > > RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context,
> msg) != 0);
> > > - ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx);
> > > - switch (ret) {
> > > + msg_result = (*dev->extern_ops.post_msg_handle)(dev->vid,
> > > &ctx);
> > > + switch (msg_result) {
> > > case RTE_VHOST_MSG_RESULT_REPLY:
> > > send_vhost_reply(dev, fd, &ctx);
> > > /* Fall-through */
> > > @@ -3118,7 +3119,7 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > > "vhost message (req: %d) was not handled.\n",
> > > request);
> > > close_msg_fds(&ctx);
> > > - ret = RTE_VHOST_MSG_RESULT_ERR;
> > > + msg_result = RTE_VHOST_MSG_RESULT_ERR;
> > > }
> > >
> > > /*
> > > @@ -3127,17 +3128,16 @@ static int is_vring_iotlb(struct virtio_net *dev,
> > > * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
> > > */
> > > if (ctx.msg.flags & VHOST_USER_NEED_REPLY) {
> > > - ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
> > > + ctx.msg.payload.u64 = msg_result ==
> RTE_VHOST_MSG_RESULT_ERR;
> > > ctx.msg.size = sizeof(ctx.msg.payload.u64);
> > > ctx.fd_num = 0;
> > > send_vhost_reply(dev, fd, &ctx);
> > > - } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
> > > + } else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
> > > VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message
> handling
> > > failed.\n");
> > > ret = -1;
> > > goto unlock;
> > > }
> > >
> > > - ret = 0;
> > > for (i = 0; i < dev->nr_vring; i++) {
> > > struct vhost_virtqueue *vq = dev->virtqueue[i];
> > > bool cur_ready = vq_is_ready(dev, vq);
> > > --
> > > 1.8.3.1
> >
> > Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
>
> Applied to next-virtio/main, thanks