DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Zhihong Wang <zhihong.wang@intel.com>, dev@dpdk.org
Cc: jianfeng.tan@intel.com, tiwei.bie@intel.com,
	yliu@fridaylinux.org, cunming.liang@intel.com,
	xiao.w.wang@intel.com, dan.daly@intel.com
Subject: Re: [dpdk-dev] [PATCH v4 4/5] vhost: adapt vhost lib for selective datapath
Date: Sat, 31 Mar 2018 09:35:36 +0200	[thread overview]
Message-ID: <ea4dff26-d739-60b5-528e-4079ae04b3c6@redhat.com> (raw)
In-Reply-To: <20180310100148.12030-5-zhihong.wang@intel.com>



On 03/10/2018 11:01 AM, Zhihong Wang wrote:
> This patch adapts vhost lib for selective datapath by calling device ops
> at the corresponding stage.
> 
> Signed-off-by: Zhihong Wang <zhihong.wang@intel.com>
> ---
> Changes in v4:
> 
>   1. Remove the "engine" concept in the lib.
> 
> ---
> Changes in v2:
> 
>   1. Ensure negotiated capabilities are supported in vhost-user lib.
> 
>   2. Configure the data path at the right time.
> 
>   lib/librte_vhost/rte_vhost.h           | 27 ++++++++++
>   lib/librte_vhost/rte_vhost_version.map |  2 +
>   lib/librte_vhost/socket.c              | 94 ++++++++++++++++++++++++++++++++--
>   lib/librte_vhost/vhost.c               |  3 ++
>   lib/librte_vhost/vhost.h               |  2 +
>   lib/librte_vhost/vhost_user.c          | 54 +++++++++++++++++--
>   6 files changed, 172 insertions(+), 10 deletions(-)
> 
> diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
> index d50f4c67d..3c3334d3e 100644
> --- a/lib/librte_vhost/rte_vhost.h
> +++ b/lib/librte_vhost/rte_vhost.h
> @@ -279,6 +279,33 @@ int rte_vhost_driver_disable_features(const char *path, uint64_t features);
>   int rte_vhost_driver_get_features(const char *path, uint64_t *features);
>   
>   /**
> + * Get the protocol feature bits before feature negotiation.
> + *
> + * @param path
> + *  The vhost-user socket file path
> + * @param protocol_features
> + *  A pointer to store the queried protocol feature bits
> + * @return
> + *  0 on success, -1 on failure
> + */
> +int __rte_experimental
> +rte_vhost_driver_get_protocol_features(const char *path,
> +		uint64_t *protocol_features);
> +
> +/**
> + * Get the queue number bits before feature negotiation.
> + *
> + * @param path
> + *  The vhost-user socket file path
> + * @param queue_num
> + *  A pointer to store the queried queue number bits
> + * @return
> + *  0 on success, -1 on failure
> + */
> +int __rte_experimental
> +rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num);
> +
> +/**
>    * Get the feature bits after negotiation
>    *
>    * @param vid
> diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
> index 6e2d5364a..812ccd72b 100644
> --- a/lib/librte_vhost/rte_vhost_version.map
> +++ b/lib/librte_vhost/rte_vhost_version.map
> @@ -67,4 +67,6 @@ EXPERIMENTAL {
>   	rte_vhost_driver_set_vdpa_did;
>   	rte_vhost_driver_get_vdpa_did;
>   	rte_vhost_get_vdpa_did;
> +	rte_vhost_driver_get_protocol_features;
> +	rte_vhost_driver_get_queue_num;
>   } DPDK_18.02;
> diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
> index 3d58da94e..ba7b422a0 100644
> --- a/lib/librte_vhost/socket.c
> +++ b/lib/librte_vhost/socket.c
> @@ -216,6 +216,8 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
>   
>   	vhost_set_builtin_virtio_net(vid, vsocket->use_builtin_virtio_net);
>   
> +	vhost_set_vdpa_did(vid, vsocket->did);
> +
>   	if (vsocket->dequeue_zero_copy)
>   		vhost_enable_dequeue_zero_copy(vid);
>   
> @@ -648,20 +650,102 @@ int
>   rte_vhost_driver_get_features(const char *path, uint64_t *features)
>   {
>   	struct vhost_user_socket *vsocket;
> +	uint64_t vdpa_features;
> +	int did = -1;
> +	int ret = 0;
>   
>   	pthread_mutex_lock(&vhost_user.mutex);
>   	vsocket = find_vhost_user_socket(path);
> -	if (vsocket)
> -		*features = vsocket->features;
> +	if (vsocket) {
> +		did = vsocket->did;
> +		if (did < 0 || vdpa_devices[did]->ops->feature_get == NULL)
> +			*features = vsocket->features;
> +		else if (vdpa_devices[did]->ops->feature_get(did,
> +					&vdpa_features) < 0) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +					"failed to get vdpa features "
> +					"for socket file %s.\n", path);
> +			ret = -1;
> +		} else
> +			*features = vsocket->features & vdpa_features;

It seems correct, but it is not very intuitive.
Also, you have to put braces on all branches if any of the if/else if/else
branches has them.

What about something like this:

int
rte_vhost_driver_get_features(const char *path, uint64_t *features)
{
	struct vhost_user_socket *vsocket;
	uint64_t vdpa_features;
	int did = -1;
	int ret = 0;
	struct rte_vdpa_device *vdpa_dev;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	if (!vsocket) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"socket file %s is not registered yet.\n"
			, path);
		ret = -1;
		goto out_unlock;
	}

	did = vsocket->did;
	vdpa_dev = rte_vdpa_device_get(did);
	if (!vdpa_dev || !vdpa_dev->ops->feature_get) {
		*features = vsocket->features;
		goto out_unlock;
	}

	if (vdpa_dev->ops->feature_get(did, &vdpa_features) < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
				"failed to get vdpa features "
				"for socket file %s.\n", path);
		ret = -1;
		goto out_unlock;
	}

	*features = vsocket->features & vdpa_features;

out_unlock:
	pthread_mutex_unlock(&vhost_user.mutex);
	return ret;
}

with in rte_vdpa.h:

static inline struct rte_vdpa_device *
rte_vdpa_device_get(int did)
{
	if (did < 0 || did >= MAX_VHOST_DEVICE)
		return NULL;

	return vdpa_devices[did];
}


Doing this you get three checks in one, so you also avoid a NULL pointer
dereference when did is >= 0 but invalid (out of range or not registered).

Same logic applies to functions below:
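
For instance, with the helper above, rte_vhost_driver_get_protocol_features()
could be rewritten along the same lines (untested sketch, just to illustrate
the pattern):

int
rte_vhost_driver_get_protocol_features(const char *path,
		uint64_t *protocol_features)
{
	struct vhost_user_socket *vsocket;
	uint64_t vdpa_protocol_features;
	struct rte_vdpa_device *vdpa_dev;
	int did = -1;
	int ret = 0;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	if (!vsocket) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"socket file %s is not registered yet.\n", path);
		ret = -1;
		goto out_unlock;
	}

	did = vsocket->did;
	vdpa_dev = rte_vdpa_device_get(did);
	if (!vdpa_dev || !vdpa_dev->ops->protocol_feature_get) {
		/* No vDPA device attached, report the builtin features. */
		*protocol_features = VHOST_USER_PROTOCOL_FEATURES;
		goto out_unlock;
	}

	if (vdpa_dev->ops->protocol_feature_get(did,
				&vdpa_protocol_features) < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
				"failed to get vdpa protocol features "
				"for socket file %s.\n", path);
		ret = -1;
		goto out_unlock;
	}

	*protocol_features = VHOST_USER_PROTOCOL_FEATURES
			& vdpa_protocol_features;

out_unlock:
	pthread_mutex_unlock(&vhost_user.mutex);
	return ret;
}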

> +	} else {
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"socket file %s is not registered yet.\n", path);
> +		ret = -1;
> +	}
>   	pthread_mutex_unlock(&vhost_user.mutex);
>   
> -	if (!vsocket) {
> +	return ret;
> +}
> +
> +int
> +rte_vhost_driver_get_protocol_features(const char *path,
> +		uint64_t *protocol_features)
> +{
> +	struct vhost_user_socket *vsocket;
> +	uint64_t vdpa_protocol_features;
> +	int did = -1;
> +	int ret = 0;
> +
> +	pthread_mutex_lock(&vhost_user.mutex);
> +	vsocket = find_vhost_user_socket(path);
> +	if (vsocket) {
> +		did = vsocket->did;
> +		if (did < 0 || vdpa_devices[did]->ops->protocol_feature_get
> +				== NULL)
> +			*protocol_features = VHOST_USER_PROTOCOL_FEATURES;
> +		else if (vdpa_devices[did]->ops->protocol_feature_get(did,
> +					&vdpa_protocol_features) < 0) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +					"failed to get vdpa protocol features "
> +					"for socket file %s.\n", path);
> +			ret = -1;
> +		} else
> +			*protocol_features = VHOST_USER_PROTOCOL_FEATURES
> +				& vdpa_protocol_features;
> +	} else {
>   		RTE_LOG(ERR, VHOST_CONFIG,
>   			"socket file %s is not registered yet.\n", path);
> -		return -1;
> +		ret = -1;
> +	}
> +	pthread_mutex_unlock(&vhost_user.mutex);
> +
> +	return ret;
> +}
> +
> +int
> +rte_vhost_driver_get_queue_num(const char *path,
> +		uint32_t *queue_num)
> +{
> +	struct vhost_user_socket *vsocket;
> +	uint32_t vdpa_queue_num;
> +	int did = -1;
> +	int ret = 0;
> +
> +	pthread_mutex_lock(&vhost_user.mutex);
> +	vsocket = find_vhost_user_socket(path);
> +	if (vsocket) {
> +		did = vsocket->did;
> +		if (did < 0 || vdpa_devices[did]->ops->queue_num_get == NULL)
> +			*queue_num = VHOST_MAX_QUEUE_PAIRS;
> +		else if (vdpa_devices[did]->ops->queue_num_get(
> +					did, &vdpa_queue_num) < 0) {
> +			RTE_LOG(ERR, VHOST_CONFIG,
> +					"failed to get vdpa queue number "
> +					"for socket file %s.\n", path);
> +			ret = -1;
> +		} else if (vdpa_queue_num > VHOST_MAX_QUEUE_PAIRS)
> +			*queue_num = VHOST_MAX_QUEUE_PAIRS;
> +		else
> +			*queue_num = vdpa_queue_num;
>   	} else {
> -		return 0;
> +		RTE_LOG(ERR, VHOST_CONFIG,
> +			"socket file %s is not registered yet.\n", path);
> +		ret = -1;
>   	}
> +	pthread_mutex_unlock(&vhost_user.mutex);
> +
> +	return ret;
>   }
>   
>   /*
> diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
> index 1740cc1ab..66b6b492f 100644
> --- a/lib/librte_vhost/vhost.c
> +++ b/lib/librte_vhost/vhost.c
> @@ -296,11 +296,14 @@ void
>   vhost_destroy_device(int vid)
>   {
>   	struct virtio_net *dev = get_device(vid);
> +	int did = dev->did;
>   
>   	if (dev == NULL)
>   		return;
>   
>   	if (dev->flags & VIRTIO_DEV_RUNNING) {
> +		if (did >= 0 && vdpa_devices[did]->ops->dev_close)
> +			vdpa_devices[did]->ops->dev_close(dev->vid);

Ditto.
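
E.g. something like this (untested), using the helper:

	struct rte_vdpa_device *vdpa_dev = rte_vdpa_device_get(dev->did);

	if (vdpa_dev && vdpa_dev->ops->dev_close)
		vdpa_dev->ops->dev_close(dev->vid);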

>   		dev->flags &= ~VIRTIO_DEV_RUNNING;
>   		dev->notify_ops->destroy_device(vid);
>   	}
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 2970c1eab..30e8a0cfe 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -27,6 +27,8 @@
>   #define VIRTIO_DEV_READY 2
>   /* Used to indicate that the built-in vhost net device backend is enabled */
>   #define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
> +/* Used to indicate that the device has its own data path and configured */
> +#define VIRTIO_DEV_VDPA_CONFIGURED 8
>   
>   /* Backend value set by guest. */
>   #define VIRTIO_DEV_STOPPED -1
> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> index 5c5361066..5049c8e55 100644
> --- a/lib/librte_vhost/vhost_user.c
> +++ b/lib/librte_vhost/vhost_user.c
> @@ -133,7 +133,11 @@ vhost_user_set_owner(void)
>   static int
>   vhost_user_reset_owner(struct virtio_net *dev)
>   {
> +	int did = dev->did;
> +
>   	if (dev->flags & VIRTIO_DEV_RUNNING) {
> +		if (did >= 0 && vdpa_devices[did]->ops->dev_close)
> +			vdpa_devices[did]->ops->dev_close(dev->vid);

Ditto.

>   		dev->flags &= ~VIRTIO_DEV_RUNNING;
>   		dev->notify_ops->destroy_device(dev->vid);
>   	}
> @@ -156,12 +160,25 @@ vhost_user_get_features(struct virtio_net *dev)
>   }
>   
>   /*
> + * The queue number that we support are requested.
> + */
> +static uint32_t
> +vhost_user_get_queue_num(struct virtio_net *dev)
> +{
> +	uint32_t queue_num = 0;
> +
> +	rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
> +	return (uint64_t)queue_num;
> +}
> +
> +/*
>    * We receive the negotiated features supported by us and the virtio device.
>    */
>   static int
>   vhost_user_set_features(struct virtio_net *dev, uint64_t features)
>   {
>   	uint64_t vhost_features = 0;
> +	int did = dev->did;
>   
>   	rte_vhost_driver_get_features(dev->ifname, &vhost_features);
>   	if (features & ~vhost_features) {
> @@ -191,6 +208,9 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
>   			dev->notify_ops->features_changed(dev->vid, features);
>   	}
>   
> +	if (did >= 0 && vdpa_devices[did]->ops->feature_set)
> +		vdpa_devices[did]->ops->feature_set(dev->vid);
> +
>   	dev->features = features;
>   	if (dev->features &
>   		((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
> @@ -933,14 +953,18 @@ vhost_user_get_vring_base(struct virtio_net *dev,
>   			  VhostUserMsg *msg)
>   {
>   	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
> +	int did = dev->did;
>   
>   	/* We have to stop the queue (virtio) if it is running. */
>   	if (dev->flags & VIRTIO_DEV_RUNNING) {
> +		if (did >= 0 && vdpa_devices[did]->ops->dev_close)
> +			vdpa_devices[did]->ops->dev_close(dev->vid);
>   		dev->flags &= ~VIRTIO_DEV_RUNNING;
>   		dev->notify_ops->destroy_device(dev->vid);
>   	}
>   
>   	dev->flags &= ~VIRTIO_DEV_READY;
> +	dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
>   
>   	/* Here we are safe to get the last avail index */
>   	msg->payload.state.num = vq->last_avail_idx;
> @@ -983,16 +1007,22 @@ vhost_user_set_vring_enable(struct virtio_net *dev,
>   			    VhostUserMsg *msg)
>   {
>   	int enable = (int)msg->payload.state.num;
> +	int index = (int)msg->payload.state.index;
> +	int did = dev->did;
>   
>   	RTE_LOG(INFO, VHOST_CONFIG,
>   		"set queue enable: %d to qp idx: %d\n",
> -		enable, msg->payload.state.index);
> +		enable, index);
> +
> +	if (did >= 0 && vdpa_devices[did]->ops->vring_state_set)
> +		vdpa_devices[did]->ops->vring_state_set(dev->vid,
> +				index, enable);
>   
>   	if (dev->notify_ops->vring_state_changed)
>   		dev->notify_ops->vring_state_changed(dev->vid,
> -				msg->payload.state.index, enable);
> +				index, enable);
>   
> -	dev->virtqueue[msg->payload.state.index]->enabled = enable;
> +	dev->virtqueue[index]->enabled = enable;
>   
>   	return 0;
>   }
> @@ -1001,9 +1031,10 @@ static void
>   vhost_user_get_protocol_features(struct virtio_net *dev,
>   				 struct VhostUserMsg *msg)
>   {
> -	uint64_t features, protocol_features = VHOST_USER_PROTOCOL_FEATURES;
> +	uint64_t features, protocol_features;
>   
>   	rte_vhost_driver_get_features(dev->ifname, &features);
> +	rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
>   
>   	/*
>   	 * REPLY_ACK protocol feature is only mandatory for now
> @@ -1099,6 +1130,7 @@ static int
>   vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
>   {
>   	uint8_t *mac = (uint8_t *)&msg->payload.u64;
> +	int did = dev->did;
>   
>   	RTE_LOG(DEBUG, VHOST_CONFIG,
>   		":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
> @@ -1114,6 +1146,8 @@ vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
>   	 */
>   	rte_smp_wmb();
>   	rte_atomic16_set(&dev->broadcast_rarp, 1);
> +	if (did >= 0 && vdpa_devices[did]->ops->migration_done)
> +		vdpa_devices[did]->ops->migration_done(dev->vid);
>   
>   	return 0;
>   }
> @@ -1375,6 +1409,7 @@ vhost_user_msg_handler(int vid, int fd)
>   {
>   	struct virtio_net *dev;
>   	struct VhostUserMsg msg;
> +	int did;
>   	int ret;
>   	int unlock_required = 0;
>   
> @@ -1527,7 +1562,7 @@ vhost_user_msg_handler(int vid, int fd)
>   		break;
>   
>   	case VHOST_USER_GET_QUEUE_NUM:
> -		msg.payload.u64 = VHOST_MAX_QUEUE_PAIRS;
> +		msg.payload.u64 = (uint64_t)vhost_user_get_queue_num(dev);
>   		msg.size = sizeof(msg.payload.u64);
>   		send_vhost_reply(fd, &msg);
>   		break;
> @@ -1580,6 +1615,15 @@ vhost_user_msg_handler(int vid, int fd)
>   		}
>   	}
>   
> +	did = dev->did;
> +	if (did >= 0 && virtio_is_ready(dev) &&
> +			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
> +			msg.request.master == VHOST_USER_SET_VRING_ENABLE) {
> +		if (vdpa_devices[did]->ops->dev_conf)
> +			vdpa_devices[did]->ops->dev_conf(vid);
> +		dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
> +	}
> +
>   	return 0;
>   }
>   
> 

Other than the code refactoring I propose, the patch looks good to me.

Thanks!
Maxime


Thread overview: 67+ messages
2018-02-02 23:28 [dpdk-dev] [PATCH 0/7] vhost: support " Zhihong Wang
2018-02-02 23:28 ` [dpdk-dev] [PATCH 1/7] vhost: make capabilities configurable Zhihong Wang
2018-02-06 10:19   ` Maxime Coquelin
2018-02-08  3:03     ` Wang, Zhihong
2018-02-02 23:28 ` [dpdk-dev] [PATCH 2/7] vhost: export vhost feature definitions Zhihong Wang
2018-02-02 23:28 ` [dpdk-dev] [PATCH 3/7] vhost: support selective datapath Zhihong Wang
2018-02-02 23:28 ` [dpdk-dev] [PATCH 4/7] vhost: add apis for datapath configuration Zhihong Wang
2018-02-02 23:28 ` [dpdk-dev] [PATCH 5/7] vhost: adapt vhost lib for selective datapath Zhihong Wang
2018-02-02 23:28 ` [dpdk-dev] [PATCH 6/7] vhost: get callfd before device setup Zhihong Wang
2018-02-02 23:28 ` [dpdk-dev] [PATCH 7/7] vhost: export new apis Zhihong Wang
2018-03-05  9:20 ` [dpdk-dev] [PATCH v2 0/6] vhost: support selective datapath Zhihong Wang
2018-03-05  9:20   ` [dpdk-dev] [PATCH v2 1/6] vhost: export vhost feature definitions Zhihong Wang
2018-03-06  9:37     ` Tan, Jianfeng
2018-03-06 14:03       ` Maxime Coquelin
2018-03-15 10:58         ` Wang, Zhihong
2018-03-05  9:20   ` [dpdk-dev] [PATCH v2 2/6] vhost: support selective datapath Zhihong Wang
2018-03-05  9:20   ` [dpdk-dev] [PATCH v2 3/6] vhost: add apis for datapath configuration Zhihong Wang
2018-03-05  9:20   ` [dpdk-dev] [PATCH v2 4/6] vhost: adapt vhost lib for selective datapath Zhihong Wang
2018-03-05  9:20   ` [dpdk-dev] [PATCH v2 5/6] vhost: add apis for live migration Zhihong Wang
2018-03-05  9:20   ` [dpdk-dev] [PATCH v2 6/6] vhost: export new apis Zhihong Wang
2018-03-06  9:51     ` Tan, Jianfeng
2018-03-15 10:55       ` Wang, Zhihong
2018-03-19 10:12 ` [dpdk-dev] [PATCH v3 0/5] vhost: support selective datapath Zhihong Wang
2018-03-19 10:12   ` [dpdk-dev] [PATCH v3 1/5] vhost: export vhost feature definitions Zhihong Wang
2018-03-19 10:12   ` [dpdk-dev] [PATCH v3 2/5] vhost: support selective datapath Zhihong Wang
2018-03-21 21:05     ` Maxime Coquelin
2018-03-22  7:55       ` Wang, Zhihong
2018-03-22  8:31         ` Maxime Coquelin
2018-03-19 10:12   ` [dpdk-dev] [PATCH v3 3/5] vhost: add apis for datapath configuration Zhihong Wang
2018-03-21 21:08     ` Maxime Coquelin
2018-03-22  8:22       ` Wang, Zhihong
2018-03-22 14:18         ` Maxime Coquelin
2018-03-23 10:35           ` Wang, Zhihong
2018-03-19 10:12   ` [dpdk-dev] [PATCH v3 4/5] vhost: adapt vhost lib for selective datapath Zhihong Wang
2018-03-19 10:13   ` [dpdk-dev] [PATCH v3 5/5] vhost: add apis for live migration Zhihong Wang
2018-03-29 12:15   ` [dpdk-dev] [PATCH v3 0/5] vhost: support selective datapath Wodkowski, PawelX
2018-03-30  9:35     ` Wang, Zhihong
2018-03-30 10:00 ` [dpdk-dev] [PATCH v4 " Zhihong Wang
2018-03-30 10:00   ` [dpdk-dev] [PATCH v4 1/5] vhost: export vhost feature definitions Zhihong Wang
2018-03-31  5:56     ` Maxime Coquelin
2018-03-30 10:01   ` [dpdk-dev] [PATCH v4 2/5] vhost: support selective datapath Zhihong Wang
2018-03-31  6:10     ` Maxime Coquelin
2018-04-02  1:58       ` Wang, Zhihong
2018-03-31  7:38     ` Maxime Coquelin
2018-04-02  2:03       ` Wang, Zhihong
2018-03-30 10:01   ` [dpdk-dev] [PATCH v4 3/5] vhost: add apis for datapath configuration Zhihong Wang
2018-03-31  7:04     ` Maxime Coquelin
2018-04-02  2:01       ` Wang, Zhihong
2018-03-30 10:01   ` [dpdk-dev] [PATCH v4 4/5] vhost: adapt vhost lib for selective datapath Zhihong Wang
2018-03-31  7:35     ` Maxime Coquelin [this message]
2018-04-02 11:52       ` Wang, Zhihong
2018-03-30 10:01   ` [dpdk-dev] [PATCH v4 5/5] vhost: add apis for live migration Zhihong Wang
2018-03-31  7:39     ` Maxime Coquelin
2018-04-02 11:46 ` [dpdk-dev] [PATCH v5 0/5] vhost: support selective datapath Zhihong Wang
2018-04-02 11:46   ` [dpdk-dev] [PATCH v5 1/5] vhost: export vhost feature definitions Zhihong Wang
2018-04-02 11:46   ` [dpdk-dev] [PATCH v5 2/5] vhost: support selective datapath Zhihong Wang
2018-04-03  8:02     ` Maxime Coquelin
2018-04-15 17:39       ` Thomas Monjalon
2018-04-16  7:26         ` Maxime Coquelin
2018-04-03  8:19     ` Maxime Coquelin
2018-04-03 14:35       ` Wang, Zhihong
2018-04-02 11:46   ` [dpdk-dev] [PATCH v5 3/5] vhost: add apis for datapath configuration Zhihong Wang
2018-04-03  8:07     ` Maxime Coquelin
2018-04-02 11:46   ` [dpdk-dev] [PATCH v5 4/5] vhost: adapt vhost lib for selective datapath Zhihong Wang
2018-04-03  8:05     ` Maxime Coquelin
2018-04-02 11:46   ` [dpdk-dev] [PATCH v5 5/5] vhost: add apis for live migration Zhihong Wang
2018-04-03  8:27   ` [dpdk-dev] [PATCH v5 0/5] vhost: support selective datapath Maxime Coquelin
