From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 3669EA04F1; Thu, 18 Jun 2020 18:28:34 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id C09B21BFB9; Thu, 18 Jun 2020 18:28:18 +0200 (CEST) Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id C05791BFA9 for ; Thu, 18 Jun 2020 18:28:13 +0200 (CEST) Received: from Internal Mail-Server by MTLPINE1 (envelope-from matan@mellanox.com) with SMTP; 18 Jun 2020 19:28:10 +0300 Received: from pegasus25.mtr.labs.mlnx. (pegasus25.mtr.labs.mlnx [10.210.16.10]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 05IGS9EQ008363; Thu, 18 Jun 2020 19:28:10 +0300 From: Matan Azrad To: Maxime Coquelin , Xiao Wang Cc: dev@dpdk.org Date: Thu, 18 Jun 2020 16:28:05 +0000 Message-Id: <1592497686-433697-4-git-send-email-matan@mellanox.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: <1592497686-433697-1-git-send-email-matan@mellanox.com> References: <1592497686-433697-1-git-send-email-matan@mellanox.com> Subject: [dpdk-dev] [PATCH v1 3/4] vhost: improve device ready definition X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Some guest drivers may not configure disabled virtio queues. In this case, the vhost management never triggers the vDPA device configuration because it waits for the device to be ready. The current ready state means that all the virtio queues should be configured regardless of the enablement status. In order to support this case, this patch changes the ready state: The device is ready when at least 1 queue pair is configured and enabled. So, now, the vDPA driver will be configured when the first queue pair is configured and enabled. 
Also the queue state operation is changed to the following rules: 1. queue becomes ready (enabled and fully configured) - set_vring_state(enabled). 2. queue becomes not ready - set_vring_state(disabled). 3. queue stays ready and VHOST_USER_SET_VRING_ENABLE message was handled - set_vring_state(enabled). The parallel operations for the application are adjusted too. Signed-off-by: Matan Azrad --- lib/librte_vhost/vhost_user.c | 51 ++++++++++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c index b0849b9..cfd5f27 100644 --- a/lib/librte_vhost/vhost_user.c +++ b/lib/librte_vhost/vhost_user.c @@ -1295,7 +1295,7 @@ { bool rings_ok; - if (!vq) + if (!vq || !vq->enabled) return false; if (vq_is_packed(dev)) @@ -1309,24 +1309,27 @@ vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD; } +#define VIRTIO_DEV_NUM_VQS_TO_BE_READY 2u + static int virtio_is_ready(struct virtio_net *dev) { struct vhost_virtqueue *vq; uint32_t i; - if (dev->nr_vring == 0) + if (dev->nr_vring < VIRTIO_DEV_NUM_VQS_TO_BE_READY) return 0; - for (i = 0; i < dev->nr_vring; i++) { + for (i = 0; i < VIRTIO_DEV_NUM_VQS_TO_BE_READY; i++) { vq = dev->virtqueue[i]; if (!vq_is_ready(dev, vq)) return 0; } - VHOST_LOG_CONFIG(INFO, - "virtio is now ready for processing.\n"); + if (!(dev->flags & VIRTIO_DEV_READY)) + VHOST_LOG_CONFIG(INFO, + "virtio is now ready for processing.\n"); return 1; } @@ -1970,8 +1973,6 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused, struct virtio_net *dev = *pdev; int enable = (int)msg->payload.state.num; int index = (int)msg->payload.state.index; - struct rte_vdpa_device *vdpa_dev; - int did = -1; if (validate_msg_fds(msg, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; @@ -1980,15 +1981,6 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused, "set queue enable: %d to qp idx: %d\n", enable, index); - did = dev->vdpa_dev_id; - vdpa_dev = 
rte_vdpa_get_device(did); - if (vdpa_dev && vdpa_dev->ops->set_vring_state) - vdpa_dev->ops->set_vring_state(dev->vid, index, enable); - - if (dev->notify_ops->vring_state_changed) - dev->notify_ops->vring_state_changed(dev->vid, - index, enable); - /* On disable, rings have to be stopped being processed. */ if (!enable && dev->dequeue_zero_copy) drain_zmbuf_list(dev->virtqueue[index]); @@ -2622,11 +2614,13 @@ typedef int (*vhost_message_handler_t)(struct virtio_net **pdev, struct virtio_net *dev; struct VhostUserMsg msg; struct rte_vdpa_device *vdpa_dev; + bool ready[VHOST_MAX_VRING]; int did = -1; int ret; int unlock_required = 0; bool handled; int request; + uint32_t i; dev = get_device(vid); if (dev == NULL) @@ -2668,6 +2662,10 @@ typedef int (*vhost_message_handler_t)(struct virtio_net **pdev, VHOST_LOG_CONFIG(DEBUG, "External request %d\n", request); } + /* Save ready status for all the VQs before message handle. */ + for (i = 0; i < VHOST_MAX_VRING; i++) + ready[i] = vq_is_ready(dev, dev->virtqueue[i]); + ret = vhost_user_check_and_alloc_queue_pair(dev, &msg); if (ret < 0) { VHOST_LOG_CONFIG(ERR, @@ -2802,6 +2800,25 @@ typedef int (*vhost_message_handler_t)(struct virtio_net **pdev, return -1; } + did = dev->vdpa_dev_id; + vdpa_dev = rte_vdpa_get_device(did); + /* Update ready status. 
*/ + for (i = 0; i < VHOST_MAX_VRING; i++) { + bool cur_ready = vq_is_ready(dev, dev->virtqueue[i]); + + if ((cur_ready && request == VHOST_USER_SET_VRING_ENABLE && + i == msg.payload.state.index) || + cur_ready != ready[i]) { + if (vdpa_dev && vdpa_dev->ops->set_vring_state) + vdpa_dev->ops->set_vring_state(dev->vid, i, + (int)cur_ready); + + if (dev->notify_ops->vring_state_changed) + dev->notify_ops->vring_state_changed(dev->vid, + i, (int)cur_ready); + } + } + if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) { dev->flags |= VIRTIO_DEV_READY; @@ -2816,8 +2833,6 @@ typedef int (*vhost_message_handler_t)(struct virtio_net **pdev, } } - did = dev->vdpa_dev_id; - vdpa_dev = rte_vdpa_get_device(did); if (vdpa_dev && virtio_is_ready(dev) && !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) && msg.request.master == VHOST_USER_SET_VRING_CALL) { -- 1.8.3.1