From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by dpdk.org (Postfix) with ESMTP id A9A075963 for ; Thu, 5 May 2016 10:59:44 +0200 (CEST) Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by fmsmga103.fm.intel.com with ESMTP; 05 May 2016 01:59:46 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.24,581,1455004800"; d="scan'208";a="959365819" Received: from dpdk06.sh.intel.com ([10.239.128.225]) by fmsmga001.fm.intel.com with ESMTP; 05 May 2016 01:59:43 -0700 From: Jianfeng Tan To: dev@dpdk.org Cc: Jianfeng Tan , yuanhan.liu@linux.intel.com, nakajima.yoshihiro@lab.ntt.co.jp, nhorman@tuxdriver.com Date: Thu, 5 May 2016 08:59:41 +0000 Message-Id: <1462438781-139674-4-git-send-email-jianfeng.tan@intel.com> X-Mailer: git-send-email 2.1.4 In-Reply-To: <1462438781-139674-1-git-send-email-jianfeng.tan@intel.com> References: <1462438781-139674-1-git-send-email-jianfeng.tan@intel.com> Subject: [dpdk-dev] [PATCH 3/3] virtio-user: add mq in virtual pci driver X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Thu, 05 May 2016 08:59:45 -0000 Partially implement the ctrl queue to handle control commands of class VIRTIO_NET_CTRL_MQ with command VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, providing multiqueue (mq) support. After the command is filled into the ctrl queue, we dequeue it in notify_queue() and invoke the device-emulation method to enable/disable queue pairs. 
Signed-off-by: Jianfeng Tan --- drivers/net/virtio/virtio_user/virtio_user_pci.c | 89 +++++++++++++++++++++++- 1 file changed, 87 insertions(+), 2 deletions(-) diff --git a/drivers/net/virtio/virtio_user/virtio_user_pci.c b/drivers/net/virtio/virtio_user/virtio_user_pci.c index 873e619..aa02c60 100644 --- a/drivers/net/virtio/virtio_user/virtio_user_pci.c +++ b/drivers/net/virtio/virtio_user/virtio_user_pci.c @@ -38,6 +38,7 @@ #include "../virtio_logs.h" #include "../virtio_pci.h" #include "../virtqueue.h" +#include "../virtio_ring.h" #include "virtio_user_dev.h" static void @@ -157,8 +158,10 @@ vdev_setup_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq) if (vq->virtio_net_hdr_mz) { vq->virtio_net_hdr_mem = (phys_addr_t)vq->virtio_net_hdr_mz->addr; - /* Do it one more time after we reset virtio_net_hdr_mem */ - vring_hdr_desc_init(vq); + + /* Do it again after we reset virtio_net_hdr_mem for tx */ + if ((vq->vq_queue_index % VTNET_CQ) == VTNET_TQ) + vring_hdr_desc_init(vq); } vq->offset = offsetof(struct rte_mbuf, buf_addr); return 0; @@ -182,11 +185,93 @@ vdev_del_queue(struct virtio_hw *hw, struct virtqueue *vq) close(uhw->kickfds[vq->vq_queue_index]); } +static uint8_t +handle_mq(struct virtqueue *vq, uint16_t queues) +{ + struct virtio_hw *hw = vq->hw; + struct virtio_user_hw *uhw = (struct virtio_user_hw *)hw->vdev_private; + uint32_t i; + uint8_t ret = 0; + + if (queues > uhw->max_queue_pairs) { + PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported", + queues, uhw->max_queue_pairs); + return -1; + } + + for (i = 0; i < queues; ++i) + ret |= virtio_user_enable_queue_pair(uhw, i, 1); + for (i = queues; i < uhw->max_queue_pairs; ++i) + ret |= virtio_user_enable_queue_pair(uhw, i, 0); + + return ret; +} + +static uint32_t +handle_ctrl(struct virtqueue *vq, uint16_t desc_idx_hdr) +{ + struct virtio_net_ctrl_hdr *hdr; + virtio_net_ctrl_ack status = ~0; + uint16_t i, desc_idx_data, desc_idx_status; + uint32_t num_of_descs = 0; + + /* locate 
desc for header, data, and status */ + desc_idx_data = vq->vq_ring.desc[desc_idx_hdr].next; + num_of_descs++; + + + i = desc_idx_data; + while (vq->vq_ring.desc[i].flags == VRING_DESC_F_NEXT) { + i = vq->vq_ring.desc[i].next; + num_of_descs++; + } + + /* locate desc for status */ + desc_idx_status = i; + num_of_descs++; + + hdr = (struct virtio_net_ctrl_hdr *)vq->vq_ring.desc[desc_idx_hdr].addr; + if (hdr->class == VIRTIO_NET_CTRL_MQ && + hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { + uint16_t queues; + + queues = *(uint16_t *)vq->vq_ring.desc[desc_idx_data].addr; + status = handle_mq(vq, queues); + } + + /* Update status */ + *(virtio_net_ctrl_ack *)vq->vq_ring.desc[desc_idx_status].addr = status; + + return num_of_descs; +} + static void vdev_notify_queue(struct virtio_hw *hw, struct virtqueue *vq) { uint64_t buf = 1; struct virtio_user_hw *uhw = (struct virtio_user_hw *)hw->vdev_private; + uint16_t avail_idx, desc_idx; + struct vring_used_elem *uep; + uint32_t num_of_descs; + + if (vq == hw->cvq) { + /* Consume avail ring, using used ring idx as first one */ + while (vq->vq_ring.used->idx != vq->vq_ring.avail->idx) { + avail_idx = (vq->vq_ring.used->idx) & + (vq->vq_nentries - 1); + desc_idx = vq->vq_ring.avail->ring[avail_idx]; + + num_of_descs = handle_ctrl(vq, desc_idx); + + /* Update used ring */ + uep = &vq->vq_ring.used->ring[avail_idx]; + uep->id = avail_idx; + uep->len = num_of_descs; + + vq->vq_ring.used->idx++; + } + return; + } if (write(uhw->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0) PMD_DRV_LOG(ERR, "failed to kick backend: %s\n", strerror(errno)); -- 2.1.4