From: Andy Pei
To: dev@dpdk.org
Cc: chenbo.xia@intel.com, rosen.xu@intel.com, wei.huang@intel.com,
 gang.cao@intel.com, maxime.coquelin@redhat.com
Subject: [PATCH v6 6/8] vdpa/ifc: support dynamic enable/disable queue
Date: Mon, 17 Oct 2022 19:41:43 +0800
Message-Id: <1666006905-74029-7-git-send-email-andy.pei@intel.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1666006905-74029-1-git-send-email-andy.pei@intel.com>
References: <1661229305-240952-2-git-send-email-andy.pei@intel.com>
 <1666006905-74029-1-git-send-email-andy.pei@intel.com>
List-Id: DPDK patches and discussions

From: Huang Wei

Support dynamically enabling and disabling queues at runtime. On the
front end (for example QEMU), the user can reconfigure the queues with
ethtool, e.g. "ethtool -L eth0 combined 3" to enable 3 queue pairs.
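
Editor's note, illustration only (not part of this patch): a guest-side
queue change such as "ethtool -L eth0 combined 3" causes the front end to
enable or disable individual vrings, and the vhost library forwards each
request to the vDPA driver's set_vring_state callback (state is 1 to
enable a ring, 0 to disable it). The stand-alone sketch below only mimics
those semantics; the "example_" names are invented for this sketch.

    /* Stand-alone sketch of the set_vring_state semantics; compile and run
     * independently of the driver.
     */
    #include <stdio.h>

    static int
    example_set_vring_state(int vid, int vring, int state)
    {
    	/* A real driver (ifcvf after this patch) reprograms the HW ring here;
    	 * this stub only logs the request.
    	 */
    	printf("vid %d: %s vring %d\n", vid,
    		state ? "enable" : "disable", vring);
    	return 0;
    }

    int
    main(void)
    {
    	int q;

    	/* Enabling 3 queue pairs in the guest results in vrings 0..5 being
    	 * enabled one by one through such a callback.
    	 */
    	for (q = 0; q < 6; q++)
    		example_set_vring_state(0, q, 1);
    	return 0;
    }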

Signed-off-by: Huang Wei
Signed-off-by: Andy Pei
Reviewed-by: Chenbo Xia
---
 drivers/vdpa/ifc/base/ifcvf.c | 100 ++++++++++++++++++++++++++++++++++++++++++
 drivers/vdpa/ifc/base/ifcvf.h |   6 +++
 drivers/vdpa/ifc/ifcvf_vdpa.c |  93 ++++++++++++++++++++++++++++++++-------
 3 files changed, 184 insertions(+), 15 deletions(-)

diff --git a/drivers/vdpa/ifc/base/ifcvf.c b/drivers/vdpa/ifc/base/ifcvf.c
index 30bb8cb..869ddd6 100644
--- a/drivers/vdpa/ifc/base/ifcvf.c
+++ b/drivers/vdpa/ifc/base/ifcvf.c
@@ -233,6 +233,106 @@
 	}
 }
 
+int
+ifcvf_enable_vring_hw(struct ifcvf_hw *hw, int i)
+{
+	struct ifcvf_pci_common_cfg *cfg;
+	u8 *lm_cfg;
+	u16 notify_off;
+	int msix_vector;
+
+	if (i >= (int)hw->nr_vring)
+		return -1;
+
+	cfg = hw->common_cfg;
+	if (!cfg) {
+		RTE_LOG(ERR, PMD, "common_cfg in HW is NULL.\n");
+		return -1;
+	}
+
+	ifcvf_enable_mq(hw);
+
+	IFCVF_WRITE_REG16(i, &cfg->queue_select);
+	msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector);
+	if (msix_vector != (i + 1)) {
+		IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
+		msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector);
+		if (msix_vector == IFCVF_MSI_NO_VECTOR) {
+			RTE_LOG(ERR, PMD, "queue %d, msix vec alloc failed\n",
+				i);
+			return -1;
+		}
+	}
+
+	io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+			&cfg->queue_desc_hi);
+	io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+			&cfg->queue_avail_hi);
+	io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+			&cfg->queue_used_hi);
+	IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size);
+
+	lm_cfg = hw->lm_cfg;
+	if (lm_cfg) {
+		if (hw->device_type == IFCVF_BLK)
+			*(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+				i * IFCVF_LM_CFG_SIZE) =
+				(u32)hw->vring[i].last_avail_idx |
+				((u32)hw->vring[i].last_used_idx << 16);
+		else
+			*(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+				(i / 2) * IFCVF_LM_CFG_SIZE +
+				(i % 2) * 4) =
+				(u32)hw->vring[i].last_avail_idx |
+				((u32)hw->vring[i].last_used_idx << 16);
+	}
+
+	notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off);
+	hw->notify_addr[i] = (void *)((u8 *)hw->notify_base +
+			notify_off * hw->notify_off_multiplier);
+	IFCVF_WRITE_REG16(1, &cfg->queue_enable);
+
+	return 0;
+}
+
+void
+ifcvf_disable_vring_hw(struct ifcvf_hw *hw, int i)
+{
+	struct ifcvf_pci_common_cfg *cfg;
+	u32 ring_state;
+	u8 *lm_cfg;
+
+	if (i >= (int)hw->nr_vring)
+		return;
+
+	cfg = hw->common_cfg;
+	if (!cfg) {
+		RTE_LOG(ERR, PMD, "common_cfg in HW is NULL.\n");
+		return;
+	}
+
+	IFCVF_WRITE_REG16(i, &cfg->queue_select);
+	IFCVF_WRITE_REG16(0, &cfg->queue_enable);
+
+	lm_cfg = hw->lm_cfg;
+	if (lm_cfg) {
+		if (hw->device_type == IFCVF_BLK) {
+			ring_state = *(u32 *)(lm_cfg +
+				IFCVF_LM_RING_STATE_OFFSET +
+				i * IFCVF_LM_CFG_SIZE);
+			hw->vring[i].last_avail_idx =
+				(u16)(ring_state & IFCVF_16_BIT_MASK);
+		} else {
+			ring_state = *(u32 *)(lm_cfg +
+				IFCVF_LM_RING_STATE_OFFSET +
+				(i / 2) * IFCVF_LM_CFG_SIZE +
+				(i % 2) * 4);
+			hw->vring[i].last_avail_idx = (u16)(ring_state >> 16);
+		}
+		hw->vring[i].last_used_idx = (u16)(ring_state >> 16);
+	}
+}
+
 STATIC int
 ifcvf_hw_enable(struct ifcvf_hw *hw)
 {
diff --git a/drivers/vdpa/ifc/base/ifcvf.h b/drivers/vdpa/ifc/base/ifcvf.h
index 1e133c0..3726da7 100644
--- a/drivers/vdpa/ifc/base/ifcvf.h
+++ b/drivers/vdpa/ifc/base/ifcvf.h
@@ -164,6 +164,12 @@ struct ifcvf_hw {
 ifcvf_get_features(struct ifcvf_hw *hw);
 
 int
+ifcvf_enable_vring_hw(struct ifcvf_hw *hw, int i);
+
+void
+ifcvf_disable_vring_hw(struct ifcvf_hw *hw, int i);
+
+int
 ifcvf_start_hw(struct ifcvf_hw *hw);
 
 void
diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c
index 0c3407a..9c49f9c 100644
--- a/drivers/vdpa/ifc/ifcvf_vdpa.c
+++ b/drivers/vdpa/ifc/ifcvf_vdpa.c
@@ -1282,13 +1282,59 @@ struct rte_vdpa_dev_info {
 }
 
 static int
+ifcvf_config_vring(struct ifcvf_internal *internal, int vring)
+{
+	struct ifcvf_hw *hw = &internal->hw;
+	int vid = internal->vid;
+	struct rte_vhost_vring vq;
+	uint64_t gpa;
+
+	if (hw->vring[vring].enable) {
+		rte_vhost_get_vhost_vring(vid, vring, &vq);
+		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+		if (gpa == 0) {
+			DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
+			return -1;
+		}
+		hw->vring[vring].desc = gpa;
+
+		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+		if (gpa == 0) {
+			DRV_LOG(ERR, "Fail to get GPA for available ring.");
+			return -1;
+		}
+		hw->vring[vring].avail = gpa;
+
+		gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+		if (gpa == 0) {
+			DRV_LOG(ERR, "Fail to get GPA for used ring.");
+			return -1;
+		}
+		hw->vring[vring].used = gpa;
+
+		hw->vring[vring].size = vq.size;
+		rte_vhost_get_vring_base(vid, vring,
+				&hw->vring[vring].last_avail_idx,
+				&hw->vring[vring].last_used_idx);
+		ifcvf_enable_vring_hw(&internal->hw, vring);
+	} else {
+		ifcvf_disable_vring_hw(&internal->hw, vring);
+		rte_vhost_set_vring_base(vid, vring,
+				hw->vring[vring].last_avail_idx,
+				hw->vring[vring].last_used_idx);
+	}
+
+	return 0;
+}
+
+static int
 ifcvf_set_vring_state(int vid, int vring, int state)
 {
 	struct rte_vdpa_device *vdev;
 	struct internal_list *list;
 	struct ifcvf_internal *internal;
 	struct ifcvf_hw *hw;
-	struct ifcvf_pci_common_cfg *cfg;
+	bool enable = !!state;
 	int ret = 0;
 
 	vdev = rte_vhost_get_vdpa_device(vid);
@@ -1298,6 +1344,9 @@ struct rte_vdpa_dev_info {
 		return -1;
 	}
 
+	DRV_LOG(INFO, "%s queue %d of vDPA device %s",
+		enable ? "enable" : "disable", vring, vdev->device->name);
+
 	internal = list->internal;
 	if (vring < 0 || vring >= internal->max_queues * 2) {
 		DRV_LOG(ERR, "Vring index %d not correct", vring);
@@ -1305,27 +1354,41 @@ struct rte_vdpa_dev_info {
 	}
 
 	hw = &internal->hw;
+	hw->vring[vring].enable = enable;
+
 	if (!internal->configured)
-		goto exit;
+		return 0;
 
-	cfg = hw->common_cfg;
-	IFCVF_WRITE_REG16(vring, &cfg->queue_select);
-	IFCVF_WRITE_REG16(!!state, &cfg->queue_enable);
+	unset_notify_relay(internal);
 
-	if (!state && hw->vring[vring].enable) {
-		ret = vdpa_disable_vfio_intr(internal);
-		if (ret)
-			return ret;
+	ret = vdpa_enable_vfio_intr(internal, false);
+	if (ret) {
+		DRV_LOG(ERR, "failed to set vfio interrupt of vDPA device %s",
+			vdev->device->name);
+		return ret;
 	}
 
-	if (state && !hw->vring[vring].enable) {
-		ret = vdpa_enable_vfio_intr(internal, false);
-		if (ret)
-			return ret;
+	ret = ifcvf_config_vring(internal, vring);
+	if (ret) {
+		DRV_LOG(ERR, "failed to configure queue %d of vDPA device %s",
+			vring, vdev->device->name);
+		return ret;
+	}
+
+	ret = setup_notify_relay(internal);
+	if (ret) {
+		DRV_LOG(ERR, "failed to setup notify relay of vDPA device %s",
+			vdev->device->name);
+		return ret;
+	}
+
+	ret = rte_vhost_host_notifier_ctrl(vid, vring, enable);
+	if (ret) {
+		DRV_LOG(ERR, "vDPA device %s queue %d host notifier ctrl fail",
+			vdev->device->name, vring);
+		return ret;
 	}
 
-exit:
-	hw->vring[vring].enable = !!state;
 	return 0;
 }
-- 
1.8.3.1
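
Editor's appended note, illustration only (not part of the patch): the
live-migration (LM) region touched above stores both ring indexes of a
queue in a single 32-bit word; the enable path writes last_avail_idx into
the low 16 bits and last_used_idx into the high 16 bits, and the disable
path reads the word back before the driver restores the base indexes via
rte_vhost_set_vring_base(). A minimal stand-alone sketch of that packing:

    /* Stand-alone sketch of the 32-bit ring-state word used by the LM
     * region in ifcvf_enable_vring_hw()/ifcvf_disable_vring_hw() above;
     * compiles and runs independently of the driver.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    pack_ring_state(uint16_t last_avail_idx, uint16_t last_used_idx)
    {
    	/* avail index in bits 0..15, used index in bits 16..31 */
    	return (uint32_t)last_avail_idx | ((uint32_t)last_used_idx << 16);
    }

    int
    main(void)
    {
    	uint32_t ring_state = pack_ring_state(10, 7);

    	/* The disable path reads this word back to recover the indexes. */
    	printf("avail=%u used=%u\n",
    		(unsigned int)(ring_state & 0xffff),
    		(unsigned int)(ring_state >> 16));
    	return 0;
    }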