From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id 171CCA0548; Thu, 8 Sep 2022 07:53:54 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id B119642B6F; Thu, 8 Sep 2022 07:53:34 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by mails.dpdk.org (Postfix) with ESMTP id C17C54284D for ; Thu, 8 Sep 2022 07:53:32 +0200 (CEST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1662616413; x=1694152413; h=from:to:cc:subject:date:message-id:in-reply-to: references; bh=qmYPrbvzT0qVwJZI86FfxXWf1tpnVsi/b0TzNiP5QB4=; b=cKoyeARolynpCxCG8g0veclEKWyqmPaFuqLkz0wO5HTzJpRoYyboQ573 qapTYUC+mCpEB12WxSOlpwlFVrZEmap5hjZxP3oNjUKUA8DzqkM8L8dAC e3LkpIbifPyylf/i3F8BblI8HzFOUM5ZlldkiUv3Pa6/f8gDc3MT9RM/P xNEbx028LriaH0tyzJwlxeB9KwKijt+Kg5tFEvi44bl1p9JEtSpE3gFIu ngPRaoIlhhh6HRuEBPA8sIuT1dAyH3gGrz1hOD+kV/ZXLl4owUuxUVtUX GgPETOYpmg5oykIbB2juasFqQYUyvtoCDGuq0f3AsDg/Etbw1SQZ6igcI Q==; X-IronPort-AV: E=McAfee;i="6500,9779,10463"; a="277464185" X-IronPort-AV: E=Sophos;i="5.93,299,1654585200"; d="scan'208";a="277464185" Received: from fmsmga008.fm.intel.com ([10.253.24.58]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 07 Sep 2022 22:53:32 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.93,299,1654585200"; d="scan'208";a="676531863" Received: from dpdk-dipei.sh.intel.com ([10.67.110.251]) by fmsmga008.fm.intel.com with ESMTP; 07 Sep 2022 22:53:30 -0700 From: Andy Pei To: dev@dpdk.org Cc: chenbo.xia@intel.com, rosen.xu@intel.com, wei.huang@intel.com, gang.cao@intel.com, maxime.coquelin@redhat.com, Huang Wei Subject: [PATCH v2 6/8] vdpa/ifc: support dynamic enable/disable queue Date: Thu, 8 Sep 2022 13:54:16 +0800 Message-Id: <1662616458-164613-7-git-send-email-andy.pei@intel.com> X-Mailer: git-send-email 1.8.3.1 In-Reply-To: 
<1662616458-164613-1-git-send-email-andy.pei@intel.com> References: <1661229305-240952-2-git-send-email-andy.pei@intel.com> <1662616458-164613-1-git-send-email-andy.pei@intel.com> X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org From: Huang Wei Support dynamic enable or disable queue. For front end, like QEMU, user can use ethtool to configure queue. For example, "ethtool -L eth0 combined 3" to enable 3 queue pairs. Signed-off-by: Huang Wei Signed-off-by: Andy Pei --- drivers/vdpa/ifc/base/ifcvf.c | 101 ++++++++++++++++++++++++++++++++++++ drivers/vdpa/ifc/base/ifcvf.h | 6 +++ drivers/vdpa/ifc/base/ifcvf_osdep.h | 1 + drivers/vdpa/ifc/ifcvf_vdpa.c | 93 +++++++++++++++++++++++++++------ 4 files changed, 186 insertions(+), 15 deletions(-) diff --git a/drivers/vdpa/ifc/base/ifcvf.c b/drivers/vdpa/ifc/base/ifcvf.c index 4875ea1..34f32f8 100644 --- a/drivers/vdpa/ifc/base/ifcvf.c +++ b/drivers/vdpa/ifc/base/ifcvf.c @@ -230,6 +230,107 @@ } } +int +ifcvf_enable_vring_hw(struct ifcvf_hw *hw, int i) +{ + struct ifcvf_pci_common_cfg *cfg; + u8 *lm_cfg; + u16 notify_off; + int msix_vector; + + if (!hw || (i >= (int)hw->nr_vring)) + return -1; + + cfg = hw->common_cfg; + if (!cfg) { + ERROUT("common_cfg in HW is NULL.\n"); + return -1; + } + + ifcvf_enable_multiqueue(hw); + + IFCVF_WRITE_REG16(i, &cfg->queue_select); + msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector); + if (msix_vector != (i + 1)) { + IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector); + msix_vector = IFCVF_READ_REG16(&cfg->queue_msix_vector); + if (msix_vector == IFCVF_MSI_NO_VECTOR) { + ERROUT("queue %u, msix vec alloc failed\n", i); + return -1; + } + } + + io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo, + &cfg->queue_desc_hi); + io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo, + &cfg->queue_avail_hi); + 
io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo, + &cfg->queue_used_hi); + IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size); + + lm_cfg = hw->lm_cfg; + if (lm_cfg) { + if (hw->device_type == IFCVF_BLK) + *(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET + + i * IFCVF_LM_CFG_SIZE) = + (u32)hw->vring[i].last_avail_idx | + ((u32)hw->vring[i].last_used_idx << 16); + else + *(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET + + (i / 2) * IFCVF_LM_CFG_SIZE + + (i % 2) * 4) = + (u32)hw->vring[i].last_avail_idx | + ((u32)hw->vring[i].last_used_idx << 16); + } + + notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off); + hw->notify_addr[i] = (void *)((u8 *)hw->notify_base + + notify_off * hw->notify_off_multiplier); + IFCVF_WRITE_REG16(1, &cfg->queue_enable); + + return 0; +} + +void +ifcvf_disable_vring_hw(struct ifcvf_hw *hw, int i) +{ + struct ifcvf_pci_common_cfg *cfg; + u32 ring_state; + u8 *lm_cfg; + + if (!hw || (i >= (int)hw->nr_vring)) + return; + + cfg = hw->common_cfg; + if (!cfg) { + ERROUT("common_cfg in HW is NULL.\n"); + return; + } + + IFCVF_WRITE_REG16(i, &cfg->queue_select); + IFCVF_WRITE_REG16(0, &cfg->queue_enable); + + lm_cfg = hw->lm_cfg; + if (lm_cfg) { + if (hw->device_type == IFCVF_BLK) + ring_state = *(u32 *)(lm_cfg + + IFCVF_LM_RING_STATE_OFFSET + + i * IFCVF_LM_CFG_SIZE); + else + ring_state = *(u32 *)(lm_cfg + + IFCVF_LM_RING_STATE_OFFSET + + (i / 2) * IFCVF_LM_CFG_SIZE + + (i % 2) * 4); + + if (hw->device_type == IFCVF_BLK) + hw->vring[i].last_avail_idx = + (u16)(ring_state & IFCVF_16_BIT_MASK); + else + hw->vring[i].last_avail_idx = (u16)(ring_state >> 16); + hw->vring[i].last_used_idx = (u16)(ring_state >> 16); + } +} + STATIC int ifcvf_hw_enable(struct ifcvf_hw *hw) { diff --git a/drivers/vdpa/ifc/base/ifcvf.h b/drivers/vdpa/ifc/base/ifcvf.h index c17bf2a..e67d4e8 100644 --- a/drivers/vdpa/ifc/base/ifcvf.h +++ b/drivers/vdpa/ifc/base/ifcvf.h @@ -164,6 +164,12 @@ struct ifcvf_hw { ifcvf_get_features(struct ifcvf_hw *hw); int 
+ifcvf_enable_vring_hw(struct ifcvf_hw *hw, int i); + +void +ifcvf_disable_vring_hw(struct ifcvf_hw *hw, int i); + +int ifcvf_start_hw(struct ifcvf_hw *hw); void diff --git a/drivers/vdpa/ifc/base/ifcvf_osdep.h b/drivers/vdpa/ifc/base/ifcvf_osdep.h index 3d56769..4a1bfec 100644 --- a/drivers/vdpa/ifc/base/ifcvf_osdep.h +++ b/drivers/vdpa/ifc/base/ifcvf_osdep.h @@ -16,6 +16,7 @@ #define WARNINGOUT(S, args...) RTE_LOG(WARNING, PMD, S, ##args) #define DEBUGOUT(S, args...) RTE_LOG(DEBUG, PMD, S, ##args) +#define ERROUT(S, args...) RTE_LOG(ERR, PMD, S, ##args) #define STATIC static #define msec_delay(x) rte_delay_us_sleep(1000 * (x)) diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c index 48f1a89..16fd0fd 100644 --- a/drivers/vdpa/ifc/ifcvf_vdpa.c +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c @@ -1288,13 +1288,59 @@ struct rte_vdpa_dev_info { } static int +ifcvf_config_vring(struct ifcvf_internal *internal, int vring) +{ + struct ifcvf_hw *hw = &internal->hw; + int vid = internal->vid; + struct rte_vhost_vring vq; + uint64_t gpa; + + if (hw->vring[vring].enable) { + rte_vhost_get_vhost_vring(vid, vring, &vq); + gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc); + if (gpa == 0) { + DRV_LOG(ERR, "Fail to get GPA for descriptor ring."); + return -1; + } + hw->vring[vring].desc = gpa; + + gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail); + if (gpa == 0) { + DRV_LOG(ERR, "Fail to get GPA for available ring."); + return -1; + } + hw->vring[vring].avail = gpa; + + gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used); + if (gpa == 0) { + DRV_LOG(ERR, "Fail to get GPA for used ring."); + return -1; + } + hw->vring[vring].used = gpa; + + hw->vring[vring].size = vq.size; + rte_vhost_get_vring_base(vid, vring, + &hw->vring[vring].last_avail_idx, + &hw->vring[vring].last_used_idx); + ifcvf_enable_vring_hw(&internal->hw, vring); + } else { + ifcvf_disable_vring_hw(&internal->hw, vring); + rte_vhost_set_vring_base(vid, vring, + hw->vring[vring].last_avail_idx, + 
hw->vring[vring].last_used_idx); + } + + return 0; +} + +static int ifcvf_set_vring_state(int vid, int vring, int state) { struct rte_vdpa_device *vdev; struct internal_list *list; struct ifcvf_internal *internal; struct ifcvf_hw *hw; - struct ifcvf_pci_common_cfg *cfg; + bool enable = !!state; int ret = 0; vdev = rte_vhost_get_vdpa_device(vid); @@ -1304,6 +1350,9 @@ struct rte_vdpa_dev_info { return -1; } + DRV_LOG(INFO, "%s queue %d of vDPA device %s", + enable ? "enable" : "disable", vring, vdev->device->name); + internal = list->internal; if (vring < 0 || vring >= internal->max_queues * 2) { DRV_LOG(ERR, "Vring index %d not correct", vring); @@ -1311,27 +1360,41 @@ struct rte_vdpa_dev_info { } hw = &internal->hw; + hw->vring[vring].enable = enable; + if (!internal->configured) - goto exit; + return 0; - cfg = hw->common_cfg; - IFCVF_WRITE_REG16(vring, &cfg->queue_select); - IFCVF_WRITE_REG16(!!state, &cfg->queue_enable); + unset_notify_relay(internal); - if (!state && hw->vring[vring].enable) { - ret = vdpa_disable_vfio_intr(internal); - if (ret) - return ret; + ret = vdpa_enable_vfio_intr(internal, false); + if (ret) { + DRV_LOG(ERR, "failed to set vfio interrupt of vDPA device %s", + vdev->device->name); + return ret; } - if (state && !hw->vring[vring].enable) { - ret = vdpa_enable_vfio_intr(internal, false); - if (ret) - return ret; + ret = ifcvf_config_vring(internal, vring); + if (ret) { + DRV_LOG(ERR, "failed to configure queue %d of vDPA device %s", + vring, vdev->device->name); + return ret; + } + + ret = setup_notify_relay(internal); + if (ret) { + DRV_LOG(ERR, "failed to setup notify relay of vDPA device %s", + vdev->device->name); + return ret; + } + + ret = rte_vhost_host_notifier_ctrl(vid, vring, enable); + if (ret) { + DRV_LOG(ERR, "vDPA device %s queue %d host notifier ctrl fail", + vdev->device->name, vring); + return ret; } -exit: - hw->vring[vring].enable = !!state; return 0; } -- 1.8.3.1