From: Gowrishankar
To: Anatoly Burakov
Cc: Jonas Pfefferle1, Chao Zhu, dev@dpdk.org, Gowrishankar Muthukrishnan
Date: Mon, 2 Apr 2018 17:06:50 +0530
Subject: [dpdk-dev] [PATCH v2 24/41] vfio: allow to map other memory regions
List-Id: DPDK patches and discussions
X-Mailer: git-send-email 1.9.1

From: Gowrishankar Muthukrishnan

This patch adds the powerpc arch-specific changes: the sPAPR IOMMU type now provides a dma_user_map_func (vfio_spapr_dma_mem_map), so that memory regions other than the hugepage segments can be mapped and unmapped, recreating the DMA window when an IOVA falls outside the current one.
Signed-off-by: Gowrishankar Muthukrishnan
---
 lib/librte_eal/linuxapp/eal/eal_vfio.c | 110 +++++++++++++++++++++++++++++++--
 1 file changed, 105 insertions(+), 5 deletions(-)

diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c
index 4e9e296..985acf4 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
@@ -24,6 +24,7 @@
 static int vfio_type1_dma_map(int);
 static int vfio_type1_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
 static int vfio_spapr_dma_map(int);
+static int vfio_spapr_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
 static int vfio_noiommu_dma_map(int);
 static int vfio_noiommu_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
 
@@ -41,8 +42,7 @@
 		.type_id = RTE_VFIO_SPAPR,
 		.name = "sPAPR",
 		.dma_map_func = &vfio_spapr_dma_map,
-		.dma_user_map_func = NULL
-		// TODO: work with PPC64 people on enabling this, window size!
+		.dma_user_map_func = &vfio_spapr_dma_mem_map
 	},
 	/* IOMMU-less mode */
 	{
@@ -838,7 +838,6 @@ struct spapr_create_window_walk_param {
 
 	ret = ioctl(*vfio_container_fd, VFIO_IOMMU_MAP_DMA,
 			&dma_map);
-
 	if (ret) {
 		RTE_LOG(ERR, EAL, "  cannot set up DMA remapping, "
 				"error %i (%s)\n", errno, strerror(errno));
@@ -852,7 +851,6 @@ struct spapr_create_window_walk_param {
 vfio_spapr_dma_map(int vfio_container_fd)
 {
 	int ret;
-	uint64_t hugepage_sz = 0;
 	struct spapr_create_window_walk_param wa;
 
 	struct vfio_iommu_spapr_tce_info info = {
@@ -890,7 +888,7 @@ struct spapr_create_window_walk_param {
 
 	/* sPAPR requires window size to be a power of 2 */
 	create.window_size = rte_align64pow2(create.window_size);
-	create.page_shift = __builtin_ctzll(hugepage_sz);
+	create.page_shift = __builtin_ctzll(wa.hugepage_sz);
 	create.levels = 1;
 
 	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
@@ -912,6 +910,108 @@ struct spapr_create_window_walk_param {
 }
 
 static int
+vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
+		uint64_t len, int do_map)
+{
+	int ret;
+	struct spapr_create_window_walk_param wa = {
+		.hugepage_sz = 0,
+	};
+	struct vfio_iommu_spapr_tce_create create = {
+		.argsz = sizeof(create),
+	};
+
+	/* check if DMA window is from 0 to max(phys_addr + len) */
+	wa.create = &create;
+	rte_memseg_walk(vfio_spapr_create_window_walk, &wa);
+	create.window_size = rte_align64pow2(create.window_size);
+	if (iova > create.window_size) {
+		struct vfio_iommu_spapr_tce_info info = {
+			.argsz = sizeof(info),
+		};
+		struct vfio_iommu_spapr_tce_remove remove = {
+			.argsz = sizeof(remove),
+		};
+
+		/* query spapr iommu info */
+		ret = ioctl(vfio_container_fd,
+				VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
+		if (ret) {
+			RTE_LOG(ERR, EAL, "  cannot get iommu info, "
+					"error %i (%s)\n", errno, strerror(errno));
+			return -1;
+		}
+
+		/* remove old DMA window */
+		remove.start_addr = info.dma32_window_start;
+		ret = ioctl(vfio_container_fd,
+				VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
+		if (ret) {
+			RTE_LOG(ERR, EAL, "  cannot remove default DMA window, "
+					"error %i (%s)\n", errno, strerror(errno));
+			return -1;
+		}
+		create.page_shift = __builtin_ctzll(wa.hugepage_sz);
+		create.levels = 1;
+
+		ret = ioctl(vfio_container_fd,
+				VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
+		if (ret) {
+			RTE_LOG(ERR, EAL, "  cannot create new DMA window, "
+					"error %i (%s)\n", errno, strerror(errno));
+			return -1;
+		}
+
+		if (create.start_addr != 0) {
+			RTE_LOG(ERR, EAL, "  DMA window start address != 0\n");
+			return -1;
+		}
+
+	}
+
+	if (do_map != 0) {
+		if (rte_memseg_walk(vfio_spapr_dma_map_walk,
+				&vfio_container_fd))
+			return -1;
+	} else {
+		struct vfio_iommu_type1_dma_unmap dma_unmap;
+		struct vfio_iommu_spapr_register_memory reg = {
+			.argsz = sizeof(reg),
+			.flags = 0
+		};
+
+		/* for unmap, check if iova within DMA window */
+		if (iova > create.window_size) {
+			RTE_LOG(ERR, EAL, "iova beyond DMA window for unmap\n");
+			return -1;
+		}
+
+		reg.vaddr = (uintptr_t) vaddr;
+		reg.size = len;
+		ret = ioctl(vfio_container_fd,
+				VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
+		if (ret) {
+			RTE_LOG(ERR, EAL, "  cannot unregister vaddr for IOMMU, error %i (%s)\n",
+					errno, strerror(errno));
+			return -1;
+		}
+
+		memset(&dma_unmap, 0, sizeof(dma_unmap));
+		dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
+		dma_unmap.size = len;
+		dma_unmap.iova = iova;
+
+		ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
+				&dma_unmap);
+		if (ret) {
+			RTE_LOG(ERR, EAL, "  cannot clear DMA remapping, error %i (%s)\n",
+					errno, strerror(errno));
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int
 vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
 {
 	/* No-IOMMU mode does not need DMA mapping */
-- 
1.9.1
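
[Editor's note] For context when reviewing, here is a minimal usage sketch, illustrative only and not part of the patch. It assumes that the rte_vfio_dma_map()/rte_vfio_dma_unmap() EAL wrappers introduced elsewhere in this series dispatch to the active IOMMU type's dma_user_map_func, i.e. to vfio_spapr_dma_mem_map() on sPAPR; the helper name and buffer handling below are hypothetical.

/*
 * Illustrative sketch only -- not part of the patch. Assumes the
 * rte_vfio_dma_map()/rte_vfio_dma_unmap() wrappers from this series
 * route to the sPAPR dma_user_map_func added above.
 */
#include <stdint.h>
#include <rte_vfio.h>

static int
dma_map_external_buf(void *buf, uint64_t iova, uint64_t len)
{
	/* Map a user-allocated buffer for device DMA. On sPAPR this may
	 * recreate the DMA window if iova lies beyond its current size. */
	if (rte_vfio_dma_map((uint64_t)(uintptr_t)buf, iova, len) != 0)
		return -1;

	/* ... hand iova to the device and run I/O ... */

	/* Unregister the memory and clear the mapping when done. */
	return rte_vfio_dma_unmap((uint64_t)(uintptr_t)buf, iova, len);
}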