From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
Sachin Saxena <sachin.saxena@nxp.com>,
Gagandeep Singh <g.singh@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>, Vanshika Shukla <vanshika.shukla@nxp.com>
Subject: [v2 16/43] bus/fslmc: dynamic IOVA mode configuration
Date: Wed, 18 Sep 2024 13:20:29 +0530 [thread overview]
Message-ID: <20240918075056.1838654-17-vanshika.shukla@nxp.com> (raw)
In-Reply-To: <20240918075056.1838654-1-vanshika.shukla@nxp.com>
From: Jun Yang <jun.yang@nxp.com>
IOVA mode should not be configured with CFLAGS because
1) The user can pass the "--iova-mode" option to configure IOVA.
2) IOVA mode is determined by negotiation between multiple devices.
EAL is in VA mode only when all devices support VA mode.
Hence:
1) Remove RTE_LIBRTE_DPAA2_USE_PHYS_IOVA cflags.
Instead, use rte_eal_iova_mode API to identify VA or PA mode.
2) Support memory IOMMU mapping and I/O IOMMU mapping (PCI space).
3) For memory IOMMU, in VA mode, IOVA:VA = 1:1;
in PA mode, IOVA:VA = PA:VA. The mapping policy is determined by
EAL memory driver.
4) For I/O IOMMU, IOVA:VA is up to I/O driver configuration.
In general, it's aligned with memory IOMMU mapping.
5) Memory and I/O IOVA tables are created and updated when DMA
mapping is set up, which replaces the dpaax IOVA table.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
Signed-off-by: Vanshika Shukla <vanshika.shukla@nxp.com>
---
drivers/bus/fslmc/bus_fslmc_driver.h | 29 +-
drivers/bus/fslmc/fslmc_bus.c | 33 +-
drivers/bus/fslmc/fslmc_logs.h | 5 +-
drivers/bus/fslmc/fslmc_vfio.c | 668 ++++++++++++++++++-----
drivers/bus/fslmc/fslmc_vfio.h | 4 +
drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c | 3 +-
drivers/bus/fslmc/portal/dpaa2_hw_dpio.c | 10 +-
drivers/bus/fslmc/portal/dpaa2_hw_dpio.h | 3 +-
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 111 ++--
drivers/bus/fslmc/version.map | 7 +-
drivers/dma/dpaa2/dpaa2_qdma.c | 1 +
11 files changed, 619 insertions(+), 255 deletions(-)
diff --git a/drivers/bus/fslmc/bus_fslmc_driver.h b/drivers/bus/fslmc/bus_fslmc_driver.h
index dc2f395f60..11eebd560c 100644
--- a/drivers/bus/fslmc/bus_fslmc_driver.h
+++ b/drivers/bus/fslmc/bus_fslmc_driver.h
@@ -37,9 +37,6 @@ extern "C" {
#include <fslmc_vfio.h>
-#include "portal/dpaa2_hw_pvt.h"
-#include "portal/dpaa2_hw_dpio.h"
-
#define FSLMC_OBJECT_MAX_LEN 32 /**< Length of each device on bus */
#define DPAA2_INVALID_MBUF_SEQN 0
@@ -149,6 +146,32 @@ struct rte_dpaa2_driver {
rte_dpaa2_remove_t remove;
};
+int
+rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size);
+__rte_internal
+int
+rte_fslmc_vfio_mem_dmaunmap(uint64_t iova, uint64_t size);
+__rte_internal
+uint64_t
+rte_fslmc_cold_mem_vaddr_to_iova(void *vaddr,
+ uint64_t size);
+__rte_internal
+void *
+rte_fslmc_cold_mem_iova_to_vaddr(uint64_t iova,
+ uint64_t size);
+__rte_internal
+__hot uint64_t
+rte_fslmc_mem_vaddr_to_iova(void *vaddr);
+__rte_internal
+__hot void *
+rte_fslmc_mem_iova_to_vaddr(uint64_t iova);
+__rte_internal
+uint64_t
+rte_fslmc_io_vaddr_to_iova(void *vaddr);
+__rte_internal
+void *
+rte_fslmc_io_iova_to_vaddr(uint64_t iova);
+
/**
* Register a DPAA2 driver.
*
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index 654726dbe6..ce87b4ddbd 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -27,7 +27,6 @@
#define FSLMC_BUS_NAME fslmc
struct rte_fslmc_bus rte_fslmc_bus;
-uint8_t dpaa2_virt_mode;
#define DPAA2_SEQN_DYNFIELD_NAME "dpaa2_seqn_dynfield"
int dpaa2_seqn_dynfield_offset = -1;
@@ -457,22 +456,6 @@ rte_fslmc_probe(void)
probe_all = rte_fslmc_bus.bus.conf.scan_mode != RTE_BUS_SCAN_ALLOWLIST;
- /* In case of PA, the FD addresses returned by qbman APIs are physical
- * addresses, which need conversion into equivalent VA address for
- * rte_mbuf. For that, a table (a serial array, in memory) is used to
- * increase translation efficiency.
- * This has to be done before probe as some device initialization
- * (during) probe allocate memory (dpaa2_sec) which needs to be pinned
- * to this table.
- *
- * Error is ignored as relevant logs are handled within dpaax and
- * handling for unavailable dpaax table too is transparent to caller.
- *
- * And, the IOVA table is only applicable in case of PA mode.
- */
- if (rte_eal_iova_mode() == RTE_IOVA_PA)
- dpaax_iova_table_populate();
-
TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
ret = rte_fslmc_match(drv, dev);
@@ -507,9 +490,6 @@ rte_fslmc_probe(void)
}
}
- if (rte_eal_iova_mode() == RTE_IOVA_VA)
- dpaa2_virt_mode = 1;
-
return 0;
}
@@ -558,12 +538,6 @@ rte_fslmc_driver_register(struct rte_dpaa2_driver *driver)
void
rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
{
- /* Cleanup the PA->VA Translation table; From wherever this function
- * is called from.
- */
- if (rte_eal_iova_mode() == RTE_IOVA_PA)
- dpaax_iova_table_depopulate();
-
TAILQ_REMOVE(&rte_fslmc_bus.driver_list, driver, next);
}
@@ -599,13 +573,12 @@ rte_dpaa2_get_iommu_class(void)
bool is_vfio_noiommu_enabled = 1;
bool has_iova_va;
+ if (rte_eal_iova_mode() == RTE_IOVA_PA)
+ return RTE_IOVA_PA;
+
if (TAILQ_EMPTY(&rte_fslmc_bus.device_list))
return RTE_IOVA_DC;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- return RTE_IOVA_PA;
-#endif
-
/* check if all devices on the bus support Virtual addressing or not */
has_iova_va = fslmc_all_device_support_iova();
diff --git a/drivers/bus/fslmc/fslmc_logs.h b/drivers/bus/fslmc/fslmc_logs.h
index e15c603426..d6abffc566 100644
--- a/drivers/bus/fslmc/fslmc_logs.h
+++ b/drivers/bus/fslmc/fslmc_logs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright 2016 NXP
+ * Copyright 2016-2023 NXP
*
*/
@@ -10,7 +10,8 @@
extern int dpaa2_logtype_bus;
#define DPAA2_BUS_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, dpaa2_logtype_bus, "fslmc: " fmt "\n", \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_bus, \
+ "fslmc " # level ": " fmt "\n", \
##args)
/* Debug logs are with Function names */
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 45dac61d97..fe18429f42 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -19,6 +19,7 @@
#include <libgen.h>
#include <dirent.h>
#include <sys/eventfd.h>
+#include <ctype.h>
#include <eal_filesystem.h>
#include <rte_mbuf.h>
@@ -49,9 +50,41 @@
*/
static struct fslmc_vfio_container s_vfio_container;
/* Currently we only support single group/process. */
-const char *fslmc_group; /* dprc.x*/
+static const char *fslmc_group; /* dprc.x*/
static uint32_t *msi_intr_vaddr;
-void *(*rte_mcp_ptr_list);
+static void *(*rte_mcp_ptr_list);
+
+struct fslmc_dmaseg {
+ uint64_t vaddr;
+ uint64_t iova;
+ uint64_t size;
+
+ TAILQ_ENTRY(fslmc_dmaseg) next;
+};
+
+TAILQ_HEAD(fslmc_dmaseg_list, fslmc_dmaseg);
+
+struct fslmc_dmaseg_list fslmc_memsegs =
+ TAILQ_HEAD_INITIALIZER(fslmc_memsegs);
+struct fslmc_dmaseg_list fslmc_iosegs =
+ TAILQ_HEAD_INITIALIZER(fslmc_iosegs);
+
+static uint64_t fslmc_mem_va2iova = RTE_BAD_IOVA;
+static int fslmc_mem_map_num;
+
+struct fslmc_mem_param {
+ struct vfio_mp_param mp_param;
+ struct fslmc_dmaseg_list memsegs;
+ struct fslmc_dmaseg_list iosegs;
+ uint64_t mem_va2iova;
+ int mem_map_num;
+};
+
+enum {
+ FSLMC_VFIO_SOCKET_REQ_CONTAINER = 0x100,
+ FSLMC_VFIO_SOCKET_REQ_GROUP,
+ FSLMC_VFIO_SOCKET_REQ_MEM
+};
void *
dpaa2_get_mcp_ptr(int portal_idx)
@@ -65,6 +98,64 @@ dpaa2_get_mcp_ptr(int portal_idx)
static struct rte_dpaa2_object_list dpaa2_obj_list =
TAILQ_HEAD_INITIALIZER(dpaa2_obj_list);
+static uint64_t
+fslmc_io_virt2phy(const void *virtaddr)
+{
+ FILE *fp = fopen("/proc/self/maps", "r");
+ char *line = NULL;
+ size_t linesz = 0;
+ uint64_t start, end, phy;
+ const uint64_t va = (const uint64_t)virtaddr;
+ char tmp[1024];
+ int ret;
+
+ if (!fp)
+ return RTE_BAD_IOVA;
+ while (getdelim(&line, &linesz, '\n', fp) > 0) {
+ char *ptr = line;
+ int n;
+
+ /** Parse virtual address range.*/
+ n = 0;
+ while (*ptr && !isspace(*ptr)) {
+ tmp[n] = *ptr;
+ ptr++;
+ n++;
+ }
+ tmp[n] = 0;
+ ret = sscanf(tmp, "%" SCNx64 "-%" SCNx64, &start, &end);
+ if (ret != 2)
+ continue;
+ if (va < start || va >= end)
+ continue;
+
+ /** This virtual address is in this segment.*/
+ while (*ptr == ' ' || *ptr == 'r' ||
+ *ptr == 'w' || *ptr == 's' ||
+ *ptr == 'p' || *ptr == 'x' ||
+ *ptr == '-')
+ ptr++;
+
+ /** Extract phy address*/
+ n = 0;
+ while (*ptr && !isspace(*ptr)) {
+ tmp[n] = *ptr;
+ ptr++;
+ n++;
+ }
+ tmp[n] = 0;
+ phy = strtoull(tmp, NULL, 16);
+ if (!phy)
+ continue;
+ free(line);
+ fclose(fp);
+ return phy + va - start;
+ }
+ free(line);
+ fclose(fp);
+ return RTE_BAD_IOVA;
+}
+
/*register a fslmc bus based dpaa2 driver */
void
rte_fslmc_object_register(struct rte_dpaa2_object *object)
@@ -271,7 +362,7 @@ fslmc_get_group_id(const char *group_name,
ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES,
group_name, groupid);
if (ret <= 0) {
- DPAA2_BUS_ERR("Unable to find %s IOMMU group", group_name);
+ DPAA2_BUS_ERR("Find %s IOMMU group", group_name);
if (ret < 0)
return ret;
@@ -314,7 +405,7 @@ fslmc_vfio_open_group_fd(const char *group_name)
/* if we're in a secondary process, request group fd from the primary
* process via mp channel.
*/
- p->req = SOCKET_REQ_GROUP;
+ p->req = FSLMC_VFIO_SOCKET_REQ_GROUP;
p->group_num = iommu_group_num;
strcpy(mp_req.name, FSLMC_VFIO_MP);
mp_req.len_param = sizeof(*p);
@@ -408,7 +499,7 @@ fslmc_vfio_open_container_fd(void)
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
if (vfio_container_fd < 0) {
- DPAA2_BUS_ERR("Cannot open VFIO container(%s), err(%d)",
+ DPAA2_BUS_ERR("Open VFIO container(%s), err(%d)",
VFIO_CONTAINER_PATH, vfio_container_fd);
ret = vfio_container_fd;
goto err_exit;
@@ -417,7 +508,7 @@ fslmc_vfio_open_container_fd(void)
/* check VFIO API version */
ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
if (ret < 0) {
- DPAA2_BUS_ERR("Could not get VFIO API version(%d)",
+ DPAA2_BUS_ERR("Get VFIO API version(%d)",
ret);
} else if (ret != VFIO_API_VERSION) {
DPAA2_BUS_ERR("Unsupported VFIO API version(%d)",
@@ -431,7 +522,7 @@ fslmc_vfio_open_container_fd(void)
ret = fslmc_vfio_check_extensions(vfio_container_fd);
if (ret) {
- DPAA2_BUS_ERR("No supported IOMMU extensions found(%d)",
+ DPAA2_BUS_ERR("Unsupported IOMMU extensions found(%d)",
ret);
close(vfio_container_fd);
goto err_exit;
@@ -443,7 +534,7 @@ fslmc_vfio_open_container_fd(void)
* if we're in a secondary process, request container fd from the
* primary process via mp channel
*/
- p->req = SOCKET_REQ_CONTAINER;
+ p->req = FSLMC_VFIO_SOCKET_REQ_CONTAINER;
strcpy(mp_req.name, FSLMC_VFIO_MP);
mp_req.len_param = sizeof(*p);
mp_req.num_fds = 0;
@@ -473,7 +564,7 @@ fslmc_vfio_open_container_fd(void)
err_exit:
if (mp_reply.msgs)
free(mp_reply.msgs);
- DPAA2_BUS_ERR("Cannot request container fd err(%d)", ret);
+ DPAA2_BUS_ERR("Open container fd err(%d)", ret);
return ret;
}
@@ -506,17 +597,19 @@ fslmc_vfio_mp_primary(const struct rte_mp_msg *msg,
struct rte_mp_msg reply;
struct vfio_mp_param *r = (void *)reply.param;
const struct vfio_mp_param *m = (const void *)msg->param;
+ struct fslmc_mem_param *map;
if (msg->len_param != sizeof(*m)) {
- DPAA2_BUS_ERR("fslmc vfio received invalid message!");
+ DPAA2_BUS_ERR("Invalid msg size(%d) for req(%d)",
+ msg->len_param, m->req);
return -EINVAL;
}
memset(&reply, 0, sizeof(reply));
switch (m->req) {
- case SOCKET_REQ_GROUP:
- r->req = SOCKET_REQ_GROUP;
+ case FSLMC_VFIO_SOCKET_REQ_GROUP:
+ r->req = FSLMC_VFIO_SOCKET_REQ_GROUP;
r->group_num = m->group_num;
fd = fslmc_vfio_group_fd_by_id(m->group_num);
if (fd < 0) {
@@ -530,9 +623,10 @@ fslmc_vfio_mp_primary(const struct rte_mp_msg *msg,
reply.num_fds = 1;
reply.fds[0] = fd;
}
+ reply.len_param = sizeof(*r);
break;
- case SOCKET_REQ_CONTAINER:
- r->req = SOCKET_REQ_CONTAINER;
+ case FSLMC_VFIO_SOCKET_REQ_CONTAINER:
+ r->req = FSLMC_VFIO_SOCKET_REQ_CONTAINER;
fd = fslmc_vfio_container_fd();
if (fd <= 0) {
r->result = SOCKET_ERR;
@@ -541,20 +635,73 @@ fslmc_vfio_mp_primary(const struct rte_mp_msg *msg,
reply.num_fds = 1;
reply.fds[0] = fd;
}
+ reply.len_param = sizeof(*r);
+ break;
+ case FSLMC_VFIO_SOCKET_REQ_MEM:
+ map = (void *)reply.param;
+ r = &map->mp_param;
+ r->req = FSLMC_VFIO_SOCKET_REQ_MEM;
+ r->result = SOCKET_OK;
+ rte_memcpy(&map->memsegs, &fslmc_memsegs,
+ sizeof(struct fslmc_dmaseg_list));
+ rte_memcpy(&map->iosegs, &fslmc_iosegs,
+ sizeof(struct fslmc_dmaseg_list));
+ map->mem_va2iova = fslmc_mem_va2iova;
+ map->mem_map_num = fslmc_mem_map_num;
+ reply.len_param = sizeof(struct fslmc_mem_param);
break;
default:
- DPAA2_BUS_ERR("fslmc vfio received invalid message(%08x)",
+ DPAA2_BUS_ERR("VFIO received invalid message(%08x)",
m->req);
return -ENOTSUP;
}
strcpy(reply.name, FSLMC_VFIO_MP);
- reply.len_param = sizeof(*r);
ret = rte_mp_reply(&reply, peer);
return ret;
}
+static int
+fslmc_vfio_mp_sync_mem_req(void)
+{
+ struct rte_mp_msg mp_req, *mp_rep;
+ struct rte_mp_reply mp_reply = {0};
+ struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
+ int ret = 0;
+ struct vfio_mp_param *mp_param;
+ struct fslmc_mem_param *mem_rsp;
+
+ mp_param = (void *)mp_req.param;
+ memset(&mp_req, 0, sizeof(struct rte_mp_msg));
+ mp_param->req = FSLMC_VFIO_SOCKET_REQ_MEM;
+ strcpy(mp_req.name, FSLMC_VFIO_MP);
+ mp_req.len_param = sizeof(struct vfio_mp_param);
+ if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
+ mp_reply.nb_received == 1) {
+ mp_rep = &mp_reply.msgs[0];
+ mem_rsp = (struct fslmc_mem_param *)mp_rep->param;
+ if (mem_rsp->mp_param.result == SOCKET_OK) {
+ rte_memcpy(&fslmc_memsegs,
+ &mem_rsp->memsegs,
+ sizeof(struct fslmc_dmaseg_list));
+ rte_memcpy(&fslmc_iosegs,
+ &mem_rsp->iosegs,
+ sizeof(struct fslmc_dmaseg_list));
+ fslmc_mem_va2iova = mem_rsp->mem_va2iova;
+ fslmc_mem_map_num = mem_rsp->mem_map_num;
+ } else {
+ DPAA2_BUS_ERR("Bad MEM SEG");
+ ret = -EINVAL;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+ free(mp_reply.msgs);
+
+ return ret;
+}
+
static int
fslmc_vfio_mp_sync_setup(void)
{
@@ -565,6 +712,10 @@ fslmc_vfio_mp_sync_setup(void)
fslmc_vfio_mp_primary);
if (ret && rte_errno != ENOTSUP)
return ret;
+ } else {
+ ret = fslmc_vfio_mp_sync_mem_req();
+ if (ret)
+ return ret;
}
return 0;
@@ -585,30 +736,34 @@ vfio_connect_container(int vfio_container_fd,
iommu_type = fslmc_vfio_iommu_type(vfio_group_fd);
if (iommu_type < 0) {
- DPAA2_BUS_ERR("Failed to get iommu type(%d)",
- iommu_type);
+ DPAA2_BUS_ERR("Get iommu type(%d)", iommu_type);
return iommu_type;
}
/* Check whether support for SMMU type IOMMU present or not */
- if (ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, iommu_type)) {
- /* Connect group to container */
- ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
+ ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, iommu_type);
+ if (ret <= 0) {
+ DPAA2_BUS_ERR("Unsupported IOMMU type(%d) ret(%d), err(%d)",
+ iommu_type, ret, -errno);
+ return -EINVAL;
+ }
+
+ ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
&vfio_container_fd);
- if (ret) {
- DPAA2_BUS_ERR("Failed to setup group container");
- return -errno;
- }
+ if (ret) {
+ DPAA2_BUS_ERR("Set group container ret(%d), err(%d)",
+ ret, -errno);
- ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU, iommu_type);
- if (ret) {
- DPAA2_BUS_ERR("Failed to setup VFIO iommu");
- return -errno;
- }
- } else {
- DPAA2_BUS_ERR("No supported IOMMU available");
- return -EINVAL;
+ return ret;
+ }
+
+ ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU, iommu_type);
+ if (ret) {
+ DPAA2_BUS_ERR("Set iommu ret(%d), err(%d)",
+ ret, -errno);
+
+ return ret;
}
return fslmc_vfio_connect_container(vfio_group_fd);
@@ -629,11 +784,11 @@ static int vfio_map_irq_region(void)
fd = fslmc_vfio_group_fd_by_name(group_name);
if (fd <= 0) {
- DPAA2_BUS_ERR("%s failed to open group fd(%d)",
- __func__, fd);
+ DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)",
+ __func__, group_name, fd);
if (fd < 0)
return fd;
- return -rte_errno;
+ return -EIO;
}
if (!fslmc_vfio_container_connected(fd)) {
DPAA2_BUS_ERR("Container is not connected");
@@ -643,8 +798,8 @@ static int vfio_map_irq_region(void)
vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
PROT_READ, MAP_SHARED, fd, 0x6030000);
if (vaddr == MAP_FAILED) {
- DPAA2_BUS_INFO("Unable to map region (errno = %d)", errno);
- return -errno;
+ DPAA2_BUS_ERR("Unable to map region (errno = %d)", errno);
+ return -ENOMEM;
}
msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
@@ -654,141 +809,200 @@ static int vfio_map_irq_region(void)
return 0;
DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno);
- return -errno;
-}
-
-static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
-static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
-
-static void
-fslmc_memevent_cb(enum rte_mem_event type, const void *addr,
- size_t len, void *arg __rte_unused)
-{
- struct rte_memseg_list *msl;
- struct rte_memseg *ms;
- size_t cur_len = 0, map_len = 0;
- uint64_t virt_addr;
- rte_iova_t iova_addr;
- int ret;
-
- msl = rte_mem_virt2memseg_list(addr);
-
- while (cur_len < len) {
- const void *va = RTE_PTR_ADD(addr, cur_len);
-
- ms = rte_mem_virt2memseg(va, msl);
- iova_addr = ms->iova;
- virt_addr = ms->addr_64;
- map_len = ms->len;
-
- DPAA2_BUS_DEBUG("Request for %s, va=%p, "
- "virt_addr=0x%" PRIx64 ", "
- "iova=0x%" PRIx64 ", map_len=%zu",
- type == RTE_MEM_EVENT_ALLOC ?
- "alloc" : "dealloc",
- va, virt_addr, iova_addr, map_len);
-
- /* iova_addr may be set to RTE_BAD_IOVA */
- if (iova_addr == RTE_BAD_IOVA) {
- DPAA2_BUS_DEBUG("Segment has invalid iova, skipping\n");
- cur_len += map_len;
- continue;
- }
-
- if (type == RTE_MEM_EVENT_ALLOC)
- ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
- else
- ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len);
-
- if (ret != 0) {
- DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. "
- "Map=%d, addr=%p, len=%zu, err:(%d)",
- type, va, map_len, ret);
- return;
- }
-
- cur_len += map_len;
- }
-
- if (type == RTE_MEM_EVENT_ALLOC)
- DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu",
- addr, len);
- else
- DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu",
- addr, len);
+ return ret;
}
static int
-fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr,
- size_t len)
+fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len)
{
struct vfio_iommu_type1_dma_map dma_map = {
.argsz = sizeof(struct vfio_iommu_type1_dma_map),
.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
};
- int ret, fd;
+ int ret, fd, is_io = 0;
const char *group_name = fslmc_vfio_get_group_name();
+ struct fslmc_dmaseg *dmaseg = NULL;
+ uint64_t phy = 0;
+
+ if (rte_eal_iova_mode() == RTE_IOVA_VA) {
+ if (vaddr != iovaddr) {
+ DPAA2_BUS_ERR("IOVA:VA(%" PRIx64 " : %" PRIx64 ") %s",
+ iovaddr, vaddr,
+ "should be 1:1 for VA mode");
+
+ return -EINVAL;
+ }
+ }
+ phy = rte_mem_virt2phy((const void *)(uintptr_t)vaddr);
+ if (phy == RTE_BAD_IOVA) {
+ phy = fslmc_io_virt2phy((const void *)(uintptr_t)vaddr);
+ if (phy == RTE_BAD_IOVA)
+ return -ENOMEM;
+ is_io = 1;
+ } else if (fslmc_mem_va2iova != RTE_BAD_IOVA &&
+ fslmc_mem_va2iova != (iovaddr - vaddr)) {
+ DPAA2_BUS_WARN("Multiple MEM PA<->VA conversions.");
+ }
+ DPAA2_BUS_DEBUG("%s(%zu): VA(%" PRIx64 "):IOVA(%" PRIx64 "):PHY(%" PRIx64 ")",
+ is_io ? "DMA IO map size" : "DMA MEM map size",
+ len, vaddr, iovaddr, phy);
+
+ if (is_io)
+ goto io_mapping_check;
+
+ TAILQ_FOREACH(dmaseg, &fslmc_memsegs, next) {
+ if (!((vaddr + len) <= dmaseg->vaddr ||
+ (dmaseg->vaddr + dmaseg->size) <= vaddr)) {
+ DPAA2_BUS_ERR("MEM: New VA Range(%" PRIx64 " ~ %" PRIx64 ")",
+ vaddr, vaddr + len);
+ DPAA2_BUS_ERR("MEM: Overlap with (%" PRIx64 " ~ %" PRIx64 ")",
+ dmaseg->vaddr,
+ dmaseg->vaddr + dmaseg->size);
+ return -EEXIST;
+ }
+ if (!((iovaddr + len) <= dmaseg->iova ||
+ (dmaseg->iova + dmaseg->size) <= iovaddr)) {
+ DPAA2_BUS_ERR("MEM: New IOVA Range(%" PRIx64 " ~ %" PRIx64 ")",
+ iovaddr, iovaddr + len);
+ DPAA2_BUS_ERR("MEM: Overlap with (%" PRIx64 " ~ %" PRIx64 ")",
+ dmaseg->iova,
+ dmaseg->iova + dmaseg->size);
+ return -EEXIST;
+ }
+ }
+ goto start_mapping;
+
+io_mapping_check:
+ TAILQ_FOREACH(dmaseg, &fslmc_iosegs, next) {
+ if (!((vaddr + len) <= dmaseg->vaddr ||
+ (dmaseg->vaddr + dmaseg->size) <= vaddr)) {
+ DPAA2_BUS_ERR("IO: New VA Range (%" PRIx64 " ~ %" PRIx64 ")",
+ vaddr, vaddr + len);
+ DPAA2_BUS_ERR("IO: Overlap with (%" PRIx64 " ~ %" PRIx64 ")",
+ dmaseg->vaddr,
+ dmaseg->vaddr + dmaseg->size);
+ return -EEXIST;
+ }
+ if (!((iovaddr + len) <= dmaseg->iova ||
+ (dmaseg->iova + dmaseg->size) <= iovaddr)) {
+ DPAA2_BUS_ERR("IO: New IOVA Range(%" PRIx64 " ~ %" PRIx64 ")",
+ iovaddr, iovaddr + len);
+ DPAA2_BUS_ERR("IO: Overlap with (%" PRIx64 " ~ %" PRIx64 ")",
+ dmaseg->iova,
+ dmaseg->iova + dmaseg->size);
+ return -EEXIST;
+ }
+ }
+
+start_mapping:
fd = fslmc_vfio_group_fd_by_name(group_name);
if (fd <= 0) {
- DPAA2_BUS_ERR("%s failed to open group fd(%d)",
- __func__, fd);
+ DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)",
+ __func__, group_name, fd);
if (fd < 0)
return fd;
- return -rte_errno;
+ return -EIO;
}
if (fslmc_vfio_iommu_type(fd) == RTE_VFIO_NOIOMMU) {
DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
- return 0;
+ if (phy != iovaddr) {
+ DPAA2_BUS_ERR("IOVA must equal PA in NOIOMMU mode");
+ return -EIO;
+ }
+ goto end_mapping;
}
dma_map.size = len;
dma_map.vaddr = vaddr;
dma_map.iova = iovaddr;
-#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- if (vaddr != iovaddr) {
- DPAA2_BUS_WARN("vaddr(0x%lx) != iovaddr(0x%lx)",
- vaddr, iovaddr);
- }
-#endif
-
/* SET DMA MAP for IOMMU */
if (!fslmc_vfio_container_connected(fd)) {
- DPAA2_BUS_ERR("Container is not connected ");
+ DPAA2_BUS_ERR("Container is not connected");
return -EIO;
}
- DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"",
- (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size);
ret = ioctl(fslmc_vfio_container_fd(), VFIO_IOMMU_MAP_DMA,
&dma_map);
if (ret) {
- DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)",
- errno);
+ DPAA2_BUS_ERR("%s(%d) VA(%" PRIx64 "):IOVA(%" PRIx64 "):PHY(%" PRIx64 ")",
+ is_io ? "DMA IO map err" : "DMA MEM map err",
+ errno, vaddr, iovaddr, phy);
return ret;
}
+end_mapping:
+ dmaseg = malloc(sizeof(struct fslmc_dmaseg));
+ if (!dmaseg) {
+ DPAA2_BUS_ERR("DMA segment malloc failed!");
+ return -ENOMEM;
+ }
+ dmaseg->vaddr = vaddr;
+ dmaseg->iova = iovaddr;
+ dmaseg->size = len;
+ if (is_io) {
+ TAILQ_INSERT_TAIL(&fslmc_iosegs, dmaseg, next);
+ } else {
+ fslmc_mem_map_num++;
+ if (fslmc_mem_map_num == 1)
+ fslmc_mem_va2iova = iovaddr - vaddr;
+ else
+ fslmc_mem_va2iova = RTE_BAD_IOVA;
+ TAILQ_INSERT_TAIL(&fslmc_memsegs, dmaseg, next);
+ }
+ DPAA2_BUS_LOG(NOTICE,
+ "%s(%zx): VA(%" PRIx64 "):IOVA(%" PRIx64 "):PHY(%" PRIx64 ")",
+ is_io ? "DMA I/O map size" : "DMA MEM map size",
+ len, vaddr, iovaddr, phy);
+
return 0;
}
static int
-fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
+fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr, size_t len)
{
struct vfio_iommu_type1_dma_unmap dma_unmap = {
.argsz = sizeof(struct vfio_iommu_type1_dma_unmap),
.flags = 0,
};
- int ret, fd;
+ int ret, fd, is_io = 0;
const char *group_name = fslmc_vfio_get_group_name();
+ struct fslmc_dmaseg *dmaseg = NULL;
+
+ TAILQ_FOREACH(dmaseg, &fslmc_memsegs, next) {
+ if (((vaddr && dmaseg->vaddr == vaddr) || !vaddr) &&
+ dmaseg->iova == iovaddr &&
+ dmaseg->size == len) {
+ is_io = 0;
+ break;
+ }
+ }
+
+ if (!dmaseg) {
+ TAILQ_FOREACH(dmaseg, &fslmc_iosegs, next) {
+ if (((vaddr && dmaseg->vaddr == vaddr) || !vaddr) &&
+ dmaseg->iova == iovaddr &&
+ dmaseg->size == len) {
+ is_io = 1;
+ break;
+ }
+ }
+ }
+
+ if (!dmaseg) {
+ DPAA2_BUS_ERR("IOVA(%" PRIx64 ") with length(%zx) not mapped",
+ iovaddr, len);
+ return 0;
+ }
fd = fslmc_vfio_group_fd_by_name(group_name);
if (fd <= 0) {
- DPAA2_BUS_ERR("%s failed to open group fd(%d)",
- __func__, fd);
+ DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)",
+ __func__, group_name, fd);
if (fd < 0)
return fd;
- return -rte_errno;
+ return -EIO;
}
if (fslmc_vfio_iommu_type(fd) == RTE_VFIO_NOIOMMU) {
DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
@@ -796,7 +1010,7 @@ fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
}
dma_unmap.size = len;
- dma_unmap.iova = vaddr;
+ dma_unmap.iova = iovaddr;
/* SET DMA MAP for IOMMU */
if (!fslmc_vfio_container_connected(fd)) {
@@ -804,19 +1018,164 @@ fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
return -EIO;
}
- DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"",
- (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size);
ret = ioctl(fslmc_vfio_container_fd(), VFIO_IOMMU_UNMAP_DMA,
&dma_unmap);
if (ret) {
- DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)",
- errno);
- return -1;
+ DPAA2_BUS_ERR("DMA un-map IOVA(%" PRIx64 " ~ %" PRIx64 ") err(%d)",
+ iovaddr, iovaddr + len, errno);
+ return ret;
+ }
+
+ if (is_io) {
+ TAILQ_REMOVE(&fslmc_iosegs, dmaseg, next);
+ } else {
+ TAILQ_REMOVE(&fslmc_memsegs, dmaseg, next);
+ fslmc_mem_map_num--;
+ if (TAILQ_EMPTY(&fslmc_memsegs))
+ fslmc_mem_va2iova = RTE_BAD_IOVA;
}
+ free(dmaseg);
+
return 0;
}
+uint64_t
+rte_fslmc_cold_mem_vaddr_to_iova(void *vaddr,
+ uint64_t size)
+{
+ struct fslmc_dmaseg *dmaseg;
+ uint64_t va;
+
+ va = (uint64_t)vaddr;
+ TAILQ_FOREACH(dmaseg, &fslmc_memsegs, next) {
+ if (va >= dmaseg->vaddr &&
+ (va + size) <= (dmaseg->vaddr + dmaseg->size)) {
+ return dmaseg->iova + va - dmaseg->vaddr;
+ }
+ }
+
+ return RTE_BAD_IOVA;
+}
+
+void *
+rte_fslmc_cold_mem_iova_to_vaddr(uint64_t iova,
+ uint64_t size)
+{
+ struct fslmc_dmaseg *dmaseg;
+
+ TAILQ_FOREACH(dmaseg, &fslmc_memsegs, next) {
+ if (iova >= dmaseg->iova &&
+ (iova + size) <= (dmaseg->iova + dmaseg->size))
+ return (void *)((uintptr_t)dmaseg->vaddr
+ + (uintptr_t)(iova - dmaseg->iova));
+ }
+
+ return NULL;
+}
+
+__hot uint64_t
+rte_fslmc_mem_vaddr_to_iova(void *vaddr)
+{
+ if (likely(fslmc_mem_va2iova != RTE_BAD_IOVA))
+ return (uint64_t)vaddr + fslmc_mem_va2iova;
+
+ return rte_fslmc_cold_mem_vaddr_to_iova(vaddr, 0);
+}
+
+__hot void *
+rte_fslmc_mem_iova_to_vaddr(uint64_t iova)
+{
+ if (likely(fslmc_mem_va2iova != RTE_BAD_IOVA))
+ return (void *)((uintptr_t)iova - (uintptr_t)fslmc_mem_va2iova);
+
+ return rte_fslmc_cold_mem_iova_to_vaddr(iova, 0);
+}
+
+uint64_t
+rte_fslmc_io_vaddr_to_iova(void *vaddr)
+{
+ struct fslmc_dmaseg *dmaseg = NULL;
+ uint64_t va = (uint64_t)vaddr;
+
+ TAILQ_FOREACH(dmaseg, &fslmc_iosegs, next) {
+ if ((va >= dmaseg->vaddr) &&
+ va < dmaseg->vaddr + dmaseg->size)
+ return dmaseg->iova + va - dmaseg->vaddr;
+ }
+
+ return RTE_BAD_IOVA;
+}
+
+void *
+rte_fslmc_io_iova_to_vaddr(uint64_t iova)
+{
+ struct fslmc_dmaseg *dmaseg = NULL;
+
+ TAILQ_FOREACH(dmaseg, &fslmc_iosegs, next) {
+ if ((iova >= dmaseg->iova) &&
+ iova < dmaseg->iova + dmaseg->size)
+ return (void *)((uintptr_t)dmaseg->vaddr
+ + (uintptr_t)(iova - dmaseg->iova));
+ }
+
+ return NULL;
+}
+
+static void
+fslmc_memevent_cb(enum rte_mem_event type, const void *addr,
+ size_t len, void *arg __rte_unused)
+{
+ struct rte_memseg_list *msl;
+ struct rte_memseg *ms;
+ size_t cur_len = 0, map_len = 0;
+ uint64_t virt_addr;
+ rte_iova_t iova_addr;
+ int ret;
+
+ msl = rte_mem_virt2memseg_list(addr);
+
+ while (cur_len < len) {
+ const void *va = RTE_PTR_ADD(addr, cur_len);
+
+ ms = rte_mem_virt2memseg(va, msl);
+ iova_addr = ms->iova;
+ virt_addr = ms->addr_64;
+ map_len = ms->len;
+
+ DPAA2_BUS_DEBUG("%s, va=%p, virt=%" PRIx64 ", iova=%" PRIx64 ", len=%zu",
+ type == RTE_MEM_EVENT_ALLOC ? "alloc" : "dealloc",
+ va, virt_addr, iova_addr, map_len);
+
+ /* iova_addr may be set to RTE_BAD_IOVA */
+ if (iova_addr == RTE_BAD_IOVA) {
+ DPAA2_BUS_DEBUG("Segment has invalid iova, skipping\n");
+ cur_len += map_len;
+ continue;
+ }
+
+ if (type == RTE_MEM_EVENT_ALLOC)
+ ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
+ else
+ ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len);
+
+ if (ret != 0) {
+ DPAA2_BUS_ERR("%s: Map=%d, addr=%p, len=%zu, err:(%d)",
+ type == RTE_MEM_EVENT_ALLOC ?
+ "DMA Mapping failed. " :
+ "DMA Unmapping failed. ",
+ type, va, map_len, ret);
+ return;
+ }
+
+ cur_len += map_len;
+ }
+
+ DPAA2_BUS_DEBUG("Total %s: addr=%p, len=%zu",
+ type == RTE_MEM_EVENT_ALLOC ? "Mapped" : "Unmapped",
+ addr, len);
+}
+
static int
fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
const struct rte_memseg *ms, void *arg)
@@ -847,7 +1206,7 @@ rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size)
int
rte_fslmc_vfio_mem_dmaunmap(uint64_t iova, uint64_t size)
{
- return fslmc_unmap_dma(iova, 0, size);
+ return fslmc_unmap_dma(0, iova, size);
}
int rte_fslmc_vfio_dmamap(void)
@@ -857,9 +1216,10 @@ int rte_fslmc_vfio_dmamap(void)
/* Lock before parsing and registering callback to memory subsystem */
rte_mcfg_mem_read_lock();
- if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
+ ret = rte_memseg_walk(fslmc_dmamap_seg, &i);
+ if (ret) {
rte_mcfg_mem_read_unlock();
- return -1;
+ return ret;
}
ret = rte_mem_event_callback_register("fslmc_memevent_clb",
@@ -898,6 +1258,14 @@ fslmc_vfio_setup_device(const char *dev_addr,
const char *group_name = fslmc_vfio_get_group_name();
vfio_group_fd = fslmc_vfio_group_fd_by_name(group_name);
+ if (vfio_group_fd <= 0) {
+ DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)",
+ __func__, group_name, vfio_group_fd);
+ if (vfio_group_fd < 0)
+ return vfio_group_fd;
+ return -EIO;
+ }
+
if (!fslmc_vfio_container_connected(vfio_group_fd)) {
DPAA2_BUS_ERR("Container is not connected");
return -EIO;
@@ -1006,8 +1374,7 @@ int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index)
vfio_dev_fd = rte_intr_dev_fd_get(intr_handle);
ret = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret)
- DPAA2_BUS_ERR(
- "Error disabling dpaa2 interrupts for fd %d",
+ DPAA2_BUS_ERR("Error disabling dpaa2 interrupts for fd %d",
rte_intr_fd_get(intr_handle));
return ret;
@@ -1032,7 +1399,7 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
if (ret < 0) {
DPAA2_BUS_ERR("Cannot get IRQ(%d) info, error %i (%s)",
i, errno, strerror(errno));
- return -1;
+ return ret;
}
/* if this vector cannot be used with eventfd,
@@ -1046,8 +1413,8 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
if (fd < 0) {
DPAA2_BUS_ERR("Cannot set up eventfd, error %i (%s)",
- errno, strerror(errno));
- return -1;
+ errno, strerror(errno));
+ return fd;
}
if (rte_intr_fd_set(intr_handle, fd))
@@ -1063,7 +1430,7 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
}
/* if we're here, we haven't found a suitable interrupt vector */
- return -1;
+ return -EIO;
}
static void
@@ -1237,6 +1604,13 @@ fslmc_vfio_close_group(void)
const char *group_name = fslmc_vfio_get_group_name();
vfio_group_fd = fslmc_vfio_group_fd_by_name(group_name);
+ if (vfio_group_fd <= 0) {
+ DPAA2_BUS_ERR("%s: Get fd by name(%s) failed(%d)",
+ __func__, group_name, vfio_group_fd);
+ if (vfio_group_fd < 0)
+ return vfio_group_fd;
+ return -EIO;
+ }
RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
if (dev->device.devargs &&
@@ -1328,7 +1702,7 @@ fslmc_vfio_process_group(void)
ret = fslmc_process_mcp(dev);
if (ret) {
DPAA2_BUS_ERR("Unable to map MC Portal");
- return -1;
+ return ret;
}
found_mportal = 1;
}
@@ -1345,7 +1719,7 @@ fslmc_vfio_process_group(void)
/* Cannot continue if there is not even a single mportal */
if (!found_mportal) {
DPAA2_BUS_ERR("No MC Portal device found. Not continuing");
- return -1;
+ return -EIO;
}
/* Search for DPRC device next as it updates endpoint of
@@ -1357,7 +1731,7 @@ fslmc_vfio_process_group(void)
ret = fslmc_process_iodevices(dev);
if (ret) {
DPAA2_BUS_ERR("Unable to process dprc");
- return -1;
+ return ret;
}
TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
}
@@ -1414,7 +1788,7 @@ fslmc_vfio_process_group(void)
if (ret) {
DPAA2_BUS_DEBUG("Dev (%s) init failed",
dev->device.name);
- return -1;
+ return ret;
}
break;
@@ -1438,7 +1812,7 @@ fslmc_vfio_process_group(void)
if (ret) {
DPAA2_BUS_DEBUG("Dev (%s) init failed",
dev->device.name);
- return -1;
+ return ret;
}
break;
@@ -1467,9 +1841,9 @@ fslmc_vfio_setup_group(void)
vfio_container_fd = fslmc_vfio_container_fd();
if (vfio_container_fd <= 0) {
vfio_container_fd = fslmc_vfio_open_container_fd();
- if (vfio_container_fd <= 0) {
+ if (vfio_container_fd < 0) {
DPAA2_BUS_ERR("Failed to create MC VFIO container");
- return -rte_errno;
+ return vfio_container_fd;
}
}
@@ -1482,6 +1856,8 @@ fslmc_vfio_setup_group(void)
if (vfio_group_fd <= 0) {
vfio_group_fd = fslmc_vfio_open_group_fd(group_name);
if (vfio_group_fd <= 0) {
+ DPAA2_BUS_ERR("%s: open group name(%s) failed(%d)",
+ __func__, group_name, vfio_group_fd);
if (!vfio_group_fd)
close(vfio_group_fd);
DPAA2_BUS_ERR("Failed to create MC VFIO group");
diff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h
index 1695b6c078..408b35680d 100644
--- a/drivers/bus/fslmc/fslmc_vfio.h
+++ b/drivers/bus/fslmc/fslmc_vfio.h
@@ -11,6 +11,10 @@
#include <rte_compat.h>
#include <rte_vfio.h>
+#ifndef __hot
+#define __hot __attribute__((hot))
+#endif
+
/* Pathname of FSL-MC devices directory. */
#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
#define DPAA2_MC_DPNI_DEVID 7
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
index bc36607e64..85e4c16c03 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016,2020 NXP
+ * Copyright 2016,2020-2023 NXP
*
*/
@@ -28,7 +28,6 @@
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
-
TAILQ_HEAD(dpbp_dev_list, dpaa2_dpbp_dev);
static struct dpbp_dev_list dpbp_dev_list
= TAILQ_HEAD_INITIALIZER(dpbp_dev_list); /*!< DPBP device list */
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 8265fee497..b52a8c8ba5 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -332,9 +332,8 @@ dpaa2_affine_qbman_swp(void)
}
RTE_PER_LCORE(_dpaa2_io).dpio_dev = dpio_dev;
- DPAA2_BUS_INFO(
- "DPAA Portal=%p (%d) is affined to thread %" PRIu64,
- dpio_dev, dpio_dev->index, tid);
+ DPAA2_BUS_DEBUG("Portal[%d] is affined to thread %" PRIu64,
+ dpio_dev->index, tid);
}
return 0;
}
@@ -354,9 +353,8 @@ dpaa2_affine_qbman_ethrx_swp(void)
}
RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev = dpio_dev;
- DPAA2_BUS_INFO(
- "DPAA Portal=%p (%d) is affined for eth rx to thread %"
- PRIu64, dpio_dev, dpio_dev->index, tid);
+ DPAA2_BUS_DEBUG("Portal_eth_rx[%d] is affined to thread %" PRIu64,
+ dpio_dev->index, tid);
}
return 0;
}
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
index 7407f8d38d..328e1e788a 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2023 NXP
*
*/
@@ -12,6 +12,7 @@
#include <mc/fsl_mc_sys.h>
#include <rte_compat.h>
+#include <dpaa2_hw_pvt.h>
struct dpaa2_io_portal_t {
struct dpaa2_dpio_dev *dpio_dev;
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 169c7917ea..c5900bd06a 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -14,6 +14,7 @@
#include <mc/fsl_mc_sys.h>
#include <fsl_qbman_portal.h>
+#include <bus_fslmc_driver.h>
#ifndef false
#define false 0
@@ -80,6 +81,8 @@
#define DPAA2_PACKET_LAYOUT_ALIGN 64 /*changing from 256 */
#define DPAA2_DPCI_MAX_QUEUES 2
+#define DPAA2_INVALID_FLOW_ID 0xffff
+#define DPAA2_INVALID_CGID 0xff
struct dpaa2_queue;
@@ -365,83 +368,63 @@ enum qbman_fd_format {
*/
#define DPAA2_EQ_RESP_ALWAYS 1
-/* Various structures representing contiguous memory maps */
-struct dpaa2_memseg {
- TAILQ_ENTRY(dpaa2_memseg) next;
- char *vaddr;
- rte_iova_t iova;
- size_t len;
-};
-
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
-extern uint8_t dpaa2_virt_mode;
-static void *dpaa2_mem_ptov(phys_addr_t paddr) __rte_unused;
-
-static void *dpaa2_mem_ptov(phys_addr_t paddr)
+static inline uint64_t
+dpaa2_mem_va_to_iova(void *va)
{
- void *va;
-
- if (dpaa2_virt_mode)
- return (void *)(size_t)paddr;
-
- va = (void *)dpaax_iova_table_get_va(paddr);
- if (likely(va != NULL))
- return va;
-
- /* If not, Fallback to full memseg list searching */
- va = rte_mem_iova2virt(paddr);
+ if (likely(rte_eal_iova_mode() == RTE_IOVA_VA))
+ return (uint64_t)va;
- return va;
+ return rte_fslmc_mem_vaddr_to_iova(va);
}
-static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __rte_unused;
-
-static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
+static inline void *
+dpaa2_mem_iova_to_va(uint64_t iova)
{
- const struct rte_memseg *memseg;
-
- if (dpaa2_virt_mode)
- return vaddr;
+ if (likely(rte_eal_iova_mode() == RTE_IOVA_VA))
+ return (void *)(uintptr_t)iova;
- memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
- if (memseg)
- return memseg->iova + RTE_PTR_DIFF(vaddr, memseg->addr);
- return (size_t)NULL;
+ return rte_fslmc_mem_iova_to_vaddr(iova);
}
-/**
- * When we are using Physical addresses as IO Virtual Addresses,
- * Need to call conversion routines dpaa2_mem_vtop & dpaa2_mem_ptov
- * wherever required.
- * These routines are called with help of below MACRO's
- */
-
#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova)
-
-/**
- * macro to convert Virtual address to IOVA
- */
-#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((size_t)(_vaddr))
-
-/**
- * macro to convert IOVA to Virtual address
- */
-#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((size_t)(_iova))
-
-/**
- * macro to convert modify the memory containing IOVA to Virtual address
- */
+#define DPAA2_VADDR_TO_IOVA(_vaddr) \
+ dpaa2_mem_va_to_iova((void *)(uintptr_t)_vaddr)
+#define DPAA2_IOVA_TO_VADDR(_iova) \
+ dpaa2_mem_iova_to_va((uint64_t)_iova)
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
- {_mem = (_type)(dpaa2_mem_ptov((size_t)(_mem))); }
+ {_mem = (_type)DPAA2_IOVA_TO_VADDR(_mem); }
+
+#define DPAA2_VAMODE_VADDR_TO_IOVA(_vaddr) ((uint64_t)_vaddr)
+#define DPAA2_VAMODE_IOVA_TO_VADDR(_iova) ((void *)_iova)
+#define DPAA2_VAMODE_MODIFY_IOVA_TO_VADDR(_mem, _type) \
+ {_mem = (_type)(_mem); }
+
+#define DPAA2_PAMODE_VADDR_TO_IOVA(_vaddr) \
+ rte_fslmc_mem_vaddr_to_iova((void *)_vaddr)
+#define DPAA2_PAMODE_IOVA_TO_VADDR(_iova) \
+ rte_fslmc_mem_iova_to_vaddr((uint64_t)_iova)
+#define DPAA2_PAMODE_MODIFY_IOVA_TO_VADDR(_mem, _type) \
+ {_mem = (_type)rte_fslmc_mem_iova_to_vaddr(_mem); }
+
+static inline uint64_t
+dpaa2_mem_va_to_iova_check(void *va, uint64_t size)
+{
+ uint64_t iova = rte_fslmc_cold_mem_vaddr_to_iova(va, size);
-#else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+ if (iova == RTE_BAD_IOVA)
+ return RTE_BAD_IOVA;
-#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
-#define DPAA2_VADDR_TO_IOVA(_vaddr) (phys_addr_t)(_vaddr)
-#define DPAA2_IOVA_TO_VADDR(_iova) (void *)(_iova)
-#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)
+ /** Double check the iova is valid.*/
+ if (iova != rte_mem_virt2iova(va))
+ return RTE_BAD_IOVA;
+
+ return iova;
+}
-#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+#define DPAA2_VADDR_TO_IOVA_AND_CHECK(_vaddr, size) \
+ dpaa2_mem_va_to_iova_check(_vaddr, size)
+#define DPAA2_IOVA_TO_VADDR_AND_CHECK(_iova, size) \
+ rte_fslmc_cold_mem_iova_to_vaddr(_iova, size)
static inline
int check_swp_active_dqs(uint16_t dpio_index)
diff --git a/drivers/bus/fslmc/version.map b/drivers/bus/fslmc/version.map
index b49bc0a62c..2c36895285 100644
--- a/drivers/bus/fslmc/version.map
+++ b/drivers/bus/fslmc/version.map
@@ -24,7 +24,6 @@ INTERNAL {
dpaa2_seqn_dynfield_offset;
dpaa2_seqn;
dpaa2_svr_family;
- dpaa2_virt_mode;
dpbp_disable;
dpbp_enable;
dpbp_get_attributes;
@@ -119,6 +118,12 @@ INTERNAL {
rte_fslmc_object_register;
rte_global_active_dqs_list;
rte_fslmc_vfio_mem_dmaunmap;
+ rte_fslmc_cold_mem_vaddr_to_iova;
+ rte_fslmc_cold_mem_iova_to_vaddr;
+ rte_fslmc_mem_vaddr_to_iova;
+ rte_fslmc_mem_iova_to_vaddr;
+ rte_fslmc_io_vaddr_to_iova;
+ rte_fslmc_io_iova_to_vaddr;
local: *;
};
diff --git a/drivers/dma/dpaa2/dpaa2_qdma.c b/drivers/dma/dpaa2/dpaa2_qdma.c
index 2c91ceec13..99b8881c5d 100644
--- a/drivers/dma/dpaa2/dpaa2_qdma.c
+++ b/drivers/dma/dpaa2/dpaa2_qdma.c
@@ -10,6 +10,7 @@
#include <mc/fsl_dpdmai.h>
+#include <dpaa2_hw_dpio.h>
#include "rte_pmd_dpaa2_qdma.h"
#include "dpaa2_qdma.h"
#include "dpaa2_qdma_logs.h"
--
2.25.1
next prev parent reply other threads:[~2024-09-18 7:53 UTC|newest]
Thread overview: 229+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-09-13 5:59 [v1 00/43] DPAA2 specific patches vanshika.shukla
2024-09-13 5:59 ` [v1 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-13 5:59 ` [v1 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-13 5:59 ` [v1 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-13 5:59 ` [v1 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-13 5:59 ` [v1 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-13 5:59 ` [v1 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-13 5:59 ` [v1 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-13 5:59 ` [v1 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-13 5:59 ` [v1 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-13 5:59 ` [v1 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-13 5:59 ` [v1 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-13 5:59 ` [v1 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-13 5:59 ` [v1 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-13 5:59 ` [v1 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-13 5:59 ` [v1 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-13 5:59 ` [v1 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-13 5:59 ` [v1 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-13 5:59 ` [v1 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-13 5:59 ` [v1 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-13 5:59 ` [v1 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-13 5:59 ` [v1 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-13 5:59 ` [v1 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-13 5:59 ` [v1 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-13 5:59 ` [v1 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-13 5:59 ` [v1 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-13 5:59 ` [v1 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-13 5:59 ` [v1 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-13 5:59 ` [v1 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-13 5:59 ` [v1 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-13 5:59 ` [v1 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-13 5:59 ` [v1 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-13 5:59 ` [v1 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-13 5:59 ` [v1 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-13 5:59 ` [v1 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-13 5:59 ` [v1 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-13 5:59 ` [v1 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-13 5:59 ` [v1 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-13 5:59 ` [v1 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-13 5:59 ` [v1 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-13 5:59 ` [v1 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-13 5:59 ` [v1 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-13 5:59 ` [v1 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-13 5:59 ` [v1 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-09-18 7:50 ` [v2 00/43] DPAA2 specific patches vanshika.shukla
2024-09-18 7:50 ` [v2 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-14 12:00 ` [v3 00/43] DPAA2 specific patches vanshika.shukla
2024-10-14 12:00 ` [v3 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-14 12:00 ` [v3 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-10-14 12:00 ` [v3 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-10-14 12:00 ` [v3 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-10-14 12:00 ` [v3 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-10-14 12:00 ` [v3 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-10-14 12:00 ` [v3 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-10-14 12:00 ` [v3 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-10-14 12:00 ` [v3 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-10-14 12:00 ` [v3 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-10-14 12:00 ` [v3 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-10-14 12:00 ` [v3 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-10-14 12:00 ` [v3 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-10-15 2:27 ` Stephen Hemminger
2024-10-14 12:00 ` [v3 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-10-15 2:29 ` Stephen Hemminger
2024-10-14 12:00 ` [v3 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-10-14 12:00 ` [v3 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-10-15 2:31 ` Stephen Hemminger
2024-10-14 12:01 ` [v3 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-10-14 12:01 ` [v3 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-10-14 12:01 ` [v3 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-10-14 12:01 ` [v3 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-10-14 12:01 ` [v3 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-10-14 12:01 ` [v3 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-10-14 12:01 ` [v3 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-10-14 12:01 ` [v3 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-10-14 12:01 ` [v3 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-10-14 12:01 ` [v3 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-10-14 12:01 ` [v3 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-10-14 12:01 ` [v3 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-10-14 12:01 ` [v3 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-10-14 12:01 ` [v3 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-10-14 12:01 ` [v3 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-10-14 12:01 ` [v3 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-10-14 12:01 ` [v3 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-10-14 12:01 ` [v3 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-10-14 12:01 ` [v3 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-10-14 12:01 ` [v3 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-10-14 12:01 ` [v3 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-10-14 12:01 ` [v3 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-10-14 12:01 ` [v3 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-10-14 12:01 ` [v3 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-10-14 12:01 ` [v3 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-10-14 12:01 ` [v3 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-10-14 12:01 ` [v3 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-10-15 2:32 ` Stephen Hemminger
2024-10-22 19:12 ` [v4 00/42] DPAA2 specific patches vanshika.shukla
2024-10-22 19:12 ` [v4 01/42] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-22 19:12 ` [v4 02/42] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-10-22 19:12 ` [v4 03/42] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-10-22 19:12 ` [v4 04/42] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-10-22 19:12 ` [v4 05/42] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-10-22 19:12 ` [v4 06/42] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-10-22 19:12 ` [v4 07/42] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-10-22 19:12 ` [v4 08/42] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-10-22 19:12 ` [v4 09/42] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-10-22 19:12 ` [v4 10/42] net/dpaa2: update DPNI link status method vanshika.shukla
2024-10-22 19:12 ` [v4 11/42] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-10-22 19:12 ` [v4 12/42] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-10-22 19:12 ` [v4 13/42] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-10-22 19:12 ` [v4 14/42] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-10-22 19:12 ` [v4 15/42] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-10-22 19:12 ` [v4 16/42] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-10-23 1:02 ` Stephen Hemminger
2024-10-22 19:12 ` [v4 17/42] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-10-22 19:12 ` [v4 18/42] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-10-22 19:12 ` [v4 19/42] bus/fslmc: fix coverity issue vanshika.shukla
2024-10-22 19:12 ` [v4 20/42] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-10-22 19:12 ` [v4 21/42] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-10-22 19:12 ` [v4 22/42] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-10-22 19:12 ` [v4 23/42] net/dpaa2: flow API refactor vanshika.shukla
2024-10-23 0:52 ` Stephen Hemminger
2024-10-23 12:04 ` [EXT] " Vanshika Shukla
2024-10-22 19:12 ` [v4 24/42] net/dpaa2: dump Rx parser result vanshika.shukla
2024-10-22 19:12 ` [v4 25/42] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-10-22 19:12 ` [v4 26/42] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-10-22 19:12 ` [v4 27/42] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-10-22 19:12 ` [v4 28/42] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-10-22 19:12 ` [v4 29/42] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-10-22 19:12 ` [v4 30/42] net/dpaa2: add GTP flow support vanshika.shukla
2024-10-22 19:12 ` [v4 31/42] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-10-22 19:12 ` [v4 32/42] net/dpaa2: soft parser flow verification vanshika.shukla
2024-10-22 19:12 ` [v4 33/42] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-10-22 19:12 ` [v4 34/42] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-10-22 19:12 ` [v4 35/42] net/dpaa2: support software taildrop vanshika.shukla
2024-10-22 19:12 ` [v4 36/42] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-10-22 19:12 ` [v4 37/42] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-10-22 19:12 ` [v4 38/42] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-10-22 19:12 ` [v4 39/42] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-10-22 19:12 ` [v4 40/42] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-10-22 19:12 ` [v4 41/42] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-10-22 19:12 ` [v4 42/42] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-10-23 11:59 ` [v5 00/42] DPAA2 specific patches vanshika.shukla
2024-10-23 11:59 ` [v5 01/42] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-23 11:59 ` [v5 02/42] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-10-23 11:59 ` [v5 03/42] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-10-23 11:59 ` [v5 04/42] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-10-23 11:59 ` [v5 05/42] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-10-23 11:59 ` [v5 06/42] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-10-23 11:59 ` [v5 07/42] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-10-23 11:59 ` [v5 08/42] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-10-23 11:59 ` [v5 09/42] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-10-23 11:59 ` [v5 10/42] net/dpaa2: update DPNI link status method vanshika.shukla
2024-10-23 11:59 ` [v5 11/42] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-10-23 11:59 ` [v5 12/42] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-10-23 11:59 ` [v5 13/42] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-10-23 11:59 ` [v5 14/42] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-11-09 17:07 ` Thomas Monjalon
2024-10-23 11:59 ` [v5 15/42] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-10-23 11:59 ` [v5 16/42] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-10-23 11:59 ` [v5 17/42] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-10-23 11:59 ` [v5 18/42] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-10-23 11:59 ` [v5 19/42] bus/fslmc: fix coverity issue vanshika.shukla
2024-10-23 11:59 ` [v5 20/42] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-10-23 11:59 ` [v5 21/42] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-10-23 11:59 ` [v5 22/42] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-10-23 11:59 ` [v5 23/42] net/dpaa2: flow API refactor vanshika.shukla
2024-11-09 19:01 ` Thomas Monjalon
2024-10-23 11:59 ` [v5 24/42] net/dpaa2: dump Rx parser result vanshika.shukla
2024-10-23 11:59 ` [v5 25/42] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-10-23 11:59 ` [v5 26/42] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-10-23 11:59 ` [v5 27/42] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-10-23 11:59 ` [v5 28/42] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-10-23 11:59 ` [v5 29/42] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-10-23 11:59 ` [v5 30/42] net/dpaa2: add GTP flow support vanshika.shukla
2024-10-23 11:59 ` [v5 31/42] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-10-23 11:59 ` [v5 32/42] net/dpaa2: soft parser flow verification vanshika.shukla
2024-10-23 11:59 ` [v5 33/42] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-10-23 11:59 ` [v5 34/42] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-10-23 11:59 ` [v5 35/42] net/dpaa2: support software taildrop vanshika.shukla
2024-10-23 11:59 ` [v5 36/42] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-10-23 11:59 ` [v5 37/42] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-10-23 11:59 ` [v5 38/42] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-10-23 11:59 ` [v5 39/42] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-10-23 11:59 ` [v5 40/42] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-10-23 11:59 ` [v5 41/42] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-10-23 11:59 ` [v5 42/42] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-11-07 11:24 ` [v5 00/42] DPAA2 specific patches Hemant Agrawal
2024-09-18 7:50 ` [v2 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-18 7:50 ` [v2 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-18 7:50 ` [v2 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-18 7:50 ` [v2 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-18 7:50 ` [v2 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-18 7:50 ` [v2 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-18 7:50 ` [v2 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-18 7:50 ` [v2 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-18 7:50 ` [v2 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-18 7:50 ` [v2 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-18 7:50 ` [v2 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-18 7:50 ` [v2 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-18 7:50 ` [v2 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-18 7:50 ` [v2 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-18 7:50 ` vanshika.shukla [this message]
2024-09-18 7:50 ` [v2 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-18 7:50 ` [v2 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-18 7:50 ` [v2 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-18 7:50 ` [v2 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-18 7:50 ` [v2 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-18 7:50 ` [v2 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-18 7:50 ` [v2 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-18 7:50 ` [v2 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-18 7:50 ` [v2 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-18 7:50 ` [v2 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-18 7:50 ` [v2 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-18 7:50 ` [v2 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-18 7:50 ` [v2 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-18 7:50 ` [v2 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-18 7:50 ` [v2 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-18 7:50 ` [v2 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-18 7:50 ` [v2 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-18 7:50 ` [v2 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-18 7:50 ` [v2 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-18 7:50 ` [v2 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-18 7:50 ` [v2 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-18 7:50 ` [v2 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-18 7:50 ` [v2 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-18 7:50 ` [v2 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-18 7:50 ` [v2 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-18 7:50 ` [v2 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-18 7:50 ` [v2 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-10-10 2:54 ` [v2 00/43] DPAA2 specific patches Stephen Hemminger
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240918075056.1838654-17-vanshika.shukla@nxp.com \
--to=vanshika.shukla@nxp.com \
--cc=dev@dpdk.org \
--cc=g.singh@nxp.com \
--cc=hemant.agrawal@nxp.com \
--cc=jun.yang@nxp.com \
--cc=sachin.saxena@nxp.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).