DPDK patches and discussions
 help / color / mirror / Atom feed
From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
	Sachin Saxena <sachin.saxena@nxp.com>,
	Anatoly Burakov <anatoly.burakov@intel.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [v2 14/43] bus/fslmc: enhance MC VFIO multiprocess support
Date: Wed, 18 Sep 2024 13:20:27 +0530	[thread overview]
Message-ID: <20240918075056.1838654-15-vanshika.shukla@nxp.com> (raw)
In-Reply-To: <20240918075056.1838654-1-vanshika.shukla@nxp.com>

From: Jun Yang <jun.yang@nxp.com>

MC VFIO is not registered into RTE VFIO. The primary process registers
an MC VFIO multiprocess action for secondary processes to request.
VFIO/container handlers are provided via CMSG.
The primary process is responsible for connecting the MC VFIO group
to the container.

In addition, the MC VFIO code is refactored according to the
container/group logic. In general, a VFIO container can support
multiple groups per process. Currently only a single MC group (dprc.x)
per process is supported, but logic is added to support connecting
multiple MC groups to a container.

Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
 drivers/bus/fslmc/fslmc_bus.c  |  14 +-
 drivers/bus/fslmc/fslmc_vfio.c | 996 ++++++++++++++++++++++-----------
 drivers/bus/fslmc/fslmc_vfio.h |  35 +-
 drivers/bus/fslmc/version.map  |   1 +
 4 files changed, 694 insertions(+), 352 deletions(-)

diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index 7baadf99b9..654726dbe6 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -318,6 +318,7 @@ rte_fslmc_scan(void)
 	struct dirent *entry;
 	static int process_once;
 	int groupid;
+	char *group_name;
 
 	if (process_once) {
 		DPAA2_BUS_DEBUG("Fslmc bus already scanned. Not rescanning");
@@ -325,12 +326,19 @@ rte_fslmc_scan(void)
 	}
 	process_once = 1;
 
-	ret = fslmc_get_container_group(&groupid);
+	/* Currently only a single group per process is supported. */
+	group_name = getenv("DPRC");
+	if (!group_name) {
+		DPAA2_BUS_DEBUG("DPAA2: DPRC not available");
+		return -EINVAL;
+	}
+
+	ret = fslmc_get_container_group(group_name, &groupid);
 	if (ret != 0)
 		goto scan_fail;
 
 	/* Scan devices on the group */
-	sprintf(fslmc_dirpath, "%s/%s", SYSFS_FSL_MC_DEVICES, fslmc_container);
+	sprintf(fslmc_dirpath, "%s/%s", SYSFS_FSL_MC_DEVICES, group_name);
 	dir = opendir(fslmc_dirpath);
 	if (!dir) {
 		DPAA2_BUS_ERR("Unable to open VFIO group directory");
@@ -338,7 +346,7 @@ rte_fslmc_scan(void)
 	}
 
 	/* Scan the DPRC container object */
-	ret = scan_one_fslmc_device(fslmc_container);
+	ret = scan_one_fslmc_device(group_name);
 	if (ret != 0) {
 		/* Error in parsing directory - exit gracefully */
 		goto scan_fail_cleanup;
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 1cc256f849..15d2930cf0 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -42,12 +42,14 @@
 
 #define FSLMC_CONTAINER_MAX_LEN 8 /**< Of the format dprc.XX */
 
-/* Number of VFIO containers & groups with in */
-static struct fslmc_vfio_group vfio_group;
-static struct fslmc_vfio_container vfio_container;
-static int container_device_fd;
-char *fslmc_container;
-static int fslmc_iommu_type;
+#define FSLMC_VFIO_MP "fslmc_vfio_mp_sync"
+
+/* A container is composed of multiple groups; however,
+ * each process currently supports only a single group within a container.
+ */
+static struct fslmc_vfio_container s_vfio_container;
+/* Currently only a single group per process is supported. */
+const char *fslmc_group; /* dprc.x */
 static uint32_t *msi_intr_vaddr;
 void *(*rte_mcp_ptr_list);
 
@@ -72,108 +74,547 @@ rte_fslmc_object_register(struct rte_dpaa2_object *object)
 	TAILQ_INSERT_TAIL(&dpaa2_obj_list, object, next);
 }
 
-int
-fslmc_get_container_group(int *groupid)
+static const char *
+fslmc_vfio_get_group_name(void)
 {
-	int ret;
-	char *container;
+	return fslmc_group;
+}
+
+static void
+fslmc_vfio_set_group_name(const char *group_name)
+{
+	fslmc_group = group_name;
+}
+
+static int
+fslmc_vfio_add_group(int vfio_group_fd,
+	int iommu_group_num, const char *group_name)
+{
+	struct fslmc_vfio_group *group;
+
+	group = rte_zmalloc(NULL, sizeof(struct fslmc_vfio_group), 0);
+	if (!group)
+		return -ENOMEM;
+	group->fd = vfio_group_fd;
+	group->groupid = iommu_group_num;
+	strcpy(group->group_name, group_name);
+	if (rte_vfio_noiommu_is_enabled() > 0)
+		group->iommu_type = RTE_VFIO_NOIOMMU;
+	else
+		group->iommu_type = VFIO_TYPE1_IOMMU;
+	LIST_INSERT_HEAD(&s_vfio_container.groups, group, next);
+
+	return 0;
+}
+
+static int
+fslmc_vfio_clear_group(int vfio_group_fd)
+{
+	struct fslmc_vfio_group *group;
+	struct fslmc_vfio_device *dev;
+	int clear = 0;
+
+	LIST_FOREACH(group, &s_vfio_container.groups, next) {
+		if (group->fd == vfio_group_fd) {
+			LIST_FOREACH(dev, &group->vfio_devices, next)
+				LIST_REMOVE(dev, next);
+
+			close(vfio_group_fd);
+			LIST_REMOVE(group, next);
+			rte_free(group);
+			clear = 1;
 
-	if (!fslmc_container) {
-		container = getenv("DPRC");
-		if (container == NULL) {
-			DPAA2_BUS_DEBUG("DPAA2: DPRC not available");
-			return -EINVAL;
+			break;
 		}
+	}
 
-		if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) {
-			DPAA2_BUS_ERR("Invalid container name: %s", container);
-			return -1;
+	if (LIST_EMPTY(&s_vfio_container.groups)) {
+		if (s_vfio_container.fd > 0)
+			close(s_vfio_container.fd);
+
+		s_vfio_container.fd = -1;
+	}
+	if (clear)
+		return 0;
+
+	return -ENODEV;
+}
+
+static int
+fslmc_vfio_connect_container(int vfio_group_fd)
+{
+	struct fslmc_vfio_group *group;
+
+	LIST_FOREACH(group, &s_vfio_container.groups, next) {
+		if (group->fd == vfio_group_fd) {
+			group->connected = 1;
+
+			return 0;
+		}
+	}
+
+	return -ENODEV;
+}
+
+static int
+fslmc_vfio_container_connected(int vfio_group_fd)
+{
+	struct fslmc_vfio_group *group;
+
+	LIST_FOREACH(group, &s_vfio_container.groups, next) {
+		if (group->fd == vfio_group_fd) {
+			if (group->connected)
+				return 1;
+		}
+	}
+	return 0;
+}
+
+static int
+fslmc_vfio_iommu_type(int vfio_group_fd)
+{
+	struct fslmc_vfio_group *group;
+
+	LIST_FOREACH(group, &s_vfio_container.groups, next) {
+		if (group->fd == vfio_group_fd)
+			return group->iommu_type;
+	}
+	return -ENODEV;
+}
+
+static int
+fslmc_vfio_group_fd_by_name(const char *group_name)
+{
+	struct fslmc_vfio_group *group;
+
+	LIST_FOREACH(group, &s_vfio_container.groups, next) {
+		if (!strcmp(group->group_name, group_name))
+			return group->fd;
+	}
+	return -ENODEV;
+}
+
+static int
+fslmc_vfio_group_fd_by_id(int group_id)
+{
+	struct fslmc_vfio_group *group;
+
+	LIST_FOREACH(group, &s_vfio_container.groups, next) {
+		if (group->groupid == group_id)
+			return group->fd;
+	}
+	return -ENODEV;
+}
+
+static int
+fslmc_vfio_group_add_dev(int vfio_group_fd,
+	int dev_fd, const char *name)
+{
+	struct fslmc_vfio_group *group;
+	struct fslmc_vfio_device *dev;
+
+	LIST_FOREACH(group, &s_vfio_container.groups, next) {
+		if (group->fd == vfio_group_fd) {
+			dev = rte_zmalloc(NULL,
+				sizeof(struct fslmc_vfio_device), 0);
+			dev->fd = dev_fd;
+			strcpy(dev->dev_name, name);
+			LIST_INSERT_HEAD(&group->vfio_devices, dev, next);
+			return 0;
 		}
+	}
+	return -ENODEV;
+}
 
-		fslmc_container = strdup(container);
-		if (!fslmc_container) {
-			DPAA2_BUS_ERR("Mem alloc failure; Container name");
-			return -ENOMEM;
+static int
+fslmc_vfio_group_remove_dev(int vfio_group_fd,
+	const char *name)
+{
+	struct fslmc_vfio_group *group = NULL;
+	struct fslmc_vfio_device *dev;
+	int removed = 0;
+
+	LIST_FOREACH(group, &s_vfio_container.groups, next) {
+		if (group->fd == vfio_group_fd)
+			break;
+	}
+
+	if (group) {
+		LIST_FOREACH(dev, &group->vfio_devices, next) {
+			if (!strcmp(dev->dev_name, name)) {
+				LIST_REMOVE(dev, next);
+				removed = 1;
+				break;
+			}
 		}
 	}
 
-	fslmc_iommu_type = (rte_vfio_noiommu_is_enabled() == 1) ?
-		RTE_VFIO_NOIOMMU : VFIO_TYPE1_IOMMU;
+	if (removed)
+		return 0;
+
+	return -ENODEV;
+}
+
+static int
+fslmc_vfio_container_fd(void)
+{
+	return s_vfio_container.fd;
+}
+
+static int
+fslmc_get_group_id(const char *group_name,
+	int *groupid)
+{
+	int ret;
 
 	/* get group number */
 	ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES,
-				     fslmc_container, groupid);
+			group_name, groupid);
 	if (ret <= 0) {
-		DPAA2_BUS_ERR("Unable to find %s IOMMU group", fslmc_container);
-		return -1;
+		DPAA2_BUS_ERR("Unable to find %s IOMMU group", group_name);
+		if (ret < 0)
+			return ret;
+
+		return -EIO;
 	}
 
-	DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d",
-			fslmc_container, *groupid);
+	DPAA2_BUS_DEBUG("GROUP(%s) has VFIO iommu group id = %d",
+		group_name, *groupid);
 
 	return 0;
 }
 
 static int
-vfio_connect_container(void)
+fslmc_vfio_open_group_fd(const char *group_name)
 {
-	int fd, ret;
+	int vfio_group_fd;
+	char filename[PATH_MAX];
+	struct rte_mp_msg mp_req, *mp_rep;
+	struct rte_mp_reply mp_reply = {0};
+	struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
+	struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
+	int iommu_group_num, ret;
 
-	if (vfio_container.used) {
-		DPAA2_BUS_DEBUG("No container available");
-		return -1;
+	vfio_group_fd = fslmc_vfio_group_fd_by_name(group_name);
+	if (vfio_group_fd > 0)
+		return vfio_group_fd;
+
+	ret = fslmc_get_group_id(group_name, &iommu_group_num);
+	if (ret)
+		return ret;
+	/* if primary, try to open the group */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		/* try regular group format */
+		snprintf(filename, sizeof(filename),
+			VFIO_GROUP_FMT, iommu_group_num);
+		vfio_group_fd = open(filename, O_RDWR);
+
+		goto add_vfio_group;
+	}
+	/* if we're in a secondary process, request group fd from the primary
+	 * process via mp channel.
+	 */
+	p->req = SOCKET_REQ_GROUP;
+	p->group_num = iommu_group_num;
+	strcpy(mp_req.name, FSLMC_VFIO_MP);
+	mp_req.len_param = sizeof(*p);
+	mp_req.num_fds = 0;
+
+	vfio_group_fd = -1;
+	if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
+	    mp_reply.nb_received == 1) {
+		mp_rep = &mp_reply.msgs[0];
+		p = (struct vfio_mp_param *)mp_rep->param;
+		if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
+			vfio_group_fd = mp_rep->fds[0];
+		} else if (p->result == SOCKET_NO_FD) {
+			DPAA2_BUS_ERR("Bad VFIO group fd");
+			vfio_group_fd = 0;
+		}
 	}
 
-	/* Try connecting to vfio container if already created */
-	if (!ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER,
-		&vfio_container.fd)) {
-		DPAA2_BUS_DEBUG(
-		    "Container pre-exists with FD[0x%x] for this group",
-		    vfio_container.fd);
-		vfio_group.container = &vfio_container;
+	free(mp_reply.msgs);
+
+add_vfio_group:
+	if (vfio_group_fd <= 0) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			DPAA2_BUS_ERR("Open VFIO group(%s) failed(%d)",
+				filename, vfio_group_fd);
+		} else {
+			DPAA2_BUS_ERR("Cannot request group fd(%d)",
+				vfio_group_fd);
+		}
+	} else {
+		ret = fslmc_vfio_add_group(vfio_group_fd, iommu_group_num,
+			group_name);
+		if (ret)
+			return ret;
+	}
+
+	return vfio_group_fd;
+}
+
+static int
+fslmc_vfio_check_extensions(int vfio_container_fd)
+{
+	int ret;
+	uint32_t idx, n_extensions = 0;
+	static const int type_id[] = {RTE_VFIO_TYPE1, RTE_VFIO_SPAPR,
+		RTE_VFIO_NOIOMMU};
+	static const char * const type_id_nm[] = {"Type 1",
+		"sPAPR", "No-IOMMU"};
+
+	for (idx = 0; idx < RTE_DIM(type_id); idx++) {
+		ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
+			type_id[idx]);
+		if (ret < 0) {
+			DPAA2_BUS_ERR("Could not get IOMMU type, error %i (%s)",
+				errno, strerror(errno));
+			close(vfio_container_fd);
+			return -errno;
+		} else if (ret == 1) {
+			/* we found a supported extension */
+			n_extensions++;
+		}
+		DPAA2_BUS_DEBUG("IOMMU type %d (%s) is %s",
+			type_id[idx], type_id_nm[idx],
+			ret ? "supported" : "not supported");
+	}
+
+	/* if we didn't find any supported IOMMU types, fail */
+	if (!n_extensions) {
+		close(vfio_container_fd);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int
+fslmc_vfio_open_container_fd(void)
+{
+	int ret, vfio_container_fd;
+	struct rte_mp_msg mp_req, *mp_rep;
+	struct rte_mp_reply mp_reply = {0};
+	struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
+	struct vfio_mp_param *p = (void *)mp_req.param;
+
+	if (fslmc_vfio_container_fd() > 0)
+		return fslmc_vfio_container_fd();
+
+	/* if we're in a primary process, try to open the container */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
+		if (vfio_container_fd < 0) {
+			DPAA2_BUS_ERR("Cannot open VFIO container(%s), err(%d)",
+				VFIO_CONTAINER_PATH, vfio_container_fd);
+			ret = vfio_container_fd;
+			goto err_exit;
+		}
+
+		/* check VFIO API version */
+		ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
+		if (ret < 0) {
+			DPAA2_BUS_ERR("Could not get VFIO API version(%d)",
+				ret);
+		} else if (ret != VFIO_API_VERSION) {
+			DPAA2_BUS_ERR("Unsupported VFIO API version(%d)",
+				ret);
+			ret = -ENOTSUP;
+		}
+		if (ret < 0) {
+			close(vfio_container_fd);
+			goto err_exit;
+		}
+
+		ret = fslmc_vfio_check_extensions(vfio_container_fd);
+		if (ret) {
+			DPAA2_BUS_ERR("No supported IOMMU extensions found(%d)",
+				ret);
+			close(vfio_container_fd);
+			goto err_exit;
+		}
+
+		goto success_exit;
+	}
+	/*
+	 * if we're in a secondary process, request container fd from the
+	 * primary process via mp channel
+	 */
+	p->req = SOCKET_REQ_CONTAINER;
+	strcpy(mp_req.name, FSLMC_VFIO_MP);
+	mp_req.len_param = sizeof(*p);
+	mp_req.num_fds = 0;
+
+	vfio_container_fd = -1;
+	ret = rte_mp_request_sync(&mp_req, &mp_reply, &ts);
+	if (ret)
+		goto err_exit;
+
+	if (mp_reply.nb_received != 1) {
+		ret = -EIO;
+		goto err_exit;
+	}
+
+	mp_rep = &mp_reply.msgs[0];
+	p = (void *)mp_rep->param;
+	if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
+		vfio_container_fd = mp_rep->fds[0];
+		free(mp_reply.msgs);
+	}
+
+success_exit:
+	s_vfio_container.fd = vfio_container_fd;
+
+	return vfio_container_fd;
+
+err_exit:
+	if (mp_reply.msgs)
+		free(mp_reply.msgs);
+	DPAA2_BUS_ERR("Cannot request container fd err(%d)", ret);
+	return ret;
+}
+
+int
+fslmc_get_container_group(const char *group_name,
+	int *groupid)
+{
+	int ret;
+
+	if (!group_name) {
+		DPAA2_BUS_ERR("No group name provided!");
+
+		return -EINVAL;
+	}
+	ret = fslmc_get_group_id(group_name, groupid);
+	if (ret)
+		return ret;
+
+	fslmc_vfio_set_group_name(group_name);
+
+	return 0;
+}
+
+static int
+fslmc_vfio_mp_primary(const struct rte_mp_msg *msg,
+	const void *peer)
+{
+	int fd = -1;
+	int ret;
+	struct rte_mp_msg reply;
+	struct vfio_mp_param *r = (void *)reply.param;
+	const struct vfio_mp_param *m = (const void *)msg->param;
+
+	if (msg->len_param != sizeof(*m)) {
+		DPAA2_BUS_ERR("fslmc vfio received invalid message!");
+		return -EINVAL;
+	}
+
+	memset(&reply, 0, sizeof(reply));
+
+	switch (m->req) {
+	case SOCKET_REQ_GROUP:
+		r->req = SOCKET_REQ_GROUP;
+		r->group_num = m->group_num;
+		fd = fslmc_vfio_group_fd_by_id(m->group_num);
+		if (fd < 0) {
+			r->result = SOCKET_ERR;
+		} else if (!fd) {
+			/* if group exists but isn't bound to VFIO driver */
+			r->result = SOCKET_NO_FD;
+		} else {
+			/* if group exists and is bound to VFIO driver */
+			r->result = SOCKET_OK;
+			reply.num_fds = 1;
+			reply.fds[0] = fd;
+		}
+		break;
+	case SOCKET_REQ_CONTAINER:
+		r->req = SOCKET_REQ_CONTAINER;
+		fd = fslmc_vfio_container_fd();
+		if (fd <= 0) {
+			r->result = SOCKET_ERR;
+		} else {
+			r->result = SOCKET_OK;
+			reply.num_fds = 1;
+			reply.fds[0] = fd;
+		}
+		break;
+	default:
+		DPAA2_BUS_ERR("fslmc vfio received invalid message(%08x)",
+			m->req);
+		return -ENOTSUP;
+	}
+
+	strcpy(reply.name, FSLMC_VFIO_MP);
+	reply.len_param = sizeof(*r);
+	ret = rte_mp_reply(&reply, peer);
+
+	return ret;
+}
+
+static int
+fslmc_vfio_mp_sync_setup(void)
+{
+	int ret;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		ret = rte_mp_action_register(FSLMC_VFIO_MP,
+			fslmc_vfio_mp_primary);
+		if (ret && rte_errno != ENOTSUP)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+vfio_connect_container(int vfio_container_fd,
+	int vfio_group_fd)
+{
+	int ret;
+	int iommu_type;
+
+	if (fslmc_vfio_container_connected(vfio_group_fd)) {
+		DPAA2_BUS_WARN("VFIO FD(%d) has connected to container",
+			vfio_group_fd);
 		return 0;
 	}
 
-	/* Opens main vfio file descriptor which represents the "container" */
-	fd = rte_vfio_get_container_fd();
-	if (fd < 0) {
-		DPAA2_BUS_ERR("Failed to open VFIO container");
-		return -errno;
+	iommu_type = fslmc_vfio_iommu_type(vfio_group_fd);
+	if (iommu_type < 0) {
+		DPAA2_BUS_ERR("Failed to get iommu type(%d)",
+			iommu_type);
+
+		return iommu_type;
 	}
 
 	/* Check whether support for SMMU type IOMMU present or not */
-	if (ioctl(fd, VFIO_CHECK_EXTENSION, fslmc_iommu_type)) {
+	if (ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, iommu_type)) {
 		/* Connect group to container */
-		ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd);
+		ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
+			&vfio_container_fd);
 		if (ret) {
 			DPAA2_BUS_ERR("Failed to setup group container");
-			close(fd);
 			return -errno;
 		}
 
-		ret = ioctl(fd, VFIO_SET_IOMMU, fslmc_iommu_type);
+		ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU, iommu_type);
 		if (ret) {
 			DPAA2_BUS_ERR("Failed to setup VFIO iommu");
-			close(fd);
 			return -errno;
 		}
 	} else {
 		DPAA2_BUS_ERR("No supported IOMMU available");
-		close(fd);
 		return -EINVAL;
 	}
 
-	vfio_container.used = 1;
-	vfio_container.fd = fd;
-	vfio_container.group = &vfio_group;
-	vfio_group.container = &vfio_container;
-
-	return 0;
+	return fslmc_vfio_connect_container(vfio_group_fd);
 }
 
-static int vfio_map_irq_region(struct fslmc_vfio_group *group)
+static int vfio_map_irq_region(void)
 {
-	int ret;
+	int ret, fd;
 	unsigned long *vaddr = NULL;
 	struct vfio_iommu_type1_dma_map map = {
 		.argsz = sizeof(map),
@@ -182,9 +623,23 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group)
 		.iova = 0x6030000,
 		.size = 0x1000,
 	};
+	const char *group_name = fslmc_vfio_get_group_name();
+
+	fd = fslmc_vfio_group_fd_by_name(group_name);
+	if (fd <= 0) {
+		DPAA2_BUS_ERR("%s failed to open group fd(%d)",
+			__func__, fd);
+		if (fd < 0)
+			return fd;
+		return -rte_errno;
+	}
+	if (!fslmc_vfio_container_connected(fd)) {
+		DPAA2_BUS_ERR("Container is not connected");
+		return -EIO;
+	}
 
 	vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
-		PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
+		PROT_READ, MAP_SHARED, fd, 0x6030000);
 	if (vaddr == MAP_FAILED) {
 		DPAA2_BUS_INFO("Unable to map region (errno = %d)", errno);
 		return -errno;
@@ -192,8 +647,8 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group)
 
 	msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
 	map.vaddr = (unsigned long)vaddr;
-	ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map);
-	if (ret == 0)
+	ret = ioctl(fslmc_vfio_container_fd(), VFIO_IOMMU_MAP_DMA, &map);
+	if (!ret)
 		return 0;
 
 	DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno);
@@ -204,8 +659,8 @@ static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
 static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
 
 static void
-fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
-		void *arg __rte_unused)
+fslmc_memevent_cb(enum rte_mem_event type, const void *addr,
+	size_t len, void *arg __rte_unused)
 {
 	struct rte_memseg_list *msl;
 	struct rte_memseg *ms;
@@ -262,44 +717,54 @@ fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
 }
 
 static int
-fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len)
+fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr,
+	size_t len)
 {
-	struct fslmc_vfio_group *group;
 	struct vfio_iommu_type1_dma_map dma_map = {
 		.argsz = sizeof(struct vfio_iommu_type1_dma_map),
 		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 	};
-	int ret;
-
-	if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
+	int ret, fd;
+	const char *group_name = fslmc_vfio_get_group_name();
+
+	fd = fslmc_vfio_group_fd_by_name(group_name);
+	if (fd <= 0) {
+		DPAA2_BUS_ERR("%s failed to open group fd(%d)",
+			__func__, fd);
+		if (fd < 0)
+			return fd;
+		return -rte_errno;
+	}
+	if (fslmc_vfio_iommu_type(fd) == RTE_VFIO_NOIOMMU) {
 		DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
 		return 0;
 	}
 
 	dma_map.size = len;
 	dma_map.vaddr = vaddr;
-
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
 	dma_map.iova = iovaddr;
-#else
-	dma_map.iova = dma_map.vaddr;
+
+#ifndef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+	if (vaddr != iovaddr) {
+		DPAA2_BUS_WARN("vaddr(0x%lx) != iovaddr(0x%lx)",
+			vaddr, iovaddr);
+	}
 #endif
 
 	/* SET DMA MAP for IOMMU */
-	group = &vfio_group;
-
-	if (!group->container) {
+	if (!fslmc_vfio_container_connected(fd)) {
 		DPAA2_BUS_ERR("Container is not connected ");
-		return -1;
+		return -EIO;
 	}
 
 	DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"",
 			(uint64_t)dma_map.vaddr, (uint64_t)dma_map.size);
-	ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+	ret = ioctl(fslmc_vfio_container_fd(), VFIO_IOMMU_MAP_DMA,
+		&dma_map);
 	if (ret) {
 		DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)",
 				errno);
-		return -1;
+		return ret;
 	}
 
 	return 0;
@@ -308,14 +773,22 @@ fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len)
 static int
 fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
 {
-	struct fslmc_vfio_group *group;
 	struct vfio_iommu_type1_dma_unmap dma_unmap = {
 		.argsz = sizeof(struct vfio_iommu_type1_dma_unmap),
 		.flags = 0,
 	};
-	int ret;
-
-	if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
+	int ret, fd;
+	const char *group_name = fslmc_vfio_get_group_name();
+
+	fd = fslmc_vfio_group_fd_by_name(group_name);
+	if (fd <= 0) {
+		DPAA2_BUS_ERR("%s failed to open group fd(%d)",
+			__func__, fd);
+		if (fd < 0)
+			return fd;
+		return -rte_errno;
+	}
+	if (fslmc_vfio_iommu_type(fd) == RTE_VFIO_NOIOMMU) {
 		DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
 		return 0;
 	}
@@ -324,16 +797,15 @@ fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
 	dma_unmap.iova = vaddr;
 
 	/* SET DMA MAP for IOMMU */
-	group = &vfio_group;
-
-	if (!group->container) {
+	if (!fslmc_vfio_container_connected(fd)) {
 		DPAA2_BUS_ERR("Container is not connected ");
-		return -1;
+		return -EIO;
 	}
 
 	DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"",
 			(uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size);
-	ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
+	ret = ioctl(fslmc_vfio_container_fd(), VFIO_IOMMU_UNMAP_DMA,
+		&dma_unmap);
 	if (ret) {
 		DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)",
 				errno);
@@ -367,41 +839,13 @@ fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
 int
 rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size)
 {
-	int ret;
-	struct fslmc_vfio_group *group;
-	struct vfio_iommu_type1_dma_map dma_map = {
-		.argsz = sizeof(struct vfio_iommu_type1_dma_map),
-		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
-	};
-
-	if (fslmc_iommu_type == RTE_VFIO_NOIOMMU) {
-		DPAA2_BUS_DEBUG("Running in NOIOMMU mode");
-		return 0;
-	}
-
-	/* SET DMA MAP for IOMMU */
-	group = &vfio_group;
-	if (!group->container) {
-		DPAA2_BUS_ERR("Container is not connected");
-		return -1;
-	}
-
-	dma_map.size = size;
-	dma_map.vaddr = vaddr;
-	dma_map.iova = iova;
-
-	DPAA2_BUS_DEBUG("VFIOdmamap 0x%"PRIx64":0x%"PRIx64",size 0x%"PRIx64"\n",
-			(uint64_t)dma_map.vaddr, (uint64_t)dma_map.iova,
-			(uint64_t)dma_map.size);
-	ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
-		    &dma_map);
-	if (ret) {
-		DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)",
-			errno);
-		return ret;
-	}
+	return fslmc_map_dma(vaddr, iova, size);
+}
 
-	return 0;
+int
+rte_fslmc_vfio_mem_dmaunmap(uint64_t iova, uint64_t size)
+{
+	return fslmc_unmap_dma(iova, 0, size);
 }
 
 int rte_fslmc_vfio_dmamap(void)
@@ -431,7 +875,7 @@ int rte_fslmc_vfio_dmamap(void)
 	 * the interrupt region to SMMU. This should be removed once the
 	 * support is added in the Kernel.
 	 */
-	vfio_map_irq_region(&vfio_group);
+	vfio_map_irq_region();
 
 	/* Existing segments have been mapped and memory callback for hotplug
 	 * has been installed.
@@ -442,149 +886,19 @@ int rte_fslmc_vfio_dmamap(void)
 }
 
 static int
-fslmc_vfio_open_group_fd(int iommu_group_num)
-{
-	int vfio_group_fd;
-	char filename[PATH_MAX];
-	struct rte_mp_msg mp_req, *mp_rep;
-	struct rte_mp_reply mp_reply = {0};
-	struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
-	struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
-
-	/* if primary, try to open the group */
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		/* try regular group format */
-		snprintf(filename, sizeof(filename),
-			VFIO_GROUP_FMT, iommu_group_num);
-		vfio_group_fd = open(filename, O_RDWR);
-		if (vfio_group_fd <= 0) {
-			DPAA2_BUS_ERR("Open VFIO group(%s) failed(%d)",
-				filename, vfio_group_fd);
-		}
-
-		return vfio_group_fd;
-	}
-	/* if we're in a secondary process, request group fd from the primary
-	 * process via mp channel.
-	 */
-	p->req = SOCKET_REQ_GROUP;
-	p->group_num = iommu_group_num;
-	strcpy(mp_req.name, EAL_VFIO_MP);
-	mp_req.len_param = sizeof(*p);
-	mp_req.num_fds = 0;
-
-	vfio_group_fd = -1;
-	if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
-	    mp_reply.nb_received == 1) {
-		mp_rep = &mp_reply.msgs[0];
-		p = (struct vfio_mp_param *)mp_rep->param;
-		if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
-			vfio_group_fd = mp_rep->fds[0];
-		} else if (p->result == SOCKET_NO_FD) {
-			DPAA2_BUS_ERR("Bad VFIO group fd");
-			vfio_group_fd = 0;
-		}
-	}
-
-	free(mp_reply.msgs);
-	if (vfio_group_fd < 0) {
-		DPAA2_BUS_ERR("Cannot request group fd(%d)",
-			vfio_group_fd);
-	}
-	return vfio_group_fd;
-}
-
-static int
-fslmc_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
-		int *vfio_dev_fd, struct vfio_device_info *device_info)
+fslmc_vfio_setup_device(const char *dev_addr,
+	int *vfio_dev_fd, struct vfio_device_info *device_info)
 {
 	struct vfio_group_status group_status = {
 			.argsz = sizeof(group_status)
 	};
-	int vfio_group_fd, vfio_container_fd, iommu_group_no, ret;
+	int vfio_group_fd, ret;
+	const char *group_name = fslmc_vfio_get_group_name();
 
-	/* get group number */
-	ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_no);
-	if (ret < 0)
-		return -1;
-
-	/* get the actual group fd */
-	vfio_group_fd = vfio_group.fd;
-	if (vfio_group_fd < 0 && vfio_group_fd != -ENOENT)
-		return -1;
-
-	/*
-	 * if vfio_group_fd == -ENOENT, that means the device
-	 * isn't managed by VFIO
-	 */
-	if (vfio_group_fd == -ENOENT) {
-		DPAA2_BUS_WARN(" %s not managed by VFIO driver, skipping",
-				dev_addr);
-		return 1;
-	}
-
-	/* Opens main vfio file descriptor which represents the "container" */
-	vfio_container_fd = rte_vfio_get_container_fd();
-	if (vfio_container_fd < 0) {
-		DPAA2_BUS_ERR("Failed to open VFIO container");
-		return -errno;
-	}
-
-	/* check if the group is viable */
-	ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
-	if (ret) {
-		DPAA2_BUS_ERR("  %s cannot get group status, "
-				"error %i (%s)\n", dev_addr,
-				errno, strerror(errno));
-		close(vfio_group_fd);
-		rte_vfio_clear_group(vfio_group_fd);
-		return -1;
-	} else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
-		DPAA2_BUS_ERR("  %s VFIO group is not viable!\n", dev_addr);
-		close(vfio_group_fd);
-		rte_vfio_clear_group(vfio_group_fd);
-		return -1;
-	}
-	/* At this point, we know that this group is viable (meaning,
-	 * all devices are either bound to VFIO or not bound to anything)
-	 */
-
-	/* check if group does not have a container yet */
-	if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
-
-		/* add group to a container */
-		ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
-				&vfio_container_fd);
-		if (ret) {
-			DPAA2_BUS_ERR("  %s cannot add VFIO group to container, "
-					"error %i (%s)\n", dev_addr,
-					errno, strerror(errno));
-			close(vfio_group_fd);
-			close(vfio_container_fd);
-			rte_vfio_clear_group(vfio_group_fd);
-			return -1;
-		}
-
-		/*
-		 * set an IOMMU type for container
-		 *
-		 */
-		if (ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
-			  fslmc_iommu_type)) {
-			ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
-				    fslmc_iommu_type);
-			if (ret) {
-				DPAA2_BUS_ERR("Failed to setup VFIO iommu");
-				close(vfio_group_fd);
-				close(vfio_container_fd);
-				return -errno;
-			}
-		} else {
-			DPAA2_BUS_ERR("No supported IOMMU available");
-			close(vfio_group_fd);
-			close(vfio_container_fd);
-			return -EINVAL;
-		}
+	vfio_group_fd = fslmc_vfio_group_fd_by_name(group_name);
+	if (!fslmc_vfio_container_connected(vfio_group_fd)) {
+		DPAA2_BUS_ERR("Container is not connected");
+		return -EIO;
 	}
 
 	/* get a file descriptor for the device */
@@ -594,26 +908,21 @@ fslmc_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
 		 * the VFIO group or the container not having IOMMU configured.
 		 */
 
-		DPAA2_BUS_WARN("Getting a vfio_dev_fd for %s failed", dev_addr);
-		close(vfio_group_fd);
-		close(vfio_container_fd);
-		rte_vfio_clear_group(vfio_group_fd);
-		return -1;
+		DPAA2_BUS_ERR("Getting a vfio_dev_fd for %s from %s failed",
+			dev_addr, group_name);
+		return -EIO;
 	}
 
 	/* test and setup the device */
 	ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
 	if (ret) {
-		DPAA2_BUS_ERR("  %s cannot get device info, error %i (%s)",
-				dev_addr, errno, strerror(errno));
-		close(*vfio_dev_fd);
-		close(vfio_group_fd);
-		close(vfio_container_fd);
-		rte_vfio_clear_group(vfio_group_fd);
-		return -1;
+		DPAA2_BUS_ERR("%s cannot get device info err(%d)(%s)",
+			dev_addr, errno, strerror(errno));
+		return ret;
 	}
 
-	return 0;
+	return fslmc_vfio_group_add_dev(vfio_group_fd, *vfio_dev_fd,
+			dev_addr);
 }
 
 static intptr_t vfio_map_mcp_obj(const char *mcp_obj)
@@ -625,8 +934,7 @@ static intptr_t vfio_map_mcp_obj(const char *mcp_obj)
 	struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
 	struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
 
-	fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, mcp_obj,
-			&mc_fd, &d_info);
+	fslmc_vfio_setup_device(mcp_obj, &mc_fd, &d_info);
 
 	/* getting device region info*/
 	ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
@@ -757,7 +1065,8 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
 }
 
 static void
-fslmc_close_iodevices(struct rte_dpaa2_device *dev)
+fslmc_close_iodevices(struct rte_dpaa2_device *dev,
+	int vfio_fd)
 {
 	struct rte_dpaa2_object *object = NULL;
 	struct rte_dpaa2_driver *drv;
@@ -800,6 +1109,11 @@ fslmc_close_iodevices(struct rte_dpaa2_device *dev)
 		break;
 	}
 
+	ret = fslmc_vfio_group_remove_dev(vfio_fd, dev->device.name);
+	if (ret) {
+		DPAA2_BUS_ERR("Failed to remove %s from vfio",
+			dev->device.name);
+	}
 	DPAA2_BUS_LOG(DEBUG, "Device (%s) Closed",
 		      dev->device.name);
 }
@@ -811,17 +1125,21 @@ fslmc_close_iodevices(struct rte_dpaa2_device *dev)
 static int
 fslmc_process_iodevices(struct rte_dpaa2_device *dev)
 {
-	int dev_fd;
+	int dev_fd, ret;
 	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
 	struct rte_dpaa2_object *object = NULL;
 
-	fslmc_vfio_setup_device(SYSFS_FSL_MC_DEVICES, dev->device.name,
-			&dev_fd, &device_info);
+	ret = fslmc_vfio_setup_device(dev->device.name, &dev_fd,
+			&device_info);
+	if (ret)
+		return ret;
 
 	switch (dev->dev_type) {
 	case DPAA2_ETH:
-		rte_dpaa2_vfio_setup_intr(dev->intr_handle, dev_fd,
-					  device_info.num_irqs);
+		ret = rte_dpaa2_vfio_setup_intr(dev->intr_handle, dev_fd,
+				device_info.num_irqs);
+		if (ret)
+			return ret;
 		break;
 	case DPAA2_CON:
 	case DPAA2_IO:
@@ -913,6 +1231,10 @@ int
 fslmc_vfio_close_group(void)
 {
 	struct rte_dpaa2_device *dev, *dev_temp;
+	int vfio_group_fd;
+	const char *group_name = fslmc_vfio_get_group_name();
+
+	vfio_group_fd = fslmc_vfio_group_fd_by_name(group_name);
 
 	RTE_TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
 		if (dev->device.devargs &&
@@ -927,7 +1249,7 @@ fslmc_vfio_close_group(void)
 		case DPAA2_CRYPTO:
 		case DPAA2_QDMA:
 		case DPAA2_IO:
-			fslmc_close_iodevices(dev);
+			fslmc_close_iodevices(dev, vfio_group_fd);
 			break;
 		case DPAA2_CON:
 		case DPAA2_CI:
@@ -936,7 +1258,7 @@ fslmc_vfio_close_group(void)
 			if (rte_eal_process_type() == RTE_PROC_SECONDARY)
 				continue;
 
-			fslmc_close_iodevices(dev);
+			fslmc_close_iodevices(dev, vfio_group_fd);
 			break;
 		case DPAA2_DPRTC:
 		default:
@@ -945,10 +1267,7 @@ fslmc_vfio_close_group(void)
 		}
 	}
 
-	if (vfio_group.fd > 0) {
-		close(vfio_group.fd);
-		vfio_group.fd = 0;
-	}
+	fslmc_vfio_clear_group(vfio_group_fd);
 
 	return 0;
 }
@@ -1138,75 +1457,84 @@ fslmc_vfio_process_group(void)
 int
 fslmc_vfio_setup_group(void)
 {
-	int groupid;
-	int ret;
+	int vfio_group_fd, vfio_container_fd, ret;
 	struct vfio_group_status status = { .argsz = sizeof(status) };
+	const char *group_name = fslmc_vfio_get_group_name();
+
+	/* MC VFIO setup entry */
+	vfio_container_fd = fslmc_vfio_container_fd();
+	if (vfio_container_fd <= 0) {
+		vfio_container_fd = fslmc_vfio_open_container_fd();
+		if (vfio_container_fd <= 0) {
+			DPAA2_BUS_ERR("Failed to create MC VFIO container");
+			return -rte_errno;
+		}
+	}
 
-	/* if already done once */
-	if (container_device_fd)
-		return 0;
-
-	ret = fslmc_get_container_group(&groupid);
-	if (ret)
-		return ret;
-
-	/* In case this group was already opened, continue without any
-	 * processing.
-	 */
-	if (vfio_group.groupid == groupid) {
-		DPAA2_BUS_ERR("groupid already exists %d", groupid);
-		return 0;
+	if (!group_name) {
+		DPAA2_BUS_DEBUG("DPAA2: DPRC not available");
+		return -EINVAL;
 	}
 
-	/* Get the actual group fd */
-	ret = fslmc_vfio_open_group_fd(groupid);
-	if (ret <= 0)
-		return ret;
-	vfio_group.fd = ret;
+	vfio_group_fd = fslmc_vfio_group_fd_by_name(group_name);
+	if (vfio_group_fd <= 0) {
+		vfio_group_fd = fslmc_vfio_open_group_fd(group_name);
+		if (vfio_group_fd <= 0) {
+			DPAA2_BUS_ERR("Failed to create MC VFIO group");
+			return -rte_errno;
+		}
+	}
 
 	/* Check group viability */
-	ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_STATUS, &status);
+	ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &status);
 	if (ret) {
-		DPAA2_BUS_ERR("VFIO error getting group status");
-		close(vfio_group.fd);
-		vfio_group.fd = 0;
+		DPAA2_BUS_ERR("VFIO(%s:fd=%d) error getting group status(%d)",
+			group_name, vfio_group_fd, ret);
+		fslmc_vfio_clear_group(vfio_group_fd);
 		return ret;
 	}
 
 	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
 		DPAA2_BUS_ERR("VFIO group not viable");
-		close(vfio_group.fd);
-		vfio_group.fd = 0;
+		fslmc_vfio_clear_group(vfio_group_fd);
 		return -EPERM;
 	}
-	/* Since Group is VIABLE, Store the groupid */
-	vfio_group.groupid = groupid;
 
 	/* check if group does not have a container yet */
 	if (!(status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
 		/* Now connect this IOMMU group to given container */
-		ret = vfio_connect_container();
-		if (ret) {
-			DPAA2_BUS_ERR("vfio group(%d) connect failed(%d)",
-				groupid, ret);
-			close(vfio_group.fd);
-			vfio_group.fd = 0;
-			return ret;
-		}
+		ret = vfio_connect_container(vfio_container_fd,
+			vfio_group_fd);
+	} else {
+		/* Here is supposed in secondary process,
+		 * group has been set to container in primary process.
+		 */
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			DPAA2_BUS_WARN("This group has been set container?");
+		ret = fslmc_vfio_connect_container(vfio_group_fd);
+	}
+	if (ret) {
+		DPAA2_BUS_ERR("vfio group connect failed(%d)", ret);
+		fslmc_vfio_clear_group(vfio_group_fd);
+		return ret;
 	}
 
 	/* Get Device information */
-	ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, fslmc_container);
+	ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, group_name);
 	if (ret < 0) {
-		DPAA2_BUS_ERR("Error getting device %s fd from group %d",
-			      fslmc_container, vfio_group.groupid);
-		close(vfio_group.fd);
-		vfio_group.fd = 0;
+		DPAA2_BUS_ERR("Error getting device %s fd", group_name);
+		fslmc_vfio_clear_group(vfio_group_fd);
+		return ret;
+	}
+
+	ret = fslmc_vfio_mp_sync_setup();
+	if (ret) {
+		DPAA2_BUS_ERR("VFIO MP sync setup failed!");
+		fslmc_vfio_clear_group(vfio_group_fd);
 		return ret;
 	}
-	container_device_fd = ret;
-	DPAA2_BUS_DEBUG("VFIO Container FD is [0x%X]",
-			container_device_fd);
+
+	DPAA2_BUS_DEBUG("VFIO GROUP FD is %d", vfio_group_fd);
 
 	return 0;
 }
diff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h
index b6677bdd18..1695b6c078 100644
--- a/drivers/bus/fslmc/fslmc_vfio.h
+++ b/drivers/bus/fslmc/fslmc_vfio.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016,2019-2020 NXP
+ *   Copyright 2016,2019-2023 NXP
  *
  */
 
@@ -20,26 +20,28 @@
 #define DPAA2_MC_DPBP_DEVID	10
 #define DPAA2_MC_DPCI_DEVID	11
 
-typedef struct fslmc_vfio_device {
+struct fslmc_vfio_device {
+	LIST_ENTRY(fslmc_vfio_device) next;
 	int fd; /* fslmc root container device ?? */
 	int index; /*index of child object */
+	char dev_name[64];
 	struct fslmc_vfio_device *child; /* Child object */
-} fslmc_vfio_device;
+};
 
-typedef struct fslmc_vfio_group {
+struct fslmc_vfio_group {
+	LIST_ENTRY(fslmc_vfio_group) next;
 	int fd; /* /dev/vfio/"groupid" */
 	int groupid;
-	struct fslmc_vfio_container *container;
-	int object_index;
-	struct fslmc_vfio_device *vfio_device;
-} fslmc_vfio_group;
+	int connected;
+	char group_name[64]; /* dprc.x*/
+	int iommu_type;
+	LIST_HEAD(, fslmc_vfio_device) vfio_devices;
+};
 
-typedef struct fslmc_vfio_container {
+struct fslmc_vfio_container {
 	int fd; /* /dev/vfio/vfio */
-	int used;
-	int index; /* index in group list */
-	struct fslmc_vfio_group *group;
-} fslmc_vfio_container;
+	LIST_HEAD(, fslmc_vfio_group) groups;
+};
 
 extern char *fslmc_container;
 
@@ -57,8 +59,11 @@ int fslmc_vfio_setup_group(void);
 int fslmc_vfio_process_group(void);
 int fslmc_vfio_close_group(void);
 char *fslmc_get_container(void);
-int fslmc_get_container_group(int *gropuid);
+int fslmc_get_container_group(const char *group_name, int *gropuid);
 int rte_fslmc_vfio_dmamap(void);
-int rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova, uint64_t size);
+int rte_fslmc_vfio_mem_dmamap(uint64_t vaddr, uint64_t iova,
+		uint64_t size);
+int rte_fslmc_vfio_mem_dmaunmap(uint64_t iova,
+		uint64_t size);
 
 #endif /* _FSLMC_VFIO_H_ */
diff --git a/drivers/bus/fslmc/version.map b/drivers/bus/fslmc/version.map
index df1143733d..b49bc0a62c 100644
--- a/drivers/bus/fslmc/version.map
+++ b/drivers/bus/fslmc/version.map
@@ -118,6 +118,7 @@ INTERNAL {
 	rte_fslmc_get_device_count;
 	rte_fslmc_object_register;
 	rte_global_active_dqs_list;
+	rte_fslmc_vfio_mem_dmaunmap;
 
 	local: *;
 };
-- 
2.25.1


  parent reply	other threads:[~2024-09-18  7:53 UTC|newest]

Thread overview: 229+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-09-13  5:59 [v1 00/43] DPAA2 specific patches vanshika.shukla
2024-09-13  5:59 ` [v1 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-13  5:59 ` [v1 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-13  5:59 ` [v1 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-13  5:59 ` [v1 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-13  5:59 ` [v1 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-13  5:59 ` [v1 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-13  5:59 ` [v1 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-13  5:59 ` [v1 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-13  5:59 ` [v1 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-13  5:59 ` [v1 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-13  5:59 ` [v1 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-13  5:59 ` [v1 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-13  5:59 ` [v1 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-13  5:59 ` [v1 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-13  5:59 ` [v1 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-13  5:59 ` [v1 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-13  5:59 ` [v1 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-13  5:59 ` [v1 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-13  5:59 ` [v1 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-13  5:59 ` [v1 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-13  5:59 ` [v1 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-13  5:59 ` [v1 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-13  5:59 ` [v1 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-13  5:59 ` [v1 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-13  5:59 ` [v1 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-13  5:59 ` [v1 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-13  5:59 ` [v1 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-13  5:59 ` [v1 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-13  5:59 ` [v1 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-13  5:59 ` [v1 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-13  5:59 ` [v1 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-13  5:59 ` [v1 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-13  5:59 ` [v1 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-13  5:59 ` [v1 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-13  5:59 ` [v1 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-13  5:59 ` [v1 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-13  5:59 ` [v1 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-13  5:59 ` [v1 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-13  5:59 ` [v1 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-13  5:59 ` [v1 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-13  5:59 ` [v1 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-13  5:59 ` [v1 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-13  5:59 ` [v1 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-09-18  7:50 ` [v2 00/43] DPAA2 specific patches vanshika.shukla
2024-09-18  7:50   ` [v2 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-14 12:00     ` [v3 00/43] DPAA2 specific patches vanshika.shukla
2024-10-14 12:00       ` [v3 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-14 12:00       ` [v3 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-10-14 12:00       ` [v3 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-10-14 12:00       ` [v3 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-10-14 12:00       ` [v3 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-10-14 12:00       ` [v3 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-10-14 12:00       ` [v3 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-10-14 12:00       ` [v3 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-10-14 12:00       ` [v3 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-10-14 12:00       ` [v3 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-10-14 12:00       ` [v3 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-10-14 12:00       ` [v3 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-10-14 12:00       ` [v3 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-10-15  2:27         ` Stephen Hemminger
2024-10-14 12:00       ` [v3 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-10-15  2:29         ` Stephen Hemminger
2024-10-14 12:00       ` [v3 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-10-14 12:00       ` [v3 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-10-15  2:31         ` Stephen Hemminger
2024-10-14 12:01       ` [v3 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-10-14 12:01       ` [v3 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-10-14 12:01       ` [v3 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-10-14 12:01       ` [v3 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-10-14 12:01       ` [v3 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-10-14 12:01       ` [v3 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-10-14 12:01       ` [v3 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-10-14 12:01       ` [v3 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-10-14 12:01       ` [v3 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-10-14 12:01       ` [v3 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-10-14 12:01       ` [v3 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-10-14 12:01       ` [v3 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-10-14 12:01       ` [v3 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-10-14 12:01       ` [v3 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-10-14 12:01       ` [v3 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-10-14 12:01       ` [v3 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-10-14 12:01       ` [v3 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-10-14 12:01       ` [v3 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-10-14 12:01       ` [v3 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-10-14 12:01       ` [v3 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-10-14 12:01       ` [v3 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-10-14 12:01       ` [v3 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-10-14 12:01       ` [v3 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-10-14 12:01       ` [v3 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-10-14 12:01       ` [v3 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-10-14 12:01       ` [v3 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-10-14 12:01       ` [v3 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-10-15  2:32         ` Stephen Hemminger
2024-10-22 19:12       ` [v4 00/42] DPAA2 specific patches vanshika.shukla
2024-10-22 19:12         ` [v4 01/42] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-22 19:12         ` [v4 02/42] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-10-22 19:12         ` [v4 03/42] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-10-22 19:12         ` [v4 04/42] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-10-22 19:12         ` [v4 05/42] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-10-22 19:12         ` [v4 06/42] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-10-22 19:12         ` [v4 07/42] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-10-22 19:12         ` [v4 08/42] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-10-22 19:12         ` [v4 09/42] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-10-22 19:12         ` [v4 10/42] net/dpaa2: update DPNI link status method vanshika.shukla
2024-10-22 19:12         ` [v4 11/42] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-10-22 19:12         ` [v4 12/42] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-10-22 19:12         ` [v4 13/42] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-10-22 19:12         ` [v4 14/42] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-10-22 19:12         ` [v4 15/42] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-10-22 19:12         ` [v4 16/42] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-10-23  1:02           ` Stephen Hemminger
2024-10-22 19:12         ` [v4 17/42] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-10-22 19:12         ` [v4 18/42] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-10-22 19:12         ` [v4 19/42] bus/fslmc: fix coverity issue vanshika.shukla
2024-10-22 19:12         ` [v4 20/42] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-10-22 19:12         ` [v4 21/42] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-10-22 19:12         ` [v4 22/42] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-10-22 19:12         ` [v4 23/42] net/dpaa2: flow API refactor vanshika.shukla
2024-10-23  0:52           ` Stephen Hemminger
2024-10-23 12:04             ` [EXT] " Vanshika Shukla
2024-10-22 19:12         ` [v4 24/42] net/dpaa2: dump Rx parser result vanshika.shukla
2024-10-22 19:12         ` [v4 25/42] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-10-22 19:12         ` [v4 26/42] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-10-22 19:12         ` [v4 27/42] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-10-22 19:12         ` [v4 28/42] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-10-22 19:12         ` [v4 29/42] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-10-22 19:12         ` [v4 30/42] net/dpaa2: add GTP flow support vanshika.shukla
2024-10-22 19:12         ` [v4 31/42] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-10-22 19:12         ` [v4 32/42] net/dpaa2: soft parser flow verification vanshika.shukla
2024-10-22 19:12         ` [v4 33/42] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-10-22 19:12         ` [v4 34/42] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-10-22 19:12         ` [v4 35/42] net/dpaa2: support software taildrop vanshika.shukla
2024-10-22 19:12         ` [v4 36/42] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-10-22 19:12         ` [v4 37/42] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-10-22 19:12         ` [v4 38/42] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-10-22 19:12         ` [v4 39/42] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-10-22 19:12         ` [v4 40/42] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-10-22 19:12         ` [v4 41/42] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-10-22 19:12         ` [v4 42/42] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-10-23 11:59         ` [v5 00/42] DPAA2 specific patches vanshika.shukla
2024-10-23 11:59           ` [v5 01/42] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-23 11:59           ` [v5 02/42] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-10-23 11:59           ` [v5 03/42] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-10-23 11:59           ` [v5 04/42] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-10-23 11:59           ` [v5 05/42] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-10-23 11:59           ` [v5 06/42] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-10-23 11:59           ` [v5 07/42] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-10-23 11:59           ` [v5 08/42] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-10-23 11:59           ` [v5 09/42] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-10-23 11:59           ` [v5 10/42] net/dpaa2: update DPNI link status method vanshika.shukla
2024-10-23 11:59           ` [v5 11/42] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-10-23 11:59           ` [v5 12/42] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-10-23 11:59           ` [v5 13/42] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-10-23 11:59           ` [v5 14/42] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-11-09 17:07             ` Thomas Monjalon
2024-10-23 11:59           ` [v5 15/42] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-10-23 11:59           ` [v5 16/42] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-10-23 11:59           ` [v5 17/42] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-10-23 11:59           ` [v5 18/42] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-10-23 11:59           ` [v5 19/42] bus/fslmc: fix coverity issue vanshika.shukla
2024-10-23 11:59           ` [v5 20/42] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-10-23 11:59           ` [v5 21/42] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-10-23 11:59           ` [v5 22/42] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-10-23 11:59           ` [v5 23/42] net/dpaa2: flow API refactor vanshika.shukla
2024-11-09 19:01             ` Thomas Monjalon
2024-10-23 11:59           ` [v5 24/42] net/dpaa2: dump Rx parser result vanshika.shukla
2024-10-23 11:59           ` [v5 25/42] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-10-23 11:59           ` [v5 26/42] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-10-23 11:59           ` [v5 27/42] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-10-23 11:59           ` [v5 28/42] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-10-23 11:59           ` [v5 29/42] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-10-23 11:59           ` [v5 30/42] net/dpaa2: add GTP flow support vanshika.shukla
2024-10-23 11:59           ` [v5 31/42] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-10-23 11:59           ` [v5 32/42] net/dpaa2: soft parser flow verification vanshika.shukla
2024-10-23 11:59           ` [v5 33/42] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-10-23 11:59           ` [v5 34/42] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-10-23 11:59           ` [v5 35/42] net/dpaa2: support software taildrop vanshika.shukla
2024-10-23 11:59           ` [v5 36/42] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-10-23 11:59           ` [v5 37/42] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-10-23 11:59           ` [v5 38/42] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-10-23 11:59           ` [v5 39/42] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-10-23 11:59           ` [v5 40/42] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-10-23 11:59           ` [v5 41/42] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-10-23 11:59           ` [v5 42/42] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-11-07 11:24           ` [v5 00/42] DPAA2 specific patches Hemant Agrawal
2024-09-18  7:50   ` [v2 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-18  7:50   ` [v2 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-18  7:50   ` [v2 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-18  7:50   ` [v2 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-18  7:50   ` [v2 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-18  7:50   ` [v2 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-18  7:50   ` [v2 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-18  7:50   ` [v2 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-18  7:50   ` [v2 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-18  7:50   ` [v2 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-18  7:50   ` [v2 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-18  7:50   ` [v2 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-18  7:50   ` vanshika.shukla [this message]
2024-09-18  7:50   ` [v2 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-18  7:50   ` [v2 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-18  7:50   ` [v2 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-18  7:50   ` [v2 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-18  7:50   ` [v2 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-18  7:50   ` [v2 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-18  7:50   ` [v2 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-18  7:50   ` [v2 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-18  7:50   ` [v2 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-18  7:50   ` [v2 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-18  7:50   ` [v2 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-18  7:50   ` [v2 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-18  7:50   ` [v2 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-18  7:50   ` [v2 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-18  7:50   ` [v2 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-18  7:50   ` [v2 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-18  7:50   ` [v2 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-18  7:50   ` [v2 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-18  7:50   ` [v2 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-18  7:50   ` [v2 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-18  7:50   ` [v2 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-18  7:50   ` [v2 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-18  7:50   ` [v2 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-18  7:50   ` [v2 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-18  7:50   ` [v2 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-18  7:50   ` [v2 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-18  7:50   ` [v2 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-18  7:50   ` [v2 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-18  7:50   ` [v2 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-10-10  2:54   ` [v2 00/43] DPAA2 specific patches Stephen Hemminger

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240918075056.1838654-15-vanshika.shukla@nxp.com \
    --to=vanshika.shukla@nxp.com \
    --cc=anatoly.burakov@intel.com \
    --cc=dev@dpdk.org \
    --cc=hemant.agrawal@nxp.com \
    --cc=jun.yang@nxp.com \
    --cc=sachin.saxena@nxp.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).