From: Srikanth Yalavarthi <syalavarthi@marvell.com>
To: Srikanth Yalavarthi <syalavarthi@marvell.com>
Cc: <dev@dpdk.org>, <sshankarnara@marvell.com>, <aprabhu@marvell.com>,
	<ptakkar@marvell.com>
Subject: [PATCH v1 03/34] ml/cnxk: add generic cnxk device structure
Date: Wed, 30 Aug 2023 08:58:53 -0700	[thread overview]
Message-ID: <20230830155927.3566-4-syalavarthi@marvell.com> (raw)
In-Reply-To: <20230830155927.3566-1-syalavarthi@marvell.com>

Introduce a generic cnxk device structure. This structure is the
top-level device structure for the driver and encapsulates the
target / platform specific device structure.
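
A minimal sketch of the new top-level structure, as implied by the
hunks below (the actual definition lives in the new file
drivers/ml/cnxk/cnxk_ml_dev.h; field order and naming may differ):

	/* Sketch only: inferred from this patch, not copied from
	 * cnxk_ml_dev.h.
	 */
	struct cnxk_ml_dev {
		/* Back-reference to the RTE ML device;
		 * dev->data->dev_private now points to this structure.
		 */
		struct rte_ml_dev *mldev;

		/* Configuration state, e.g. ML_CNXK_DEV_STATE_PROBED */
		enum cnxk_ml_dev_state state;

		/* Model counters, moved here from struct cn10k_ml_dev */
		uint16_t nb_models_loaded;
		uint16_t nb_models_unloaded;
		uint16_t nb_models_started;
		uint16_t nb_models_stopped;

		/* Embedded CN10K-specific device structure */
		struct cn10k_ml_dev cn10k_mldev;
	};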

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
 drivers/ml/cnxk/cn10k_ml_dev.c   | 315 ++++++++++----------
 drivers/ml/cnxk/cn10k_ml_dev.h   |  47 +--
 drivers/ml/cnxk/cn10k_ml_model.c |  14 +-
 drivers/ml/cnxk/cn10k_ml_model.h |   8 +-
 drivers/ml/cnxk/cn10k_ml_ocm.c   |  56 ++--
 drivers/ml/cnxk/cn10k_ml_ops.c   | 494 +++++++++++++++++--------------
 drivers/ml/cnxk/cnxk_ml_dev.c    |  11 +
 drivers/ml/cnxk/cnxk_ml_dev.h    |  58 ++++
 drivers/ml/cnxk/meson.build      |   2 +
 9 files changed, 562 insertions(+), 443 deletions(-)
 create mode 100644 drivers/ml/cnxk/cnxk_ml_dev.c
 create mode 100644 drivers/ml/cnxk/cnxk_ml_dev.h

diff --git a/drivers/ml/cnxk/cn10k_ml_dev.c b/drivers/ml/cnxk/cn10k_ml_dev.c
index b7e6ed9a00e..367fb7014c4 100644
--- a/drivers/ml/cnxk/cn10k_ml_dev.c
+++ b/drivers/ml/cnxk/cn10k_ml_dev.c
@@ -15,13 +15,15 @@
 #include <rte_mldev_pmd.h>
 #include <rte_pci.h>
 
-#include <roc_api.h>
-
 #include <eal_firmware.h>
 
+#include <roc_api.h>
+
 #include "cn10k_ml_dev.h"
 #include "cn10k_ml_ops.h"
 
+#include "cnxk_ml_dev.h"
+
 #define CN10K_ML_FW_PATH		"fw_path"
 #define CN10K_ML_FW_ENABLE_DPE_WARNINGS "enable_dpe_warnings"
 #define CN10K_ML_FW_REPORT_DPE_WARNINGS "report_dpe_warnings"
@@ -63,9 +65,6 @@ static const char *const valid_args[] = {CN10K_ML_FW_PATH,
 /* Supported OCM page sizes: 1KB, 2KB, 4KB, 8KB and 16KB */
 static const int valid_ocm_page_size[] = {1024, 2048, 4096, 8192, 16384};
 
-/* Dummy operations for ML device */
-struct rte_ml_dev_ops ml_dev_dummy_ops = {0};
-
 static int
 ml_read_file(const char *file, size_t *size, char **buffer)
 {
@@ -146,7 +145,7 @@ parse_integer_arg(const char *key __rte_unused, const char *value, void *extra_a
 }
 
 static int
-cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mldev)
+cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *cn10k_mldev)
 {
 	bool enable_dpe_warnings_set = false;
 	bool report_dpe_warnings_set = false;
@@ -183,7 +182,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde
 
 	if (rte_kvargs_count(kvlist, CN10K_ML_FW_ENABLE_DPE_WARNINGS) == 1) {
 		ret = rte_kvargs_process(kvlist, CN10K_ML_FW_ENABLE_DPE_WARNINGS,
-					 &parse_integer_arg, &mldev->fw.enable_dpe_warnings);
+					 &parse_integer_arg, &cn10k_mldev->fw.enable_dpe_warnings);
 		if (ret < 0) {
 			plt_err("Error processing arguments, key = %s\n",
 				CN10K_ML_FW_ENABLE_DPE_WARNINGS);
@@ -195,7 +194,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde
 
 	if (rte_kvargs_count(kvlist, CN10K_ML_FW_REPORT_DPE_WARNINGS) == 1) {
 		ret = rte_kvargs_process(kvlist, CN10K_ML_FW_REPORT_DPE_WARNINGS,
-					 &parse_integer_arg, &mldev->fw.report_dpe_warnings);
+					 &parse_integer_arg, &cn10k_mldev->fw.report_dpe_warnings);
 		if (ret < 0) {
 			plt_err("Error processing arguments, key = %s\n",
 				CN10K_ML_FW_REPORT_DPE_WARNINGS);
@@ -207,7 +206,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde
 
 	if (rte_kvargs_count(kvlist, CN10K_ML_DEV_CACHE_MODEL_DATA) == 1) {
 		ret = rte_kvargs_process(kvlist, CN10K_ML_DEV_CACHE_MODEL_DATA, &parse_integer_arg,
-					 &mldev->cache_model_data);
+					 &cn10k_mldev->cache_model_data);
 		if (ret < 0) {
 			plt_err("Error processing arguments, key = %s\n",
 				CN10K_ML_DEV_CACHE_MODEL_DATA);
@@ -230,7 +229,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde
 
 	if (rte_kvargs_count(kvlist, CN10K_ML_DEV_HW_QUEUE_LOCK) == 1) {
 		ret = rte_kvargs_process(kvlist, CN10K_ML_DEV_HW_QUEUE_LOCK, &parse_integer_arg,
-					 &mldev->hw_queue_lock);
+					 &cn10k_mldev->hw_queue_lock);
 		if (ret < 0) {
 			plt_err("Error processing arguments, key = %s\n",
 				CN10K_ML_DEV_HW_QUEUE_LOCK);
@@ -242,7 +241,7 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde
 
 	if (rte_kvargs_count(kvlist, CN10K_ML_OCM_PAGE_SIZE) == 1) {
 		ret = rte_kvargs_process(kvlist, CN10K_ML_OCM_PAGE_SIZE, &parse_integer_arg,
-					 &mldev->ocm_page_size);
+					 &cn10k_mldev->ocm_page_size);
 		if (ret < 0) {
 			plt_err("Error processing arguments, key = %s\n", CN10K_ML_OCM_PAGE_SIZE);
 			ret = -EINVAL;
@@ -253,49 +252,53 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde
 
 check_args:
 	if (!fw_path_set)
-		mldev->fw.path = CN10K_ML_FW_PATH_DEFAULT;
+		cn10k_mldev->fw.path = CN10K_ML_FW_PATH_DEFAULT;
 	else
-		mldev->fw.path = fw_path;
-	plt_info("ML: %s = %s", CN10K_ML_FW_PATH, mldev->fw.path);
+		cn10k_mldev->fw.path = fw_path;
+	plt_info("ML: %s = %s", CN10K_ML_FW_PATH, cn10k_mldev->fw.path);
 
 	if (!enable_dpe_warnings_set) {
-		mldev->fw.enable_dpe_warnings = CN10K_ML_FW_ENABLE_DPE_WARNINGS_DEFAULT;
+		cn10k_mldev->fw.enable_dpe_warnings = CN10K_ML_FW_ENABLE_DPE_WARNINGS_DEFAULT;
 	} else {
-		if ((mldev->fw.enable_dpe_warnings < 0) || (mldev->fw.enable_dpe_warnings > 1)) {
+		if ((cn10k_mldev->fw.enable_dpe_warnings < 0) ||
+		    (cn10k_mldev->fw.enable_dpe_warnings > 1)) {
 			plt_err("Invalid argument, %s = %d\n", CN10K_ML_FW_ENABLE_DPE_WARNINGS,
-				mldev->fw.enable_dpe_warnings);
+				cn10k_mldev->fw.enable_dpe_warnings);
 			ret = -EINVAL;
 			goto exit;
 		}
 	}
-	plt_info("ML: %s = %d", CN10K_ML_FW_ENABLE_DPE_WARNINGS, mldev->fw.enable_dpe_warnings);
+	plt_info("ML: %s = %d", CN10K_ML_FW_ENABLE_DPE_WARNINGS,
+		 cn10k_mldev->fw.enable_dpe_warnings);
 
 	if (!report_dpe_warnings_set) {
-		mldev->fw.report_dpe_warnings = CN10K_ML_FW_REPORT_DPE_WARNINGS_DEFAULT;
+		cn10k_mldev->fw.report_dpe_warnings = CN10K_ML_FW_REPORT_DPE_WARNINGS_DEFAULT;
 	} else {
-		if ((mldev->fw.report_dpe_warnings < 0) || (mldev->fw.report_dpe_warnings > 1)) {
+		if ((cn10k_mldev->fw.report_dpe_warnings < 0) ||
+		    (cn10k_mldev->fw.report_dpe_warnings > 1)) {
 			plt_err("Invalid argument, %s = %d\n", CN10K_ML_FW_REPORT_DPE_WARNINGS,
-				mldev->fw.report_dpe_warnings);
+				cn10k_mldev->fw.report_dpe_warnings);
 			ret = -EINVAL;
 			goto exit;
 		}
 	}
-	plt_info("ML: %s = %d", CN10K_ML_FW_REPORT_DPE_WARNINGS, mldev->fw.report_dpe_warnings);
+	plt_info("ML: %s = %d", CN10K_ML_FW_REPORT_DPE_WARNINGS,
+		 cn10k_mldev->fw.report_dpe_warnings);
 
 	if (!cache_model_data_set) {
-		mldev->cache_model_data = CN10K_ML_DEV_CACHE_MODEL_DATA_DEFAULT;
+		cn10k_mldev->cache_model_data = CN10K_ML_DEV_CACHE_MODEL_DATA_DEFAULT;
 	} else {
-		if ((mldev->cache_model_data < 0) || (mldev->cache_model_data > 1)) {
+		if ((cn10k_mldev->cache_model_data < 0) || (cn10k_mldev->cache_model_data > 1)) {
 			plt_err("Invalid argument, %s = %d\n", CN10K_ML_DEV_CACHE_MODEL_DATA,
-				mldev->cache_model_data);
+				cn10k_mldev->cache_model_data);
 			ret = -EINVAL;
 			goto exit;
 		}
 	}
-	plt_info("ML: %s = %d", CN10K_ML_DEV_CACHE_MODEL_DATA, mldev->cache_model_data);
+	plt_info("ML: %s = %d", CN10K_ML_DEV_CACHE_MODEL_DATA, cn10k_mldev->cache_model_data);
 
 	if (!ocm_alloc_mode_set) {
-		mldev->ocm.alloc_mode = CN10K_ML_OCM_ALLOC_MODE_DEFAULT;
+		cn10k_mldev->ocm.alloc_mode = CN10K_ML_OCM_ALLOC_MODE_DEFAULT;
 	} else {
 		if (!((strcmp(ocm_alloc_mode, "lowest") == 0) ||
 		      (strcmp(ocm_alloc_mode, "largest") == 0))) {
@@ -304,47 +307,47 @@ cn10k_mldev_parse_devargs(struct rte_devargs *devargs, struct cn10k_ml_dev *mlde
 			ret = -EINVAL;
 			goto exit;
 		}
-		mldev->ocm.alloc_mode = ocm_alloc_mode;
+		cn10k_mldev->ocm.alloc_mode = ocm_alloc_mode;
 	}
-	plt_info("ML: %s = %s", CN10K_ML_OCM_ALLOC_MODE, mldev->ocm.alloc_mode);
+	plt_info("ML: %s = %s", CN10K_ML_OCM_ALLOC_MODE, cn10k_mldev->ocm.alloc_mode);
 
 	if (!hw_queue_lock_set) {
-		mldev->hw_queue_lock = CN10K_ML_DEV_HW_QUEUE_LOCK_DEFAULT;
+		cn10k_mldev->hw_queue_lock = CN10K_ML_DEV_HW_QUEUE_LOCK_DEFAULT;
 	} else {
-		if ((mldev->hw_queue_lock < 0) || (mldev->hw_queue_lock > 1)) {
+		if ((cn10k_mldev->hw_queue_lock < 0) || (cn10k_mldev->hw_queue_lock > 1)) {
 			plt_err("Invalid argument, %s = %d\n", CN10K_ML_DEV_HW_QUEUE_LOCK,
-				mldev->hw_queue_lock);
+				cn10k_mldev->hw_queue_lock);
 			ret = -EINVAL;
 			goto exit;
 		}
 	}
-	plt_info("ML: %s = %d", CN10K_ML_DEV_HW_QUEUE_LOCK, mldev->hw_queue_lock);
+	plt_info("ML: %s = %d", CN10K_ML_DEV_HW_QUEUE_LOCK, cn10k_mldev->hw_queue_lock);
 
 	if (!ocm_page_size_set) {
-		mldev->ocm_page_size = CN10K_ML_OCM_PAGE_SIZE_DEFAULT;
+		cn10k_mldev->ocm_page_size = CN10K_ML_OCM_PAGE_SIZE_DEFAULT;
 	} else {
-		if (mldev->ocm_page_size < 0) {
+		if (cn10k_mldev->ocm_page_size < 0) {
 			plt_err("Invalid argument, %s = %d\n", CN10K_ML_OCM_PAGE_SIZE,
-				mldev->ocm_page_size);
+				cn10k_mldev->ocm_page_size);
 			ret = -EINVAL;
 			goto exit;
 		}
 
 		found = false;
 		for (i = 0; i < PLT_DIM(valid_ocm_page_size); i++) {
-			if (mldev->ocm_page_size == valid_ocm_page_size[i]) {
+			if (cn10k_mldev->ocm_page_size == valid_ocm_page_size[i]) {
 				found = true;
 				break;
 			}
 		}
 
 		if (!found) {
-			plt_err("Unsupported ocm_page_size = %d\n", mldev->ocm_page_size);
+			plt_err("Unsupported ocm_page_size = %d\n", cn10k_mldev->ocm_page_size);
 			ret = -EINVAL;
 			goto exit;
 		}
 	}
-	plt_info("ML: %s = %d", CN10K_ML_OCM_PAGE_SIZE, mldev->ocm_page_size);
+	plt_info("ML: %s = %d", CN10K_ML_OCM_PAGE_SIZE, cn10k_mldev->ocm_page_size);
 
 exit:
 	rte_kvargs_free(kvlist);
@@ -356,7 +359,8 @@ static int
 cn10k_ml_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 {
 	struct rte_ml_dev_pmd_init_params init_params;
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	char name[RTE_ML_STR_MAX];
 	struct rte_ml_dev *dev;
 	int ret;
@@ -364,7 +368,7 @@ cn10k_ml_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_de
 	PLT_SET_USED(pci_drv);
 
 	init_params = (struct rte_ml_dev_pmd_init_params){
-		.socket_id = rte_socket_id(), .private_data_size = sizeof(struct cn10k_ml_dev)};
+		.socket_id = rte_socket_id(), .private_data_size = sizeof(struct cnxk_ml_dev)};
 
 	ret = roc_plt_init();
 	if (ret < 0) {
@@ -380,18 +384,20 @@ cn10k_ml_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_de
 	}
 
 	/* Get private data space allocated */
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cnxk_mldev->mldev = dev;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		mldev->roc.pci_dev = pci_dev;
+		cn10k_mldev->roc.pci_dev = pci_dev;
 
-		ret = cn10k_mldev_parse_devargs(dev->device->devargs, mldev);
+		ret = cn10k_mldev_parse_devargs(dev->device->devargs, cn10k_mldev);
 		if (ret) {
 			plt_err("Failed to parse devargs ret = %d", ret);
 			goto pmd_destroy;
 		}
 
-		ret = roc_ml_dev_init(&mldev->roc);
+		ret = roc_ml_dev_init(&cn10k_mldev->roc);
 		if (ret) {
 			plt_err("Failed to initialize ML ROC, ret = %d", ret);
 			goto pmd_destroy;
@@ -407,7 +413,7 @@ cn10k_ml_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_de
 	dev->dequeue_burst = NULL;
 	dev->op_error_get = NULL;
 
-	mldev->state = ML_CN10K_DEV_STATE_PROBED;
+	cnxk_mldev->state = ML_CNXK_DEV_STATE_PROBED;
 
 	return 0;
 
@@ -424,7 +430,7 @@ cn10k_ml_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_de
 static int
 cn10k_ml_pci_remove(struct rte_pci_device *pci_dev)
 {
-	struct cn10k_ml_dev *mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	char name[RTE_ML_STR_MAX];
 	struct rte_ml_dev *dev;
 	int ret;
@@ -439,8 +445,8 @@ cn10k_ml_pci_remove(struct rte_pci_device *pci_dev)
 		return -ENODEV;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		mldev = dev->data->dev_private;
-		ret = roc_ml_dev_fini(&mldev->roc);
+		cnxk_mldev = dev->data->dev_private;
+		ret = roc_ml_dev_fini(&cnxk_mldev->cn10k_mldev.roc);
 		if (ret)
 			return ret;
 	}
@@ -486,45 +492,45 @@ cn10k_ml_fw_flags_get(struct cn10k_ml_fw *fw)
 static int
 cn10k_ml_fw_load_asim(struct cn10k_ml_fw *fw)
 {
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
 	uint64_t timeout_cycle;
 	uint64_t reg_val64;
 	bool timeout;
 	int ret = 0;
 
-	mldev = fw->mldev;
+	cn10k_mldev = fw->cn10k_mldev;
 
 	/* Reset HEAD and TAIL debug pointer registers */
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C0);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C0);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C1);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C1);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C0);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C1);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C0);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C0);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C1);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C1);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C0);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C1);
 
 	/* Set ML_MLR_BASE to base IOVA of the ML region in LLC/DRAM. */
 	reg_val64 = rte_eal_get_baseaddr();
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_MLR_BASE);
-	plt_ml_dbg("ML_MLR_BASE = 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_MLR_BASE));
-	roc_ml_reg_save(&mldev->roc, ML_MLR_BASE);
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_MLR_BASE);
+	plt_ml_dbg("ML_MLR_BASE = 0x%016lx", roc_ml_reg_read64(&cn10k_mldev->roc, ML_MLR_BASE));
+	roc_ml_reg_save(&cn10k_mldev->roc, ML_MLR_BASE);
 
 	/* Update FW load completion structure */
 	fw->req->jd.hdr.jce.w1.u64 = PLT_U64_CAST(&fw->req->status);
 	fw->req->jd.hdr.job_type = ML_CN10K_JOB_TYPE_FIRMWARE_LOAD;
-	fw->req->jd.hdr.result = roc_ml_addr_ap2mlip(&mldev->roc, &fw->req->result);
+	fw->req->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &fw->req->result);
 	fw->req->jd.fw_load.flags = cn10k_ml_fw_flags_get(fw);
-	plt_write64(ML_CN10K_POLL_JOB_START, &fw->req->status);
+	plt_write64(ML_CNXK_POLL_JOB_START, &fw->req->status);
 	plt_wmb();
 
 	/* Enqueue FW load through scratch registers */
 	timeout = true;
-	timeout_cycle = plt_tsc_cycles() + ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();
-	roc_ml_scratch_enqueue(&mldev->roc, &fw->req->jd);
+	timeout_cycle = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();
+	roc_ml_scratch_enqueue(&cn10k_mldev->roc, &fw->req->jd);
 
 	plt_rmb();
 	do {
-		if (roc_ml_scratch_is_done_bit_set(&mldev->roc) &&
-		    (plt_read64(&fw->req->status) == ML_CN10K_POLL_JOB_FINISH)) {
+		if (roc_ml_scratch_is_done_bit_set(&cn10k_mldev->roc) &&
+		    (plt_read64(&fw->req->status) == ML_CNXK_POLL_JOB_FINISH)) {
 			timeout = false;
 			break;
 		}
@@ -536,11 +542,11 @@ cn10k_ml_fw_load_asim(struct cn10k_ml_fw *fw)
 	} else {
 		/* Set ML to disable new jobs */
 		reg_val64 = (ROC_ML_CFG_JD_SIZE | ROC_ML_CFG_MLIP_ENA);
-		roc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);
+		roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);
 
 		/* Clear scratch registers */
-		roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_WORK_PTR);
-		roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_FW_CTRL);
+		roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_WORK_PTR);
+		roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_FW_CTRL);
 
 		if (timeout) {
 			plt_err("Firmware load timeout");
@@ -554,14 +560,14 @@ cn10k_ml_fw_load_asim(struct cn10k_ml_fw *fw)
 	}
 
 	/* Reset scratch registers */
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_FW_CTRL);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_WORK_PTR);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_FW_CTRL);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_WORK_PTR);
 
 	/* Disable job execution, to be enabled in start */
-	reg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);
+	reg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);
 	reg_val64 &= ~ROC_ML_CFG_ENA;
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);
-	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_CFG));
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);
+	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));
 
 	return ret;
 }
@@ -571,7 +577,7 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)
 {
 	union ml_a35_0_rst_vector_base_s a35_0_rst_vector_base;
 	union ml_a35_0_rst_vector_base_s a35_1_rst_vector_base;
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
 	uint64_t timeout_cycle;
 	uint64_t reg_val64;
 	uint32_t reg_val32;
@@ -580,24 +586,24 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)
 	int ret = 0;
 	uint8_t i;
 
-	mldev = fw->mldev;
+	cn10k_mldev = fw->cn10k_mldev;
 
 	/* Reset HEAD and TAIL debug pointer registers */
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C0);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C0);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C1);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C1);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C0);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C1);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C0);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C0);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C1);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C1);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C0);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_EXCEPTION_SP_C1);
 
 	/* (1) Write firmware images for ACC's two A35 cores to the ML region in LLC / DRAM. */
 	rte_memcpy(PLT_PTR_ADD(fw->data, FW_LINKER_OFFSET), buffer, size);
 
 	/* (2) Set ML(0)_MLR_BASE = Base IOVA of the ML region in LLC/DRAM. */
 	reg_val64 = PLT_PTR_SUB_U64_CAST(fw->data, rte_eal_get_baseaddr());
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_MLR_BASE);
-	plt_ml_dbg("ML_MLR_BASE => 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_MLR_BASE));
-	roc_ml_reg_save(&mldev->roc, ML_MLR_BASE);
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_MLR_BASE);
+	plt_ml_dbg("ML_MLR_BASE => 0x%016lx", roc_ml_reg_read64(&cn10k_mldev->roc, ML_MLR_BASE));
+	roc_ml_reg_save(&cn10k_mldev->roc, ML_MLR_BASE);
 
 	/* (3) Set ML(0)_AXI_BRIDGE_CTRL(1) = 0x184003 to remove back-pressure check on DMA AXI
 	 * bridge.
@@ -605,9 +611,9 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)
 	reg_val64 = (ROC_ML_AXI_BRIDGE_CTRL_AXI_RESP_CTRL |
 		     ROC_ML_AXI_BRIDGE_CTRL_BRIDGE_CTRL_MODE | ROC_ML_AXI_BRIDGE_CTRL_NCB_WR_BLK |
 		     ROC_ML_AXI_BRIDGE_CTRL_FORCE_WRESP_OK | ROC_ML_AXI_BRIDGE_CTRL_FORCE_RRESP_OK);
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_AXI_BRIDGE_CTRL(1));
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_AXI_BRIDGE_CTRL(1));
 	plt_ml_dbg("ML_AXI_BRIDGE_CTRL(1) => 0x%016lx",
-		   roc_ml_reg_read64(&mldev->roc, ML_AXI_BRIDGE_CTRL(1)));
+		   roc_ml_reg_read64(&cn10k_mldev->roc, ML_AXI_BRIDGE_CTRL(1)));
 
 	/* (4) Set ML(0)_ANB(0..2)_BACKP_DISABLE = 0x3 to remove back-pressure on the AXI to NCB
 	 * bridges.
@@ -615,9 +621,9 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)
 	for (i = 0; i < ML_ANBX_NR; i++) {
 		reg_val64 = (ROC_ML_ANBX_BACKP_DISABLE_EXTMSTR_B_BACKP_DISABLE |
 			     ROC_ML_ANBX_BACKP_DISABLE_EXTMSTR_R_BACKP_DISABLE);
-		roc_ml_reg_write64(&mldev->roc, reg_val64, ML_ANBX_BACKP_DISABLE(i));
+		roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_ANBX_BACKP_DISABLE(i));
 		plt_ml_dbg("ML_ANBX_BACKP_DISABLE(%u) => 0x%016lx", i,
-			   roc_ml_reg_read64(&mldev->roc, ML_ANBX_BACKP_DISABLE(i)));
+			   roc_ml_reg_read64(&cn10k_mldev->roc, ML_ANBX_BACKP_DISABLE(i)));
 	}
 
 	/* (5) Set ML(0)_ANB(0..2)_NCBI_P_OVR = 0x3000 and ML(0)_ANB(0..2)_NCBI_NP_OVR = 0x3000 to
@@ -626,39 +632,40 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)
 	for (i = 0; i < ML_ANBX_NR; i++) {
 		reg_val64 = (ML_ANBX_NCBI_P_OVR_ANB_NCBI_P_NS_OVR |
 			     ML_ANBX_NCBI_P_OVR_ANB_NCBI_P_NS_OVR_VLD);
-		roc_ml_reg_write64(&mldev->roc, reg_val64, ML_ANBX_NCBI_P_OVR(i));
+		roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_ANBX_NCBI_P_OVR(i));
 		plt_ml_dbg("ML_ANBX_NCBI_P_OVR(%u) => 0x%016lx", i,
-			   roc_ml_reg_read64(&mldev->roc, ML_ANBX_NCBI_P_OVR(i)));
+			   roc_ml_reg_read64(&cn10k_mldev->roc, ML_ANBX_NCBI_P_OVR(i)));
 
 		reg_val64 |= (ML_ANBX_NCBI_NP_OVR_ANB_NCBI_NP_NS_OVR |
 			      ML_ANBX_NCBI_NP_OVR_ANB_NCBI_NP_NS_OVR_VLD);
-		roc_ml_reg_write64(&mldev->roc, reg_val64, ML_ANBX_NCBI_NP_OVR(i));
+		roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_ANBX_NCBI_NP_OVR(i));
 		plt_ml_dbg("ML_ANBX_NCBI_NP_OVR(%u) => 0x%016lx", i,
-			   roc_ml_reg_read64(&mldev->roc, ML_ANBX_NCBI_NP_OVR(i)));
+			   roc_ml_reg_read64(&cn10k_mldev->roc, ML_ANBX_NCBI_NP_OVR(i)));
 	}
 
 	/* (6) Set ML(0)_CFG[MLIP_CLK_FORCE] = 1, to force turning on the MLIP clock. */
-	reg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);
+	reg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);
 	reg_val64 |= ROC_ML_CFG_MLIP_CLK_FORCE;
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);
-	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_CFG));
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);
+	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));
 
 	/* (7) Set ML(0)_JOB_MGR_CTRL[STALL_ON_IDLE] = 0, to make sure the boot request is accepted
 	 * when there is no job in the command queue.
 	 */
-	reg_val64 = roc_ml_reg_read64(&mldev->roc, ML_JOB_MGR_CTRL);
+	reg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_JOB_MGR_CTRL);
 	reg_val64 &= ~ROC_ML_JOB_MGR_CTRL_STALL_ON_IDLE;
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_JOB_MGR_CTRL);
-	plt_ml_dbg("ML_JOB_MGR_CTRL => 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_JOB_MGR_CTRL));
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_JOB_MGR_CTRL);
+	plt_ml_dbg("ML_JOB_MGR_CTRL => 0x%016lx",
+		   roc_ml_reg_read64(&cn10k_mldev->roc, ML_JOB_MGR_CTRL));
 
 	/* (8) Set ML(0)_CFG[ENA] = 0 and ML(0)_CFG[MLIP_ENA] = 1 to bring MLIP out of reset while
 	 * keeping the job manager disabled.
 	 */
-	reg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);
+	reg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);
 	reg_val64 |= ROC_ML_CFG_MLIP_ENA;
 	reg_val64 &= ~ROC_ML_CFG_ENA;
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);
-	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_CFG));
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);
+	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));
 
 	/* (9) Wait at least 70 coprocessor clock cycles. */
 	plt_delay_us(FW_WAIT_CYCLES);
@@ -669,53 +676,57 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)
 	 * AXI outbound address divided by 4. Read after write.
 	 */
 	offset = PLT_PTR_ADD_U64_CAST(
-		fw->data, FW_LINKER_OFFSET - roc_ml_reg_read64(&mldev->roc, ML_MLR_BASE));
+		fw->data, FW_LINKER_OFFSET - roc_ml_reg_read64(&cn10k_mldev->roc, ML_MLR_BASE));
 	a35_0_rst_vector_base.s.addr = (offset + ML_AXI_START_ADDR) / 4;
 	a35_1_rst_vector_base.s.addr = (offset + ML_AXI_START_ADDR) / 4;
 
-	roc_ml_reg_write32(&mldev->roc, a35_0_rst_vector_base.w.w0, ML_A35_0_RST_VECTOR_BASE_W(0));
-	reg_val32 = roc_ml_reg_read32(&mldev->roc, ML_A35_0_RST_VECTOR_BASE_W(0));
+	roc_ml_reg_write32(&cn10k_mldev->roc, a35_0_rst_vector_base.w.w0,
+			   ML_A35_0_RST_VECTOR_BASE_W(0));
+	reg_val32 = roc_ml_reg_read32(&cn10k_mldev->roc, ML_A35_0_RST_VECTOR_BASE_W(0));
 	plt_ml_dbg("ML_A35_0_RST_VECTOR_BASE_W(0) => 0x%08x", reg_val32);
 
-	roc_ml_reg_write32(&mldev->roc, a35_0_rst_vector_base.w.w1, ML_A35_0_RST_VECTOR_BASE_W(1));
-	reg_val32 = roc_ml_reg_read32(&mldev->roc, ML_A35_0_RST_VECTOR_BASE_W(1));
+	roc_ml_reg_write32(&cn10k_mldev->roc, a35_0_rst_vector_base.w.w1,
+			   ML_A35_0_RST_VECTOR_BASE_W(1));
+	reg_val32 = roc_ml_reg_read32(&cn10k_mldev->roc, ML_A35_0_RST_VECTOR_BASE_W(1));
 	plt_ml_dbg("ML_A35_0_RST_VECTOR_BASE_W(1) => 0x%08x", reg_val32);
 
-	roc_ml_reg_write32(&mldev->roc, a35_1_rst_vector_base.w.w0, ML_A35_1_RST_VECTOR_BASE_W(0));
-	reg_val32 = roc_ml_reg_read32(&mldev->roc, ML_A35_1_RST_VECTOR_BASE_W(0));
+	roc_ml_reg_write32(&cn10k_mldev->roc, a35_1_rst_vector_base.w.w0,
+			   ML_A35_1_RST_VECTOR_BASE_W(0));
+	reg_val32 = roc_ml_reg_read32(&cn10k_mldev->roc, ML_A35_1_RST_VECTOR_BASE_W(0));
 	plt_ml_dbg("ML_A35_1_RST_VECTOR_BASE_W(0) => 0x%08x", reg_val32);
 
-	roc_ml_reg_write32(&mldev->roc, a35_1_rst_vector_base.w.w1, ML_A35_1_RST_VECTOR_BASE_W(1));
-	reg_val32 = roc_ml_reg_read32(&mldev->roc, ML_A35_1_RST_VECTOR_BASE_W(1));
+	roc_ml_reg_write32(&cn10k_mldev->roc, a35_1_rst_vector_base.w.w1,
+			   ML_A35_1_RST_VECTOR_BASE_W(1));
+	reg_val32 = roc_ml_reg_read32(&cn10k_mldev->roc, ML_A35_1_RST_VECTOR_BASE_W(1));
 	plt_ml_dbg("ML_A35_1_RST_VECTOR_BASE_W(1) => 0x%08x", reg_val32);
 
 	/* (11) Clear MLIP's ML(0)_SW_RST_CTRL[ACC_RST]. This will bring the ACC cores and other
 	 * MLIP components out of reset. The cores will execute firmware from the ML region as
 	 * written in step 1.
 	 */
-	reg_val32 = roc_ml_reg_read32(&mldev->roc, ML_SW_RST_CTRL);
+	reg_val32 = roc_ml_reg_read32(&cn10k_mldev->roc, ML_SW_RST_CTRL);
 	reg_val32 &= ~ROC_ML_SW_RST_CTRL_ACC_RST;
-	roc_ml_reg_write32(&mldev->roc, reg_val32, ML_SW_RST_CTRL);
-	reg_val32 = roc_ml_reg_read32(&mldev->roc, ML_SW_RST_CTRL);
+	roc_ml_reg_write32(&cn10k_mldev->roc, reg_val32, ML_SW_RST_CTRL);
+	reg_val32 = roc_ml_reg_read32(&cn10k_mldev->roc, ML_SW_RST_CTRL);
 	plt_ml_dbg("ML_SW_RST_CTRL => 0x%08x", reg_val32);
 
 	/* (12) Wait for notification from firmware that ML is ready for job execution. */
 	fw->req->jd.hdr.jce.w1.u64 = PLT_U64_CAST(&fw->req->status);
 	fw->req->jd.hdr.job_type = ML_CN10K_JOB_TYPE_FIRMWARE_LOAD;
-	fw->req->jd.hdr.result = roc_ml_addr_ap2mlip(&mldev->roc, &fw->req->result);
+	fw->req->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &fw->req->result);
 	fw->req->jd.fw_load.flags = cn10k_ml_fw_flags_get(fw);
-	plt_write64(ML_CN10K_POLL_JOB_START, &fw->req->status);
+	plt_write64(ML_CNXK_POLL_JOB_START, &fw->req->status);
 	plt_wmb();
 
 	/* Enqueue FW load through scratch registers */
 	timeout = true;
-	timeout_cycle = plt_tsc_cycles() + ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();
-	roc_ml_scratch_enqueue(&mldev->roc, &fw->req->jd);
+	timeout_cycle = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();
+	roc_ml_scratch_enqueue(&cn10k_mldev->roc, &fw->req->jd);
 
 	plt_rmb();
 	do {
-		if (roc_ml_scratch_is_done_bit_set(&mldev->roc) &&
-		    (plt_read64(&fw->req->status) == ML_CN10K_POLL_JOB_FINISH)) {
+		if (roc_ml_scratch_is_done_bit_set(&cn10k_mldev->roc) &&
+		    (plt_read64(&fw->req->status) == ML_CNXK_POLL_JOB_FINISH)) {
 			timeout = false;
 			break;
 		}
@@ -727,11 +738,11 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)
 	} else {
 		/* Set ML to disable new jobs */
 		reg_val64 = (ROC_ML_CFG_JD_SIZE | ROC_ML_CFG_MLIP_ENA);
-		roc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);
+		roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);
 
 		/* Clear scratch registers */
-		roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_WORK_PTR);
-		roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_FW_CTRL);
+		roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_WORK_PTR);
+		roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_FW_CTRL);
 
 		if (timeout) {
 			plt_err("Firmware load timeout");
@@ -747,49 +758,51 @@ cn10k_ml_fw_load_cn10ka(struct cn10k_ml_fw *fw, void *buffer, uint64_t size)
 	/* (13) Set ML(0)_JOB_MGR_CTRL[STALL_ON_IDLE] = 0x1; this is needed to shut down the MLIP
 	 * clock when there are no more jobs to process.
 	 */
-	reg_val64 = roc_ml_reg_read64(&mldev->roc, ML_JOB_MGR_CTRL);
+	reg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_JOB_MGR_CTRL);
 	reg_val64 |= ROC_ML_JOB_MGR_CTRL_STALL_ON_IDLE;
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_JOB_MGR_CTRL);
-	plt_ml_dbg("ML_JOB_MGR_CTRL => 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_JOB_MGR_CTRL));
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_JOB_MGR_CTRL);
+	plt_ml_dbg("ML_JOB_MGR_CTRL => 0x%016lx",
+		   roc_ml_reg_read64(&cn10k_mldev->roc, ML_JOB_MGR_CTRL));
 
 	/* (14) Set ML(0)_CFG[MLIP_CLK_FORCE] = 0; the MLIP clock will be turned on/off based on job
 	 * activities.
 	 */
-	reg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);
+	reg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);
 	reg_val64 &= ~ROC_ML_CFG_MLIP_CLK_FORCE;
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);
-	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_CFG));
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);
+	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));
 
 	/* (15) Set ML(0)_CFG[ENA] to enable ML job execution. */
-	reg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);
+	reg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);
 	reg_val64 |= ROC_ML_CFG_ENA;
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);
-	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_CFG));
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);
+	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));
 
 	/* Reset scratch registers */
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_FW_CTRL);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_WORK_PTR);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_FW_CTRL);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_WORK_PTR);
 
 	/* Disable job execution, to be enabled in start */
-	reg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);
+	reg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);
 	reg_val64 &= ~ROC_ML_CFG_ENA;
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);
-	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_CFG));
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);
+	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));
 
 	/* Additional fixes: Set RO bit to fix O2D DMA bandwidth issue on cn10ka */
 	for (i = 0; i < ML_ANBX_NR; i++) {
-		reg_val64 = roc_ml_reg_read64(&mldev->roc, ML_ANBX_NCBI_P_OVR(i));
+		reg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_ANBX_NCBI_P_OVR(i));
 		reg_val64 |= (ML_ANBX_NCBI_P_OVR_ANB_NCBI_P_RO_OVR |
 			      ML_ANBX_NCBI_P_OVR_ANB_NCBI_P_RO_OVR_VLD);
-		roc_ml_reg_write64(&mldev->roc, reg_val64, ML_ANBX_NCBI_P_OVR(i));
+		roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_ANBX_NCBI_P_OVR(i));
 	}
 
 	return ret;
 }
 
 int
-cn10k_ml_fw_load(struct cn10k_ml_dev *mldev)
+cn10k_ml_fw_load(struct cnxk_ml_dev *cnxk_mldev)
 {
+	struct cn10k_ml_dev *cn10k_mldev;
 	const struct plt_memzone *mz;
 	struct cn10k_ml_fw *fw;
 	char *fw_buffer = NULL;
@@ -797,8 +810,9 @@ cn10k_ml_fw_load(struct cn10k_ml_dev *mldev)
 	uint64_t fw_size = 0;
 	int ret = 0;
 
-	fw = &mldev->fw;
-	fw->mldev = mldev;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
+	fw = &cn10k_mldev->fw;
+	fw->cn10k_mldev = cn10k_mldev;
 
 	if (roc_env_is_emulator() || roc_env_is_hw()) {
 		/* Read firmware image to a buffer */
@@ -829,8 +843,8 @@ cn10k_ml_fw_load(struct cn10k_ml_dev *mldev)
 	memset(&fw->req->jd.fw_load.version[0], '\0', MLDEV_FIRMWARE_VERSION_LENGTH);
 
 	/* Reset device, if in active state */
-	if (roc_ml_mlip_is_enabled(&mldev->roc))
-		roc_ml_mlip_reset(&mldev->roc, true);
+	if (roc_ml_mlip_is_enabled(&cn10k_mldev->roc))
+		roc_ml_mlip_reset(&cn10k_mldev->roc, true);
 
 	/* Load firmware */
 	if (roc_env_is_emulator() || roc_env_is_hw()) {
@@ -843,22 +857,25 @@ cn10k_ml_fw_load(struct cn10k_ml_dev *mldev)
 	}
 
 	if (ret < 0)
-		cn10k_ml_fw_unload(mldev);
+		cn10k_ml_fw_unload(cnxk_mldev);
 
 	return ret;
 }
 
 void
-cn10k_ml_fw_unload(struct cn10k_ml_dev *mldev)
+cn10k_ml_fw_unload(struct cnxk_ml_dev *cnxk_mldev)
 {
+	struct cn10k_ml_dev *cn10k_mldev;
 	const struct plt_memzone *mz;
 	uint64_t reg_val;
 
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
+
 	/* Disable and reset device */
-	reg_val = roc_ml_reg_read64(&mldev->roc, ML_CFG);
+	reg_val = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);
 	reg_val &= ~ROC_ML_CFG_MLIP_ENA;
-	roc_ml_reg_write64(&mldev->roc, reg_val, ML_CFG);
-	roc_ml_mlip_reset(&mldev->roc, true);
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val, ML_CFG);
+	roc_ml_mlip_reset(&cn10k_mldev->roc, true);
 
 	mz = plt_memzone_lookup(FW_MEMZONE_NAME);
 	if (mz != NULL)
diff --git a/drivers/ml/cnxk/cn10k_ml_dev.h b/drivers/ml/cnxk/cn10k_ml_dev.h
index 4aaeecff03d..f9da1548c4a 100644
--- a/drivers/ml/cnxk/cn10k_ml_dev.h
+++ b/drivers/ml/cnxk/cn10k_ml_dev.h
@@ -9,6 +9,9 @@
 
 #include "cn10k_ml_ocm.h"
 
+/* Dummy Device ops */
+extern struct rte_ml_dev_ops ml_dev_dummy_ops;
+
 /* Marvell OCTEON CN10K ML PMD device name */
 #define MLDEV_NAME_CN10K_PMD ml_cn10k
 
@@ -36,17 +39,10 @@
 /* Maximum number of segments for IO data */
 #define ML_CN10K_MAX_SEGMENTS 1
 
-/* ML command timeout in seconds */
-#define ML_CN10K_CMD_TIMEOUT 5
-
 /* ML slow-path job flags */
 #define ML_CN10K_SP_FLAGS_OCM_NONRELOCATABLE BIT(0)
 #define ML_CN10K_SP_FLAGS_EXTENDED_LOAD_JD   BIT(1)
 
-/* Poll mode job state */
-#define ML_CN10K_POLL_JOB_START	 0
-#define ML_CN10K_POLL_JOB_FINISH 1
-
 /* Memory barrier macros */
 #if defined(RTE_ARCH_ARM)
 #define dmb_st ({ asm volatile("dmb st" : : : "memory"); })
@@ -56,6 +52,7 @@
 #define dsb_st
 #endif
 
+struct cnxk_ml_dev;
 struct cn10k_ml_req;
 struct cn10k_ml_qp;
 
@@ -68,21 +65,6 @@ enum cn10k_ml_job_type {
 	ML_CN10K_JOB_TYPE_FIRMWARE_SELFTEST,
 };
 
-/* Device configuration state enum */
-enum cn10k_ml_dev_state {
-	/* Probed and not configured */
-	ML_CN10K_DEV_STATE_PROBED = 0,
-
-	/* Configured */
-	ML_CN10K_DEV_STATE_CONFIGURED,
-
-	/* Started */
-	ML_CN10K_DEV_STATE_STARTED,
-
-	/* Closed */
-	ML_CN10K_DEV_STATE_CLOSED
-};
-
 /* Error types enumeration */
 enum cn10k_ml_error_etype {
 	/* 0x0 */ ML_ETYPE_NO_ERROR = 0, /* No error */
@@ -379,7 +361,7 @@ struct cn10k_ml_jd {
 /* ML firmware structure */
 struct cn10k_ml_fw {
 	/* Device reference */
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
 
 	/* Firmware file path */
 	const char *path;
@@ -485,27 +467,12 @@ struct cn10k_ml_dev {
 	/* Device ROC */
 	struct roc_ml roc;
 
-	/* Configuration state */
-	enum cn10k_ml_dev_state state;
-
 	/* Firmware */
 	struct cn10k_ml_fw fw;
 
 	/* OCM info */
 	struct cn10k_ml_ocm ocm;
 
-	/* Number of models loaded */
-	uint16_t nb_models_loaded;
-
-	/* Number of models unloaded */
-	uint16_t nb_models_unloaded;
-
-	/* Number of models started */
-	uint16_t nb_models_started;
-
-	/* Number of models stopped */
-	uint16_t nb_models_stopped;
-
 	/* Extended stats data */
 	struct cn10k_ml_xstats xstats;
 
@@ -528,7 +495,7 @@ struct cn10k_ml_dev {
 };
 
 uint64_t cn10k_ml_fw_flags_get(struct cn10k_ml_fw *fw);
-int cn10k_ml_fw_load(struct cn10k_ml_dev *mldev);
-void cn10k_ml_fw_unload(struct cn10k_ml_dev *mldev);
+int cn10k_ml_fw_load(struct cnxk_ml_dev *cnxk_mldev);
+void cn10k_ml_fw_unload(struct cnxk_ml_dev *cnxk_mldev);
 
 #endif /* _CN10K_ML_DEV_H_ */
diff --git a/drivers/ml/cnxk/cn10k_ml_model.c b/drivers/ml/cnxk/cn10k_ml_model.c
index e0b750cd8ef..d146535866a 100644
--- a/drivers/ml/cnxk/cn10k_ml_model.c
+++ b/drivers/ml/cnxk/cn10k_ml_model.c
@@ -10,6 +10,8 @@
 #include "cn10k_ml_model.h"
 #include "cn10k_ml_ocm.h"
 
+#include "cnxk_ml_dev.h"
+
 static enum rte_ml_io_type
 cn10k_ml_io_type_map(uint8_t type)
 {
@@ -461,7 +463,7 @@ cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer, uint8_
 }
 
 int
-cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, uint8_t *buffer,
+cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *cn10k_mldev, uint16_t model_id, uint8_t *buffer,
 			       uint16_t *wb_pages, uint16_t *scratch_pages)
 {
 	struct cn10k_ml_model_metadata *metadata;
@@ -470,7 +472,7 @@ cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, ui
 	uint64_t wb_size;
 
 	metadata = (struct cn10k_ml_model_metadata *)buffer;
-	ocm = &mldev->ocm;
+	ocm = &cn10k_mldev->ocm;
 
 	/* Assume wb_size is zero for non-relocatable models */
 	if (metadata->model.ocm_relocatable)
@@ -494,11 +496,11 @@ cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, ui
 		   scratch_size, *scratch_pages);
 
 	/* Check if the model can be loaded on OCM */
-	if ((*wb_pages + *scratch_pages) > mldev->ocm.num_pages) {
+	if ((*wb_pages + *scratch_pages) > cn10k_mldev->ocm.num_pages) {
 		plt_err("Cannot create the model, OCM relocatable = %u",
 			metadata->model.ocm_relocatable);
 		plt_err("wb_pages (%u) + scratch_pages (%u) > %u", *wb_pages, *scratch_pages,
-			mldev->ocm.num_pages);
+			cn10k_mldev->ocm.num_pages);
 		return -ENOMEM;
 	}
 
@@ -506,8 +508,8 @@ cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, ui
 	 * prevent the library from allocating the remaining space on the tile to other models.
 	 */
 	if (!metadata->model.ocm_relocatable)
-		*scratch_pages =
-			PLT_MAX(PLT_U64_CAST(*scratch_pages), PLT_U64_CAST(mldev->ocm.num_pages));
+		*scratch_pages = PLT_MAX(PLT_U64_CAST(*scratch_pages),
+					 PLT_U64_CAST(cn10k_mldev->ocm.num_pages));
 
 	return 0;
 }
diff --git a/drivers/ml/cnxk/cn10k_ml_model.h b/drivers/ml/cnxk/cn10k_ml_model.h
index 4cc0744891b..3128b28db73 100644
--- a/drivers/ml/cnxk/cn10k_ml_model.h
+++ b/drivers/ml/cnxk/cn10k_ml_model.h
@@ -13,6 +13,8 @@
 #include "cn10k_ml_ocm.h"
 #include "cn10k_ml_ops.h"
 
+struct cnxk_ml_dev;
+
 /* Model state */
 enum cn10k_ml_model_state {
 	ML_CN10K_MODEL_STATE_LOADED,
@@ -489,7 +491,7 @@ struct cn10k_ml_model_stats {
 /* Model Object */
 struct cn10k_ml_model {
 	/* Device reference */
-	struct cn10k_ml_dev *mldev;
+	struct cnxk_ml_dev *mldev;
 
 	/* Name */
 	char name[RTE_ML_STR_MAX];
@@ -537,8 +539,8 @@ int cn10k_ml_model_metadata_check(uint8_t *buffer, uint64_t size);
 void cn10k_ml_model_metadata_update(struct cn10k_ml_model_metadata *metadata);
 void cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer,
 				uint8_t *base_dma_addr);
-int cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *mldev, uint16_t model_id, uint8_t *buffer,
-				   uint16_t *wb_pages, uint16_t *scratch_pages);
+int cn10k_ml_model_ocm_pages_count(struct cn10k_ml_dev *cn10k_mldev, uint16_t model_id,
+				   uint8_t *buffer, uint16_t *wb_pages, uint16_t *scratch_pages);
 void cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model);
 
 #endif /* _CN10K_ML_MODEL_H_ */
diff --git a/drivers/ml/cnxk/cn10k_ml_ocm.c b/drivers/ml/cnxk/cn10k_ml_ocm.c
index 93505c9c09b..d0f716bccea 100644
--- a/drivers/ml/cnxk/cn10k_ml_ocm.c
+++ b/drivers/ml/cnxk/cn10k_ml_ocm.c
@@ -4,11 +4,13 @@
 
 #include <rte_mldev_pmd.h>
 
+#include <roc_api.h>
+
 #include "cn10k_ml_dev.h"
 #include "cn10k_ml_model.h"
 #include "cn10k_ml_ocm.h"
 
-#include "roc_api.h"
+#include "cnxk_ml_dev.h"
 
 /* OCM macros */
 #define BYTE_LEN	   8
@@ -217,7 +219,8 @@ int
 cn10k_ml_ocm_tilemask_find(struct rte_ml_dev *dev, uint8_t num_tiles, uint16_t wb_pages,
 			   uint16_t scratch_pages, uint64_t *tilemask)
 {
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_ocm *ocm;
 
 	uint16_t used_scratch_pages_max;
@@ -236,8 +239,9 @@ cn10k_ml_ocm_tilemask_find(struct rte_ml_dev *dev, uint8_t num_tiles, uint16_t w
 	int max_slot_sz;
 	int page_id;
 
-	mldev = dev->data->dev_private;
-	ocm = &mldev->ocm;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
+	ocm = &cn10k_mldev->ocm;
 
 	if (num_tiles > ML_CN10K_OCM_NUMTILES) {
 		plt_err("Invalid num_tiles = %u (> %u)", num_tiles, ML_CN10K_OCM_NUMTILES);
@@ -254,8 +258,8 @@ cn10k_ml_ocm_tilemask_find(struct rte_ml_dev *dev, uint8_t num_tiles, uint16_t w
 	tile_start = 0;
 	search_end_tile = ocm->num_tiles - num_tiles;
 
-	/* allocate for local ocm mask */
-	local_ocm_mask = rte_zmalloc("local_ocm_mask", mldev->ocm.mask_words, RTE_CACHE_LINE_SIZE);
+	/* Allocate for local ocm mask */
+	local_ocm_mask = rte_zmalloc("local_ocm_mask", ocm->mask_words, RTE_CACHE_LINE_SIZE);
 	if (local_ocm_mask == NULL) {
 		plt_err("Unable to allocate memory for local_ocm_mask");
 		return -1;
@@ -271,7 +275,7 @@ cn10k_ml_ocm_tilemask_find(struct rte_ml_dev *dev, uint8_t num_tiles, uint16_t w
 			PLT_MAX(ocm->tile_ocm_info[tile_id].last_wb_page, used_last_wb_page_max);
 	}
 
-	memset(local_ocm_mask, 0, mldev->ocm.mask_words);
+	memset(local_ocm_mask, 0, ocm->mask_words);
 	for (tile_id = tile_start; tile_id < tile_start + num_tiles; tile_id++) {
 		for (word_id = 0; word_id < ocm->mask_words; word_id++)
 			local_ocm_mask[word_id] |= ocm->tile_ocm_info[tile_id].ocm_mask[word_id];
@@ -333,8 +337,9 @@ void
 cn10k_ml_ocm_reserve_pages(struct rte_ml_dev *dev, uint16_t model_id, uint64_t tilemask,
 			   int wb_page_start, uint16_t wb_pages, uint16_t scratch_pages)
 {
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_ocm *ocm;
 
 	int scratch_page_start;
@@ -345,8 +350,9 @@ cn10k_ml_ocm_reserve_pages(struct rte_ml_dev *dev, uint16_t model_id, uint64_t t
 	int tile_id;
 	int page_id;
 
-	mldev = dev->data->dev_private;
-	ocm = &mldev->ocm;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
+	ocm = &cn10k_mldev->ocm;
 	model = dev->data->models[model_id];
 
 	/* Get first set bit, tile_start */
@@ -391,8 +397,9 @@ void
 cn10k_ml_ocm_free_pages(struct rte_ml_dev *dev, uint16_t model_id)
 {
 	struct cn10k_ml_model *local_model;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_ocm *ocm;
 
 	int scratch_resize_pages;
@@ -404,8 +411,9 @@ cn10k_ml_ocm_free_pages(struct rte_ml_dev *dev, uint16_t model_id)
 	int page_id;
 	uint16_t i;
 
-	mldev = dev->data->dev_private;
-	ocm = &mldev->ocm;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
+	ocm = &cn10k_mldev->ocm;
 	model = dev->data->models[model_id];
 
 	/* Update OCM info for WB memory */
@@ -453,35 +461,37 @@ cn10k_ml_ocm_pagemask_to_str(struct cn10k_ml_ocm_tile_info *tile_info, uint16_t
 	char *p = str;
 	int word;
 
-	/* add prefix 0x */
+	/* Add prefix 0x */
 	*p++ = '0';
 	*p++ = 'x';
 
-	/* build one word at a time */
+	/* Build hex string */
 	for (word = nwords - 1; word >= 0; word--) {
 		sprintf(p, "%02X", tile_info->ocm_mask[word]);
 		p += 2;
 	}
 
-	/* terminate */
+	/* Terminate */
 	*p++ = 0;
 }
 
 void
 cn10k_ml_ocm_print(struct rte_ml_dev *dev, FILE *fp)
 {
-	char *str;
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_ocm *ocm;
 	uint8_t tile_id;
 	uint8_t word_id;
 	int wb_pages;
+	char *str;
 
-	mldev = dev->data->dev_private;
-	ocm = &mldev->ocm;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
+	ocm = &cn10k_mldev->ocm;
 
-	/* nibbles + prefix '0x' */
-	str = rte_zmalloc("ocm_mask_str", mldev->ocm.num_pages / 4 + 2, RTE_CACHE_LINE_SIZE);
+	/* Nibbles + prefix '0x' */
+	str = rte_zmalloc("ocm_mask_str", ocm->num_pages / 4 + 2, RTE_CACHE_LINE_SIZE);
 	if (str == NULL) {
 		plt_err("Unable to allocate memory for ocm_mask_str");
 		return;
@@ -492,7 +502,7 @@ cn10k_ml_ocm_print(struct rte_ml_dev *dev, FILE *fp)
 		cn10k_ml_ocm_pagemask_to_str(&ocm->tile_ocm_info[tile_id], ocm->mask_words, str);
 
 		wb_pages = 0 - ocm->tile_ocm_info[tile_id].scratch_pages;
-		for (word_id = 0; word_id < mldev->ocm.mask_words; word_id++)
+		for (word_id = 0; word_id < ocm->mask_words; word_id++)
 			wb_pages +=
 				__builtin_popcount(ocm->tile_ocm_info[tile_id].ocm_mask[word_id]);
 
diff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c
index 11531afd8c1..3385bf50c0d 100644
--- a/drivers/ml/cnxk/cn10k_ml_ops.c
+++ b/drivers/ml/cnxk/cn10k_ml_ops.c
@@ -11,6 +11,8 @@
 #include "cn10k_ml_model.h"
 #include "cn10k_ml_ops.h"
 
+#include "cnxk_ml_dev.h"
+
 /* ML model macros */
 #define CN10K_ML_MODEL_MEMZONE_NAME "ml_cn10k_model_mz"
 
@@ -85,7 +87,7 @@ cn10k_ml_set_poll_addr(struct cn10k_ml_req *req)
 static inline void
 cn10k_ml_set_poll_ptr(struct cn10k_ml_req *req)
 {
-	plt_write64(ML_CN10K_POLL_JOB_START, req->compl_W1);
+	plt_write64(ML_CNXK_POLL_JOB_START, req->compl_W1);
 }
 
 static inline uint64_t
@@ -175,7 +177,7 @@ cn10k_ml_qp_create(const struct rte_ml_dev *dev, uint16_t qp_id, uint32_t nb_des
 	qp->queue.reqs = (struct cn10k_ml_req *)va;
 	qp->queue.head = 0;
 	qp->queue.tail = 0;
-	qp->queue.wait_cycles = ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();
+	qp->queue.wait_cycles = ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();
 	qp->nb_desc = nb_desc;
 	qp->stats.enqueued_count = 0;
 	qp->stats.dequeued_count = 0;
@@ -199,16 +201,17 @@ cn10k_ml_qp_create(const struct rte_ml_dev *dev, uint16_t qp_id, uint32_t nb_des
 static void
 cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)
 {
-
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_ocm *ocm;
 	char str[STR_LEN];
 	uint8_t i;
 	uint8_t j;
 
-	mldev = dev->data->dev_private;
-	ocm = &mldev->ocm;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
+	ocm = &cn10k_mldev->ocm;
 	model = dev->data->models[model_id];
 
 	/* Print debug info */
@@ -249,7 +252,7 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)
 		fprintf(fp, "%*s : 0x%0*" PRIx64 "\n", FIELD_LEN, "tilemask",
 			ML_CN10K_OCM_NUMTILES / 4, model->model_mem_map.tilemask);
 		fprintf(fp, "%*s : 0x%" PRIx64 "\n", FIELD_LEN, "ocm_wb_start",
-			model->model_mem_map.wb_page_start * mldev->ocm.page_size);
+			model->model_mem_map.wb_page_start * cn10k_mldev->ocm.page_size);
 	}
 
 	fprintf(fp, "%*s : %u\n", FIELD_LEN, "num_inputs", model->metadata.model.num_input);
@@ -325,7 +328,7 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)
 }
 
 static void
-cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *mldev, struct cn10k_ml_model *model,
+cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cn10k_ml_model *model,
 				struct cn10k_ml_req *req, enum cn10k_ml_job_type job_type)
 {
 	struct cn10k_ml_model_metadata *metadata;
@@ -340,7 +343,7 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *mldev, struct cn10k_ml_mode
 	req->jd.hdr.model_id = model->model_id;
 	req->jd.hdr.job_type = job_type;
 	req->jd.hdr.fp_flags = 0x0;
-	req->jd.hdr.result = roc_ml_addr_ap2mlip(&mldev->roc, &req->result);
+	req->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->result);
 
 	if (job_type == ML_CN10K_JOB_TYPE_MODEL_START) {
 		if (!model->metadata.model.ocm_relocatable)
@@ -350,9 +353,9 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *mldev, struct cn10k_ml_mode
 
 		req->jd.hdr.sp_flags |= ML_CN10K_SP_FLAGS_EXTENDED_LOAD_JD;
 		req->jd.model_start.extended_args =
-			PLT_U64_CAST(roc_ml_addr_ap2mlip(&mldev->roc, &req->extended_args));
+			PLT_U64_CAST(roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->extended_args));
 		req->jd.model_start.model_dst_ddr_addr =
-			PLT_U64_CAST(roc_ml_addr_ap2mlip(&mldev->roc, addr->init_run_addr));
+			PLT_U64_CAST(roc_ml_addr_ap2mlip(&cn10k_mldev->roc, addr->init_run_addr));
 		req->jd.model_start.model_init_offset = 0x0;
 		req->jd.model_start.model_main_offset = metadata->init_model.file_size;
 		req->jd.model_start.model_finish_offset =
@@ -372,7 +375,7 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *mldev, struct cn10k_ml_mode
 		req->jd.model_start.ocm_wb_range_start = metadata->model.ocm_wb_range_start;
 		req->jd.model_start.ocm_wb_range_end = metadata->model.ocm_wb_range_end;
 		req->jd.model_start.ddr_wb_base_address = PLT_U64_CAST(roc_ml_addr_ap2mlip(
-			&mldev->roc,
+			&cn10k_mldev->roc,
 			PLT_PTR_ADD(addr->finish_load_addr, metadata->finish_model.file_size)));
 		req->jd.model_start.ddr_wb_range_start = metadata->model.ddr_wb_range_start;
 		req->jd.model_start.ddr_wb_range_end = metadata->model.ddr_wb_range_end;
@@ -383,7 +386,7 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *mldev, struct cn10k_ml_mode
 		req->jd.model_start.output.s.ddr_range_end = metadata->model.ddr_output_range_end;
 
 		req->extended_args.start.ddr_scratch_base_address = PLT_U64_CAST(
-			roc_ml_addr_ap2mlip(&mldev->roc, model->addr.scratch_base_addr));
+			roc_ml_addr_ap2mlip(&cn10k_mldev->roc, model->addr.scratch_base_addr));
 		req->extended_args.start.ddr_scratch_range_start =
 			metadata->model.ddr_scratch_range_start;
 		req->extended_args.start.ddr_scratch_range_end =
@@ -392,24 +395,20 @@ cn10k_ml_prep_sp_job_descriptor(struct cn10k_ml_dev *mldev, struct cn10k_ml_mode
 }
 
 static __rte_always_inline void
-cn10k_ml_prep_fp_job_descriptor(struct rte_ml_dev *dev, struct cn10k_ml_req *req,
+cn10k_ml_prep_fp_job_descriptor(struct cn10k_ml_dev *cn10k_mldev, struct cn10k_ml_req *req,
 				struct rte_ml_op *op)
 {
-	struct cn10k_ml_dev *mldev;
-
-	mldev = dev->data->dev_private;
-
 	req->jd.hdr.jce.w0.u64 = 0;
 	req->jd.hdr.jce.w1.u64 = req->compl_W1;
 	req->jd.hdr.model_id = op->model_id;
 	req->jd.hdr.job_type = ML_CN10K_JOB_TYPE_MODEL_RUN;
 	req->jd.hdr.fp_flags = ML_FLAGS_POLL_COMPL;
 	req->jd.hdr.sp_flags = 0x0;
-	req->jd.hdr.result = roc_ml_addr_ap2mlip(&mldev->roc, &req->result);
+	req->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->result);
 	req->jd.model_run.input_ddr_addr =
-		PLT_U64_CAST(roc_ml_addr_ap2mlip(&mldev->roc, op->input[0]->addr));
+		PLT_U64_CAST(roc_ml_addr_ap2mlip(&cn10k_mldev->roc, op->input[0]->addr));
 	req->jd.model_run.output_ddr_addr =
-		PLT_U64_CAST(roc_ml_addr_ap2mlip(&mldev->roc, op->output[0]->addr));
+		PLT_U64_CAST(roc_ml_addr_ap2mlip(&cn10k_mldev->roc, op->output[0]->addr));
 	req->jd.model_run.num_batches = op->nb_batches;
 }
 
@@ -436,66 +435,69 @@ static const struct xstat_info model_stats[] = {
 static int
 cn10k_ml_xstats_init(struct rte_ml_dev *dev)
 {
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	uint16_t nb_stats;
 	uint16_t stat_id;
 	uint16_t model;
 	uint16_t i;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 
 	/* Allocate memory for xstats entries. Don't allocate during reconfigure */
 	nb_stats = RTE_DIM(device_stats) + ML_CN10K_MAX_MODELS * RTE_DIM(model_stats);
-	if (mldev->xstats.entries == NULL)
-		mldev->xstats.entries = rte_zmalloc("cn10k_ml_xstats",
-						    sizeof(struct cn10k_ml_xstats_entry) * nb_stats,
-						    PLT_CACHE_LINE_SIZE);
+	if (cn10k_mldev->xstats.entries == NULL)
+		cn10k_mldev->xstats.entries = rte_zmalloc(
+			"cn10k_ml_xstats", sizeof(struct cn10k_ml_xstats_entry) * nb_stats,
+			PLT_CACHE_LINE_SIZE);
 
-	if (mldev->xstats.entries == NULL)
+	if (cn10k_mldev->xstats.entries == NULL)
 		return -ENOMEM;
 
 	/* Initialize device xstats */
 	stat_id = 0;
 	for (i = 0; i < RTE_DIM(device_stats); i++) {
-		mldev->xstats.entries[stat_id].map.id = stat_id;
-		snprintf(mldev->xstats.entries[stat_id].map.name,
-			 sizeof(mldev->xstats.entries[stat_id].map.name), "%s",
+		cn10k_mldev->xstats.entries[stat_id].map.id = stat_id;
+		snprintf(cn10k_mldev->xstats.entries[stat_id].map.name,
+			 sizeof(cn10k_mldev->xstats.entries[stat_id].map.name), "%s",
 			 device_stats[i].name);
 
-		mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_DEVICE;
-		mldev->xstats.entries[stat_id].type = device_stats[i].type;
-		mldev->xstats.entries[stat_id].fn_id = CN10K_ML_XSTATS_FN_DEVICE;
-		mldev->xstats.entries[stat_id].obj_idx = 0;
-		mldev->xstats.entries[stat_id].reset_allowed = device_stats[i].reset_allowed;
+		cn10k_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_DEVICE;
+		cn10k_mldev->xstats.entries[stat_id].type = device_stats[i].type;
+		cn10k_mldev->xstats.entries[stat_id].fn_id = CN10K_ML_XSTATS_FN_DEVICE;
+		cn10k_mldev->xstats.entries[stat_id].obj_idx = 0;
+		cn10k_mldev->xstats.entries[stat_id].reset_allowed = device_stats[i].reset_allowed;
 		stat_id++;
 	}
-	mldev->xstats.count_mode_device = stat_id;
+	cn10k_mldev->xstats.count_mode_device = stat_id;
 
 	/* Initialize model xstats */
 	for (model = 0; model < ML_CN10K_MAX_MODELS; model++) {
-		mldev->xstats.offset_for_model[model] = stat_id;
+		cn10k_mldev->xstats.offset_for_model[model] = stat_id;
 
 		for (i = 0; i < RTE_DIM(model_stats); i++) {
-			mldev->xstats.entries[stat_id].map.id = stat_id;
-			mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_MODEL;
-			mldev->xstats.entries[stat_id].type = model_stats[i].type;
-			mldev->xstats.entries[stat_id].fn_id = CN10K_ML_XSTATS_FN_MODEL;
-			mldev->xstats.entries[stat_id].obj_idx = model;
-			mldev->xstats.entries[stat_id].reset_allowed = model_stats[i].reset_allowed;
+			cn10k_mldev->xstats.entries[stat_id].map.id = stat_id;
+			cn10k_mldev->xstats.entries[stat_id].mode = RTE_ML_DEV_XSTATS_MODEL;
+			cn10k_mldev->xstats.entries[stat_id].type = model_stats[i].type;
+			cn10k_mldev->xstats.entries[stat_id].fn_id = CN10K_ML_XSTATS_FN_MODEL;
+			cn10k_mldev->xstats.entries[stat_id].obj_idx = model;
+			cn10k_mldev->xstats.entries[stat_id].reset_allowed =
+				model_stats[i].reset_allowed;
 
 			/* Name of xstat is updated during model load */
-			snprintf(mldev->xstats.entries[stat_id].map.name,
-				 sizeof(mldev->xstats.entries[stat_id].map.name), "Model-%u-%s",
-				 model, model_stats[i].name);
+			snprintf(cn10k_mldev->xstats.entries[stat_id].map.name,
+				 sizeof(cn10k_mldev->xstats.entries[stat_id].map.name),
+				 "Model-%u-%s", model, model_stats[i].name);
 
 			stat_id++;
 		}
 
-		mldev->xstats.count_per_model[model] = RTE_DIM(model_stats);
+		cn10k_mldev->xstats.count_per_model[model] = RTE_DIM(model_stats);
 	}
 
-	mldev->xstats.count_mode_model = stat_id - mldev->xstats.count_mode_device;
-	mldev->xstats.count = stat_id;
+	cn10k_mldev->xstats.count_mode_model = stat_id - cn10k_mldev->xstats.count_mode_device;
+	cn10k_mldev->xstats.count = stat_id;
 
 	return 0;
 }
@@ -503,28 +505,32 @@ cn10k_ml_xstats_init(struct rte_ml_dev *dev)
 static void
 cn10k_ml_xstats_uninit(struct rte_ml_dev *dev)
 {
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 
-	rte_free(mldev->xstats.entries);
-	mldev->xstats.entries = NULL;
+	rte_free(cn10k_mldev->xstats.entries);
+	cn10k_mldev->xstats.entries = NULL;
 
-	mldev->xstats.count = 0;
+	cn10k_mldev->xstats.count = 0;
 }
 
 static void
 cn10k_ml_xstats_model_name_update(struct rte_ml_dev *dev, uint16_t model_id)
 {
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 	uint16_t rclk_freq;
 	uint16_t sclk_freq;
 	uint16_t stat_id;
 	char suffix[8];
 	uint16_t i;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 	model = dev->data->models[model_id];
 	stat_id = RTE_DIM(device_stats) + model_id * RTE_DIM(model_stats);
 
@@ -536,8 +542,8 @@ cn10k_ml_xstats_model_name_update(struct rte_ml_dev *dev, uint16_t model_id)
 
 	/* Update xstat name based on model name and sclk availability */
 	for (i = 0; i < RTE_DIM(model_stats); i++) {
-		snprintf(mldev->xstats.entries[stat_id].map.name,
-			 sizeof(mldev->xstats.entries[stat_id].map.name), "%s-%s-%s",
+		snprintf(cn10k_mldev->xstats.entries[stat_id].map.name,
+			 sizeof(cn10k_mldev->xstats.entries[stat_id].map.name), "%s-%s-%s",
 			 model->metadata.model.name, model_stats[i].name, suffix);
 		stat_id++;
 	}
@@ -547,19 +553,19 @@ static uint64_t
 cn10k_ml_dev_xstat_get(struct rte_ml_dev *dev, uint16_t obj_idx __rte_unused,
 		       enum cn10k_ml_xstats_type type)
 {
-	struct cn10k_ml_dev *mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
 
 	switch (type) {
 	case nb_models_loaded:
-		return mldev->nb_models_loaded;
+		return cnxk_mldev->nb_models_loaded;
 	case nb_models_unloaded:
-		return mldev->nb_models_unloaded;
+		return cnxk_mldev->nb_models_unloaded;
 	case nb_models_started:
-		return mldev->nb_models_started;
+		return cnxk_mldev->nb_models_started;
 	case nb_models_stopped:
-		return mldev->nb_models_stopped;
+		return cnxk_mldev->nb_models_stopped;
 	default:
 		return -1;
 	}
@@ -651,15 +657,17 @@ static int
 cn10k_ml_device_xstats_reset(struct rte_ml_dev *dev, const uint16_t stat_ids[], uint16_t nb_ids)
 {
 	struct cn10k_ml_xstats_entry *xs;
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	uint16_t nb_stats;
 	uint16_t stat_id;
 	uint32_t i;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 
 	if (stat_ids == NULL)
-		nb_stats = mldev->xstats.count_mode_device;
+		nb_stats = cn10k_mldev->xstats.count_mode_device;
 	else
 		nb_stats = nb_ids;
 
@@ -669,10 +677,10 @@ cn10k_ml_device_xstats_reset(struct rte_ml_dev *dev, const uint16_t stat_ids[],
 		else
 			stat_id = stat_ids[i];
 
-		if (stat_id >= mldev->xstats.count_mode_device)
+		if (stat_id >= cn10k_mldev->xstats.count_mode_device)
 			return -EINVAL;
 
-		xs = &mldev->xstats.entries[stat_id];
+		xs = &cn10k_mldev->xstats.entries[stat_id];
 		if (!xs->reset_allowed)
 			continue;
 
@@ -740,15 +748,17 @@ cn10k_ml_model_xstats_reset(struct rte_ml_dev *dev, int32_t model_id, const uint
 			    uint16_t nb_ids)
 {
 	struct cn10k_ml_xstats_entry *xs;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 	int32_t lcl_model_id = 0;
 	uint16_t start_id;
 	uint16_t end_id;
 	int32_t i;
 	int32_t j;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 	for (i = 0; i < ML_CN10K_MAX_MODELS; i++) {
 		if (model_id == -1) {
 			model = dev->data->models[i];
@@ -765,12 +775,13 @@ cn10k_ml_model_xstats_reset(struct rte_ml_dev *dev, int32_t model_id, const uint
 			}
 		}
 
-		start_id = mldev->xstats.offset_for_model[i];
-		end_id = mldev->xstats.offset_for_model[i] + mldev->xstats.count_per_model[i] - 1;
+		start_id = cn10k_mldev->xstats.offset_for_model[i];
+		end_id = cn10k_mldev->xstats.offset_for_model[i] +
+			 cn10k_mldev->xstats.count_per_model[i] - 1;
 
 		if (stat_ids == NULL) {
 			for (j = start_id; j <= end_id; j++) {
-				xs = &mldev->xstats.entries[j];
+				xs = &cn10k_mldev->xstats.entries[j];
 				cn10k_ml_reset_model_stat(dev, i, xs->type);
 			}
 		} else {
@@ -780,7 +791,7 @@ cn10k_ml_model_xstats_reset(struct rte_ml_dev *dev, int32_t model_id, const uint
 						stat_ids[j], lcl_model_id);
 					return -EINVAL;
 				}
-				xs = &mldev->xstats.entries[stat_ids[j]];
+				xs = &cn10k_mldev->xstats.entries[stat_ids[j]];
 				cn10k_ml_reset_model_stat(dev, i, xs->type);
 			}
 		}
@@ -854,17 +865,19 @@ cn10k_ml_cache_model_data(struct rte_ml_dev *dev, uint16_t model_id)
 static int
 cn10k_ml_dev_info_get(struct rte_ml_dev *dev, struct rte_ml_dev_info *dev_info)
 {
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 
 	if (dev_info == NULL)
 		return -EINVAL;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 
 	memset(dev_info, 0, sizeof(struct rte_ml_dev_info));
 	dev_info->driver_name = dev->device->driver->name;
 	dev_info->max_models = ML_CN10K_MAX_MODELS;
-	if (mldev->hw_queue_lock)
+	if (cn10k_mldev->hw_queue_lock)
 		dev_info->max_queue_pairs = ML_CN10K_MAX_QP_PER_DEVICE_SL;
 	else
 		dev_info->max_queue_pairs = ML_CN10K_MAX_QP_PER_DEVICE_LF;
@@ -881,8 +894,9 @@ static int
 cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *conf)
 {
 	struct rte_ml_dev_info dev_info;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_ocm *ocm;
 	struct cn10k_ml_qp *qp;
 	uint16_t model_id;
@@ -895,7 +909,8 @@ cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c
 		return -EINVAL;
 
 	/* Get CN10K device handle */
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 
 	cn10k_ml_dev_info_get(dev, &dev_info);
 	if (conf->nb_models > dev_info.max_models) {
@@ -908,21 +923,21 @@ cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c
 		return -EINVAL;
 	}
 
-	if (mldev->state == ML_CN10K_DEV_STATE_PROBED) {
+	if (cnxk_mldev->state == ML_CNXK_DEV_STATE_PROBED) {
 		plt_ml_dbg("Configuring ML device, nb_queue_pairs = %u, nb_models = %u",
 			   conf->nb_queue_pairs, conf->nb_models);
 
 		/* Load firmware */
-		ret = cn10k_ml_fw_load(mldev);
+		ret = cn10k_ml_fw_load(cnxk_mldev);
 		if (ret != 0)
 			return ret;
-	} else if (mldev->state == ML_CN10K_DEV_STATE_CONFIGURED) {
+	} else if (cnxk_mldev->state == ML_CNXK_DEV_STATE_CONFIGURED) {
 		plt_ml_dbg("Re-configuring ML device, nb_queue_pairs = %u, nb_models = %u",
 			   conf->nb_queue_pairs, conf->nb_models);
-	} else if (mldev->state == ML_CN10K_DEV_STATE_STARTED) {
+	} else if (cnxk_mldev->state == ML_CNXK_DEV_STATE_STARTED) {
 		plt_err("Device can't be reconfigured in started state\n");
 		return -ENOTSUP;
-	} else if (mldev->state == ML_CN10K_DEV_STATE_CLOSED) {
+	} else if (cnxk_mldev->state == ML_CNXK_DEV_STATE_CLOSED) {
 		plt_err("Device can't be reconfigured after close\n");
 		return -ENOTSUP;
 	}
@@ -1013,10 +1028,10 @@ cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c
 	}
 	dev->data->nb_models = conf->nb_models;
 
-	ocm = &mldev->ocm;
+	ocm = &cn10k_mldev->ocm;
 	ocm->num_tiles = ML_CN10K_OCM_NUMTILES;
 	ocm->size_per_tile = ML_CN10K_OCM_TILESIZE;
-	ocm->page_size = mldev->ocm_page_size;
+	ocm->page_size = cn10k_mldev->ocm_page_size;
 	ocm->num_pages = ocm->size_per_tile / ocm->page_size;
 	ocm->mask_words = ocm->num_pages / (8 * sizeof(uint8_t));
 
@@ -1044,25 +1059,25 @@ cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c
 	}
 
 	/* Set JCMDQ enqueue function */
-	if (mldev->hw_queue_lock == 1)
-		mldev->ml_jcmdq_enqueue = roc_ml_jcmdq_enqueue_sl;
+	if (cn10k_mldev->hw_queue_lock == 1)
+		cn10k_mldev->ml_jcmdq_enqueue = roc_ml_jcmdq_enqueue_sl;
 	else
-		mldev->ml_jcmdq_enqueue = roc_ml_jcmdq_enqueue_lf;
+		cn10k_mldev->ml_jcmdq_enqueue = roc_ml_jcmdq_enqueue_lf;
 
 	/* Set polling function pointers */
-	mldev->set_poll_addr = cn10k_ml_set_poll_addr;
-	mldev->set_poll_ptr = cn10k_ml_set_poll_ptr;
-	mldev->get_poll_ptr = cn10k_ml_get_poll_ptr;
+	cn10k_mldev->set_poll_addr = cn10k_ml_set_poll_addr;
+	cn10k_mldev->set_poll_ptr = cn10k_ml_set_poll_ptr;
+	cn10k_mldev->get_poll_ptr = cn10k_ml_get_poll_ptr;
 
 	dev->enqueue_burst = cn10k_ml_enqueue_burst;
 	dev->dequeue_burst = cn10k_ml_dequeue_burst;
 	dev->op_error_get = cn10k_ml_op_error_get;
 
-	mldev->nb_models_loaded = 0;
-	mldev->nb_models_started = 0;
-	mldev->nb_models_stopped = 0;
-	mldev->nb_models_unloaded = 0;
-	mldev->state = ML_CN10K_DEV_STATE_CONFIGURED;
+	cnxk_mldev->nb_models_loaded = 0;
+	cnxk_mldev->nb_models_started = 0;
+	cnxk_mldev->nb_models_stopped = 0;
+	cnxk_mldev->nb_models_unloaded = 0;
+	cnxk_mldev->state = ML_CNXK_DEV_STATE_CONFIGURED;
 
 	return 0;
 
@@ -1077,8 +1092,9 @@ cn10k_ml_dev_configure(struct rte_ml_dev *dev, const struct rte_ml_dev_config *c
 static int
 cn10k_ml_dev_close(struct rte_ml_dev *dev)
 {
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_qp *qp;
 	uint16_t model_id;
 	uint16_t qp_id;
@@ -1086,10 +1102,11 @@ cn10k_ml_dev_close(struct rte_ml_dev *dev)
 	if (dev == NULL)
 		return -EINVAL;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 
 	/* Release ocm_mask memory */
-	rte_free(mldev->ocm.ocm_mask);
+	rte_free(cn10k_mldev->ocm.ocm_mask);
 
 	/* Stop and unload all models */
 	for (model_id = 0; model_id < dev->data->nb_models; model_id++) {
@@ -1125,21 +1142,21 @@ cn10k_ml_dev_close(struct rte_ml_dev *dev)
 	cn10k_ml_xstats_uninit(dev);
 
 	/* Unload firmware */
-	cn10k_ml_fw_unload(mldev);
+	cn10k_ml_fw_unload(cnxk_mldev);
 
 	/* Clear scratch registers */
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_WORK_PTR);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_FW_CTRL);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C0);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C0);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C1);
-	roc_ml_reg_write64(&mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C1);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_WORK_PTR);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_FW_CTRL);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C0);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C0);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_HEAD_C1);
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_SCRATCH_DBG_BUFFER_TAIL_C1);
 
 	/* Reset ML_MLR_BASE */
-	roc_ml_reg_write64(&mldev->roc, 0, ML_MLR_BASE);
-	plt_ml_dbg("ML_MLR_BASE = 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_MLR_BASE));
+	roc_ml_reg_write64(&cn10k_mldev->roc, 0, ML_MLR_BASE);
+	plt_ml_dbg("ML_MLR_BASE = 0x%016lx", roc_ml_reg_read64(&cn10k_mldev->roc, ML_MLR_BASE));
 
-	mldev->state = ML_CN10K_DEV_STATE_CLOSED;
+	cnxk_mldev->state = ML_CNXK_DEV_STATE_CLOSED;
 
 	/* Remove PCI device */
 	return rte_dev_remove(dev->device);
@@ -1148,17 +1165,19 @@ cn10k_ml_dev_close(struct rte_ml_dev *dev)
 static int
 cn10k_ml_dev_start(struct rte_ml_dev *dev)
 {
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	uint64_t reg_val64;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 
-	reg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);
+	reg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);
 	reg_val64 |= ROC_ML_CFG_ENA;
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);
-	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_CFG));
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);
+	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));
 
-	mldev->state = ML_CN10K_DEV_STATE_STARTED;
+	cnxk_mldev->state = ML_CNXK_DEV_STATE_STARTED;
 
 	return 0;
 }
@@ -1166,17 +1185,19 @@ cn10k_ml_dev_start(struct rte_ml_dev *dev)
 static int
 cn10k_ml_dev_stop(struct rte_ml_dev *dev)
 {
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	uint64_t reg_val64;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 
-	reg_val64 = roc_ml_reg_read64(&mldev->roc, ML_CFG);
+	reg_val64 = roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG);
 	reg_val64 &= ~ROC_ML_CFG_ENA;
-	roc_ml_reg_write64(&mldev->roc, reg_val64, ML_CFG);
-	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&mldev->roc, ML_CFG));
+	roc_ml_reg_write64(&cn10k_mldev->roc, reg_val64, ML_CFG);
+	plt_ml_dbg("ML_CFG => 0x%016lx", roc_ml_reg_read64(&cn10k_mldev->roc, ML_CFG));
 
-	mldev->state = ML_CN10K_DEV_STATE_CONFIGURED;
+	cnxk_mldev->state = ML_CNXK_DEV_STATE_CONFIGURED;
 
 	return 0;
 }
@@ -1259,22 +1280,24 @@ cn10k_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mod
 			      int32_t model_id, struct rte_ml_dev_xstats_map *xstats_map,
 			      uint32_t size)
 {
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	uint32_t xstats_mode_count;
 	uint32_t idx = 0;
 	uint32_t i;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 
 	xstats_mode_count = 0;
 	switch (mode) {
 	case RTE_ML_DEV_XSTATS_DEVICE:
-		xstats_mode_count = mldev->xstats.count_mode_device;
+		xstats_mode_count = cn10k_mldev->xstats.count_mode_device;
 		break;
 	case RTE_ML_DEV_XSTATS_MODEL:
 		if (model_id >= ML_CN10K_MAX_MODELS)
 			break;
-		xstats_mode_count = mldev->xstats.count_per_model[model_id];
+		xstats_mode_count = cn10k_mldev->xstats.count_per_model[model_id];
 		break;
 	default:
 		return -EINVAL;
@@ -1283,16 +1306,17 @@ cn10k_ml_dev_xstats_names_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mod
 	if (xstats_mode_count > size || xstats_map == NULL)
 		return xstats_mode_count;
 
-	for (i = 0; i < mldev->xstats.count && idx < size; i++) {
-		if (mldev->xstats.entries[i].mode != mode)
+	for (i = 0; i < cn10k_mldev->xstats.count && idx < size; i++) {
+		if (cn10k_mldev->xstats.entries[i].mode != mode)
 			continue;
 
 		if (mode != RTE_ML_DEV_XSTATS_DEVICE &&
-		    model_id != mldev->xstats.entries[i].obj_idx)
+		    model_id != cn10k_mldev->xstats.entries[i].obj_idx)
 			continue;
 
-		strncpy(xstats_map[idx].name, mldev->xstats.entries[i].map.name, RTE_ML_STR_MAX);
-		xstats_map[idx].id = mldev->xstats.entries[i].map.id;
+		strncpy(xstats_map[idx].name, cn10k_mldev->xstats.entries[i].map.name,
+			RTE_ML_STR_MAX);
+		xstats_map[idx].id = cn10k_mldev->xstats.entries[i].map.id;
 		idx++;
 	}
 
@@ -1304,13 +1328,15 @@ cn10k_ml_dev_xstats_by_name_get(struct rte_ml_dev *dev, const char *name, uint16
 				uint64_t *value)
 {
 	struct cn10k_ml_xstats_entry *xs;
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	cn10k_ml_xstats_fn fn;
 	uint32_t i;
 
-	mldev = dev->data->dev_private;
-	for (i = 0; i < mldev->xstats.count; i++) {
-		xs = &mldev->xstats.entries[i];
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
+	for (i = 0; i < cn10k_mldev->xstats.count; i++) {
+		xs = &cn10k_mldev->xstats.entries[i];
 		if (strncmp(xs->map.name, name, RTE_ML_STR_MAX) == 0) {
 			if (stat_id != NULL)
 				*stat_id = xs->map.id;
@@ -1344,24 +1370,26 @@ cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode
 			const uint16_t stat_ids[], uint64_t values[], uint16_t nb_ids)
 {
 	struct cn10k_ml_xstats_entry *xs;
-	struct cn10k_ml_dev *mldev;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	uint32_t xstats_mode_count;
 	cn10k_ml_xstats_fn fn;
 	uint64_t val;
 	uint32_t idx;
 	uint32_t i;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 	xstats_mode_count = 0;
 
 	switch (mode) {
 	case RTE_ML_DEV_XSTATS_DEVICE:
-		xstats_mode_count = mldev->xstats.count_mode_device;
+		xstats_mode_count = cn10k_mldev->xstats.count_mode_device;
 		break;
 	case RTE_ML_DEV_XSTATS_MODEL:
 		if (model_id >= ML_CN10K_MAX_MODELS)
 			return -EINVAL;
-		xstats_mode_count = mldev->xstats.count_per_model[model_id];
+		xstats_mode_count = cn10k_mldev->xstats.count_per_model[model_id];
 		break;
 	default:
 		return -EINVAL;
@@ -1369,8 +1397,8 @@ cn10k_ml_dev_xstats_get(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mode
 
 	idx = 0;
 	for (i = 0; i < nb_ids && idx < xstats_mode_count; i++) {
-		xs = &mldev->xstats.entries[stat_ids[i]];
-		if (stat_ids[i] > mldev->xstats.count || xs->mode != mode)
+		xs = &cn10k_mldev->xstats.entries[stat_ids[i]];
+		if (stat_ids[i] > cn10k_mldev->xstats.count || xs->mode != mode)
 			continue;
 
 		if (mode == RTE_ML_DEV_XSTATS_MODEL && model_id != xs->obj_idx) {
@@ -1418,8 +1446,9 @@ cn10k_ml_dev_xstats_reset(struct rte_ml_dev *dev, enum rte_ml_dev_xstats_mode mo
 static int
 cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)
 {
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_fw *fw;
 
 	uint32_t head_loc;
@@ -1432,8 +1461,9 @@ cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)
 	if (roc_env_is_asim())
 		return 0;
 
-	mldev = dev->data->dev_private;
-	fw = &mldev->fw;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
+	fw = &cn10k_mldev->fw;
 
 	/* Dump model info */
 	for (model_id = 0; model_id < dev->data->nb_models; model_id++) {
@@ -1451,15 +1481,19 @@ cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)
 	for (core_id = 0; core_id <= 1; core_id++) {
 		bufsize = fw->req->jd.fw_load.debug.debug_buffer_size;
 		if (core_id == 0) {
-			head_loc = roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_DBG_BUFFER_HEAD_C0);
-			tail_loc = roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_DBG_BUFFER_TAIL_C0);
+			head_loc =
+				roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_DBG_BUFFER_HEAD_C0);
+			tail_loc =
+				roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_DBG_BUFFER_TAIL_C0);
 			head_ptr = PLT_PTR_CAST(fw->req->jd.fw_load.debug.core0_debug_ptr);
-			head_ptr = roc_ml_addr_mlip2ap(&mldev->roc, head_ptr);
+			head_ptr = roc_ml_addr_mlip2ap(&cn10k_mldev->roc, head_ptr);
 		} else {
-			head_loc = roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_DBG_BUFFER_HEAD_C1);
-			tail_loc = roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_DBG_BUFFER_TAIL_C1);
+			head_loc =
+				roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_DBG_BUFFER_HEAD_C1);
+			tail_loc =
+				roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_DBG_BUFFER_TAIL_C1);
 			head_ptr = PLT_PTR_CAST(fw->req->jd.fw_load.debug.core1_debug_ptr);
-			head_ptr = roc_ml_addr_mlip2ap(&mldev->roc, head_ptr);
+			head_ptr = roc_ml_addr_mlip2ap(&cn10k_mldev->roc, head_ptr);
 		}
 		if (head_loc < tail_loc) {
 			fprintf(fp, "%.*s\n", tail_loc - head_loc, &head_ptr[head_loc]);
@@ -1473,18 +1507,18 @@ cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)
 	for (core_id = 0; core_id <= 1; core_id++) {
 		bufsize = fw->req->jd.fw_load.debug.exception_state_size;
 		if ((core_id == 0) &&
-		    (roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0) != 0)) {
+		    (roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0) != 0)) {
 			head_ptr = PLT_PTR_CAST(fw->req->jd.fw_load.debug.core0_exception_buffer);
 			fprintf(fp, "ML_SCRATCH_EXCEPTION_SP_C0 = 0x%016lx",
-				roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0));
-			head_ptr = roc_ml_addr_mlip2ap(&mldev->roc, head_ptr);
+				roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0));
+			head_ptr = roc_ml_addr_mlip2ap(&cn10k_mldev->roc, head_ptr);
 			fprintf(fp, "%.*s", bufsize, head_ptr);
-		} else if ((core_id == 1) &&
-			   (roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_EXCEPTION_SP_C1) != 0)) {
+		} else if ((core_id == 1) && (roc_ml_reg_read64(&cn10k_mldev->roc,
+								ML_SCRATCH_EXCEPTION_SP_C1) != 0)) {
 			head_ptr = PLT_PTR_CAST(fw->req->jd.fw_load.debug.core1_exception_buffer);
 			fprintf(fp, "ML_SCRATCH_EXCEPTION_SP_C1 = 0x%016lx",
-				roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_EXCEPTION_SP_C1));
-			head_ptr = roc_ml_addr_mlip2ap(&mldev->roc, head_ptr);
+				roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C1));
+			head_ptr = roc_ml_addr_mlip2ap(&cn10k_mldev->roc, head_ptr);
 			fprintf(fp, "%.*s", bufsize, head_ptr);
 		}
 	}
@@ -1495,14 +1529,16 @@ cn10k_ml_dev_dump(struct rte_ml_dev *dev, FILE *fp)
 static int
 cn10k_ml_dev_selftest(struct rte_ml_dev *dev)
 {
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	const struct plt_memzone *mz;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_req *req;
 	uint64_t timeout_cycle;
 	bool timeout;
 	int ret;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 	mz = plt_memzone_reserve_aligned("dev_selftest", sizeof(struct cn10k_ml_req), 0,
 					 ML_CN10K_ALIGN_SIZE);
 	if (mz == NULL) {
@@ -1515,20 +1551,20 @@ cn10k_ml_dev_selftest(struct rte_ml_dev *dev)
 	memset(&req->jd, 0, sizeof(struct cn10k_ml_jd));
 	req->jd.hdr.jce.w1.u64 = PLT_U64_CAST(&req->status);
 	req->jd.hdr.job_type = ML_CN10K_JOB_TYPE_FIRMWARE_SELFTEST;
-	req->jd.hdr.result = roc_ml_addr_ap2mlip(&mldev->roc, &req->result);
-	req->jd.fw_load.flags = cn10k_ml_fw_flags_get(&mldev->fw);
-	plt_write64(ML_CN10K_POLL_JOB_START, &req->status);
+	req->jd.hdr.result = roc_ml_addr_ap2mlip(&cn10k_mldev->roc, &req->result);
+	req->jd.fw_load.flags = cn10k_ml_fw_flags_get(&cn10k_mldev->fw);
+	plt_write64(ML_CNXK_POLL_JOB_START, &req->status);
 	plt_wmb();
 
 	/* Enqueue firmware selftest request through scratch registers */
 	timeout = true;
-	timeout_cycle = plt_tsc_cycles() + ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();
-	roc_ml_scratch_enqueue(&mldev->roc, &req->jd);
+	timeout_cycle = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();
+	roc_ml_scratch_enqueue(&cn10k_mldev->roc, &req->jd);
 
 	plt_rmb();
 	do {
-		if (roc_ml_scratch_is_done_bit_set(&mldev->roc) &&
-		    (plt_read64(&req->status) == ML_CN10K_POLL_JOB_FINISH)) {
+		if (roc_ml_scratch_is_done_bit_set(&cn10k_mldev->roc) &&
+		    (plt_read64(&req->status) == ML_CNXK_POLL_JOB_FINISH)) {
 			timeout = false;
 			break;
 		}
@@ -1552,8 +1588,8 @@ int
 cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params, uint16_t *model_id)
 {
 	struct cn10k_ml_model_metadata *metadata;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 
 	char str[RTE_MEMZONE_NAMESIZE];
 	const struct plt_memzone *mz;
@@ -1574,7 +1610,7 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	if (ret != 0)
 		return ret;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
 
 	/* Find model ID */
 	found = false;
@@ -1591,7 +1627,8 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	}
 
 	/* Get WB and scratch pages, check if model can be loaded. */
-	ret = cn10k_ml_model_ocm_pages_count(mldev, idx, params->addr, &wb_pages, &scratch_pages);
+	ret = cn10k_ml_model_ocm_pages_count(&cnxk_mldev->cn10k_mldev, idx, params->addr, &wb_pages,
+					     &scratch_pages);
 	if (ret < 0)
 		return ret;
 
@@ -1623,7 +1660,7 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	}
 
 	model = mz->addr;
-	model->mldev = mldev;
+	model->mldev = cnxk_mldev;
 	model->model_id = idx;
 
 	rte_memcpy(&model->metadata, params->addr, sizeof(struct cn10k_ml_model_metadata));
@@ -1680,7 +1717,7 @@ cn10k_ml_model_load(struct rte_ml_dev *dev, struct rte_ml_model_params *params,
 	plt_spinlock_init(&model->lock);
 	model->state = ML_CN10K_MODEL_STATE_LOADED;
 	dev->data->models[idx] = model;
-	mldev->nb_models_loaded++;
+	cnxk_mldev->nb_models_loaded++;
 
 	/* Update xstats names */
 	cn10k_ml_xstats_model_name_update(dev, idx);
@@ -1695,9 +1732,9 @@ cn10k_ml_model_unload(struct rte_ml_dev *dev, uint16_t model_id)
 {
 	char str[RTE_MEMZONE_NAMESIZE];
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
 	model = dev->data->models[model_id];
 
 	if (model == NULL) {
@@ -1711,7 +1748,7 @@ cn10k_ml_model_unload(struct rte_ml_dev *dev, uint16_t model_id)
 	}
 
 	dev->data->models[model_id] = NULL;
-	mldev->nb_models_unloaded++;
+	cnxk_mldev->nb_models_unloaded++;
 
 	snprintf(str, RTE_MEMZONE_NAMESIZE, "%s_%u", CN10K_ML_MODEL_MEMZONE_NAME, model_id);
 	return plt_memzone_free(plt_memzone_lookup(str));
@@ -1720,8 +1757,9 @@ cn10k_ml_model_unload(struct rte_ml_dev *dev, uint16_t model_id)
 int
 cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)
 {
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_ocm *ocm;
 	struct cn10k_ml_req *req;
 
@@ -1735,8 +1773,9 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)
 	bool locked;
 	int ret = 0;
 
-	mldev = dev->data->dev_private;
-	ocm = &mldev->ocm;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
+	ocm = &cn10k_mldev->ocm;
 	model = dev->data->models[model_id];
 
 	if (model == NULL) {
@@ -1746,11 +1785,11 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)
 
 	/* Prepare JD */
 	req = model->req;
-	cn10k_ml_prep_sp_job_descriptor(mldev, model, req, ML_CN10K_JOB_TYPE_MODEL_START);
+	cn10k_ml_prep_sp_job_descriptor(cn10k_mldev, model, req, ML_CN10K_JOB_TYPE_MODEL_START);
 	req->result.error_code.u64 = 0x0;
 	req->result.user_ptr = NULL;
 
-	plt_write64(ML_CN10K_POLL_JOB_START, &req->status);
+	plt_write64(ML_CNXK_POLL_JOB_START, &req->status);
 	plt_wmb();
 
 	num_tiles = model->metadata.model.tile_end - model->metadata.model.tile_start + 1;
@@ -1815,26 +1854,26 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)
 	job_dequeued = false;
 	do {
 		if (!job_enqueued) {
-			req->timeout = plt_tsc_cycles() + ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();
-			job_enqueued = roc_ml_scratch_enqueue(&mldev->roc, &req->jd);
+			req->timeout = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();
+			job_enqueued = roc_ml_scratch_enqueue(&cn10k_mldev->roc, &req->jd);
 		}
 
 		if (job_enqueued && !job_dequeued)
-			job_dequeued = roc_ml_scratch_dequeue(&mldev->roc, &req->jd);
+			job_dequeued = roc_ml_scratch_dequeue(&cn10k_mldev->roc, &req->jd);
 
 		if (job_dequeued)
 			break;
 	} while (plt_tsc_cycles() < req->timeout);
 
 	if (job_dequeued) {
-		if (plt_read64(&req->status) == ML_CN10K_POLL_JOB_FINISH) {
+		if (plt_read64(&req->status) == ML_CNXK_POLL_JOB_FINISH) {
 			if (req->result.error_code.u64 == 0)
 				ret = 0;
 			else
 				ret = -1;
 		}
 	} else { /* Reset scratch registers */
-		roc_ml_scratch_queue_reset(&mldev->roc);
+		roc_ml_scratch_queue_reset(&cn10k_mldev->roc);
 		ret = -ETIME;
 	}
 
@@ -1843,7 +1882,7 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)
 		if (plt_spinlock_trylock(&model->lock) != 0) {
 			if (ret == 0) {
 				model->state = ML_CN10K_MODEL_STATE_STARTED;
-				mldev->nb_models_started++;
+				cnxk_mldev->nb_models_started++;
 			} else {
 				model->state = ML_CN10K_MODEL_STATE_UNKNOWN;
 			}
@@ -1867,7 +1906,7 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)
 	if (ret < 0) { /* Call unload to update model and FW state, ignore error */
 		rte_ml_model_stop(dev->data->dev_id, model_id);
 	} else {
-		if (mldev->cache_model_data && roc_model_is_cn10ka())
+		if (cn10k_mldev->cache_model_data && roc_model_is_cn10ka())
 			ret = cn10k_ml_cache_model_data(dev, model_id);
 	}
 
@@ -1877,8 +1916,9 @@ cn10k_ml_model_start(struct rte_ml_dev *dev, uint16_t model_id)
 int
 cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)
 {
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_ocm *ocm;
 	struct cn10k_ml_req *req;
 
@@ -1887,8 +1927,9 @@ cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)
 	bool locked;
 	int ret = 0;
 
-	mldev = dev->data->dev_private;
-	ocm = &mldev->ocm;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
+	ocm = &cn10k_mldev->ocm;
 	model = dev->data->models[model_id];
 
 	if (model == NULL) {
@@ -1898,11 +1939,11 @@ cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)
 
 	/* Prepare JD */
 	req = model->req;
-	cn10k_ml_prep_sp_job_descriptor(mldev, model, req, ML_CN10K_JOB_TYPE_MODEL_STOP);
+	cn10k_ml_prep_sp_job_descriptor(cn10k_mldev, model, req, ML_CN10K_JOB_TYPE_MODEL_STOP);
 	req->result.error_code.u64 = 0x0;
 	req->result.user_ptr = NULL;
 
-	plt_write64(ML_CN10K_POLL_JOB_START, &req->status);
+	plt_write64(ML_CNXK_POLL_JOB_START, &req->status);
 	plt_wmb();
 
 	locked = false;
@@ -1941,33 +1982,33 @@ cn10k_ml_model_stop(struct rte_ml_dev *dev, uint16_t model_id)
 	job_dequeued = false;
 	do {
 		if (!job_enqueued) {
-			req->timeout = plt_tsc_cycles() + ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();
-			job_enqueued = roc_ml_scratch_enqueue(&mldev->roc, &req->jd);
+			req->timeout = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();
+			job_enqueued = roc_ml_scratch_enqueue(&cn10k_mldev->roc, &req->jd);
 		}
 
 		if (job_enqueued && !job_dequeued)
-			job_dequeued = roc_ml_scratch_dequeue(&mldev->roc, &req->jd);
+			job_dequeued = roc_ml_scratch_dequeue(&cn10k_mldev->roc, &req->jd);
 
 		if (job_dequeued)
 			break;
 	} while (plt_tsc_cycles() < req->timeout);
 
 	if (job_dequeued) {
-		if (plt_read64(&req->status) == ML_CN10K_POLL_JOB_FINISH) {
+		if (plt_read64(&req->status) == ML_CNXK_POLL_JOB_FINISH) {
 			if (req->result.error_code.u64 == 0x0)
 				ret = 0;
 			else
 				ret = -1;
 		}
 	} else {
-		roc_ml_scratch_queue_reset(&mldev->roc);
+		roc_ml_scratch_queue_reset(&cn10k_mldev->roc);
 		ret = -ETIME;
 	}
 
 	locked = false;
 	while (!locked) {
 		if (plt_spinlock_trylock(&model->lock) != 0) {
-			mldev->nb_models_stopped++;
+			cnxk_mldev->nb_models_stopped++;
 			model->state = ML_CN10K_MODEL_STATE_LOADED;
 			plt_spinlock_unlock(&model->lock);
 			locked = true;
@@ -2211,8 +2252,9 @@ cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cn10k_ml_result
 		       struct rte_ml_op *op)
 {
 	struct cn10k_ml_model_stats *stats;
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_qp *qp;
 	uint64_t hw_latency;
 	uint64_t fw_latency;
@@ -2258,14 +2300,16 @@ cn10k_ml_result_update(struct rte_ml_dev *dev, int qp_id, struct cn10k_ml_result
 
 		/* Handle driver error */
 		if (result->error_code.s.etype == ML_ETYPE_DRIVER) {
-			mldev = dev->data->dev_private;
+			cnxk_mldev = dev->data->dev_private;
+			cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 
 			/* Check for exception */
-			if ((roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0) != 0) ||
-			    (roc_ml_reg_read64(&mldev->roc, ML_SCRATCH_EXCEPTION_SP_C1) != 0))
+			if ((roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C0) !=
+			     0) ||
+			    (roc_ml_reg_read64(&cn10k_mldev->roc, ML_SCRATCH_EXCEPTION_SP_C1) != 0))
 				result->error_code.s.stype = ML_DRIVER_ERR_EXCEPTION;
-			else if ((roc_ml_reg_read64(&mldev->roc, ML_CORE_INT_LO) != 0) ||
-				 (roc_ml_reg_read64(&mldev->roc, ML_CORE_INT_HI) != 0))
+			else if ((roc_ml_reg_read64(&cn10k_mldev->roc, ML_CORE_INT_LO) != 0) ||
+				 (roc_ml_reg_read64(&cn10k_mldev->roc, ML_CORE_INT_HI) != 0))
 				result->error_code.s.stype = ML_DRIVER_ERR_FW_ERROR;
 			else
 				result->error_code.s.stype = ML_DRIVER_ERR_UNKNOWN;
@@ -2282,8 +2326,9 @@ __rte_hot uint16_t
 cn10k_ml_enqueue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op **ops,
 		       uint16_t nb_ops)
 {
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_queue *queue;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_req *req;
 	struct cn10k_ml_qp *qp;
 	struct rte_ml_op *op;
@@ -2292,7 +2337,8 @@ cn10k_ml_enqueue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op
 	uint64_t head;
 	bool enqueued;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 	qp = dev->data->queue_pairs[qp_id];
 	queue = &qp->queue;
 
@@ -2307,15 +2353,15 @@ cn10k_ml_enqueue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op
 	op = ops[count];
 	req = &queue->reqs[head];
 
-	mldev->set_poll_addr(req);
-	cn10k_ml_prep_fp_job_descriptor(dev, req, op);
+	cn10k_mldev->set_poll_addr(req);
+	cn10k_ml_prep_fp_job_descriptor(cn10k_mldev, req, op);
 
 	memset(&req->result, 0, sizeof(struct cn10k_ml_result));
 	req->result.error_code.s.etype = ML_ETYPE_UNKNOWN;
 	req->result.user_ptr = op->user_ptr;
 
-	mldev->set_poll_ptr(req);
-	enqueued = mldev->ml_jcmdq_enqueue(&mldev->roc, &req->jcmd);
+	cn10k_mldev->set_poll_ptr(req);
+	enqueued = cn10k_mldev->ml_jcmdq_enqueue(&cn10k_mldev->roc, &req->jcmd);
 	if (unlikely(!enqueued))
 		goto jcmdq_full;
 
@@ -2339,8 +2385,9 @@ __rte_hot uint16_t
 cn10k_ml_dequeue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op **ops,
 		       uint16_t nb_ops)
 {
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_queue *queue;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_req *req;
 	struct cn10k_ml_qp *qp;
 
@@ -2348,7 +2395,8 @@ cn10k_ml_dequeue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op
 	uint16_t count;
 	uint64_t tail;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 	qp = dev->data->queue_pairs[qp_id];
 	queue = &qp->queue;
 
@@ -2361,8 +2409,8 @@ cn10k_ml_dequeue_burst(struct rte_ml_dev *dev, uint16_t qp_id, struct rte_ml_op
 
 dequeue_req:
 	req = &queue->reqs[tail];
-	status = mldev->get_poll_ptr(req);
-	if (unlikely(status != ML_CN10K_POLL_JOB_FINISH)) {
+	status = cn10k_mldev->get_poll_ptr(req);
+	if (unlikely(status != ML_CNXK_POLL_JOB_FINISH)) {
 		if (plt_tsc_cycles() < req->timeout)
 			goto empty_or_active;
 		else /* Timeout, set indication of driver error */
@@ -2420,30 +2468,32 @@ cn10k_ml_op_error_get(struct rte_ml_dev *dev, struct rte_ml_op *op, struct rte_m
 __rte_hot int
 cn10k_ml_inference_sync(struct rte_ml_dev *dev, struct rte_ml_op *op)
 {
+	struct cn10k_ml_dev *cn10k_mldev;
+	struct cnxk_ml_dev *cnxk_mldev;
 	struct cn10k_ml_model *model;
-	struct cn10k_ml_dev *mldev;
 	struct cn10k_ml_req *req;
 	bool timeout;
 	int ret = 0;
 
-	mldev = dev->data->dev_private;
+	cnxk_mldev = dev->data->dev_private;
+	cn10k_mldev = &cnxk_mldev->cn10k_mldev;
 	model = dev->data->models[op->model_id];
 	req = model->req;
 
 	cn10k_ml_set_poll_addr(req);
-	cn10k_ml_prep_fp_job_descriptor(dev, req, op);
+	cn10k_ml_prep_fp_job_descriptor(cn10k_mldev, req, op);
 
 	memset(&req->result, 0, sizeof(struct cn10k_ml_result));
 	req->result.error_code.s.etype = ML_ETYPE_UNKNOWN;
 	req->result.user_ptr = op->user_ptr;
 
-	mldev->set_poll_ptr(req);
+	cn10k_mldev->set_poll_ptr(req);
 	req->jcmd.w1.s.jobptr = PLT_U64_CAST(&req->jd);
 
 	timeout = true;
-	req->timeout = plt_tsc_cycles() + ML_CN10K_CMD_TIMEOUT * plt_tsc_hz();
+	req->timeout = plt_tsc_cycles() + ML_CNXK_CMD_TIMEOUT * plt_tsc_hz();
 	do {
-		if (mldev->ml_jcmdq_enqueue(&mldev->roc, &req->jcmd)) {
+		if (cn10k_mldev->ml_jcmdq_enqueue(&cn10k_mldev->roc, &req->jcmd)) {
 			req->op = op;
 			timeout = false;
 			break;
@@ -2457,7 +2507,7 @@ cn10k_ml_inference_sync(struct rte_ml_dev *dev, struct rte_ml_op *op)
 
 	timeout = true;
 	do {
-		if (mldev->get_poll_ptr(req) == ML_CN10K_POLL_JOB_FINISH) {
+		if (cn10k_mldev->get_poll_ptr(req) == ML_CNXK_POLL_JOB_FINISH) {
 			timeout = false;
 			break;
 		}
diff --git a/drivers/ml/cnxk/cnxk_ml_dev.c b/drivers/ml/cnxk/cnxk_ml_dev.c
new file mode 100644
index 00000000000..2a5c17c973b
--- /dev/null
+++ b/drivers/ml/cnxk/cnxk_ml_dev.c
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Marvell.
+ */
+
+#include <rte_mldev.h>
+#include <rte_mldev_pmd.h>
+
+#include "cnxk_ml_dev.h"
+
+/* Dummy operations for ML device */
+struct rte_ml_dev_ops ml_dev_dummy_ops = {0};
diff --git a/drivers/ml/cnxk/cnxk_ml_dev.h b/drivers/ml/cnxk/cnxk_ml_dev.h
new file mode 100644
index 00000000000..51315de6227
--- /dev/null
+++ b/drivers/ml/cnxk/cnxk_ml_dev.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Marvell.
+ */
+
+#ifndef _CNXK_ML_DEV_H_
+#define _CNXK_ML_DEV_H_
+
+#include <roc_api.h>
+
+#include "cn10k_ml_dev.h"
+
+/* ML command timeout in seconds */
+#define ML_CNXK_CMD_TIMEOUT 5
+
+/* Poll mode job state */
+#define ML_CNXK_POLL_JOB_START	0
+#define ML_CNXK_POLL_JOB_FINISH 1
+
+/* Device configuration state enum */
+enum cnxk_ml_dev_state {
+	/* Probed and not configured */
+	ML_CNXK_DEV_STATE_PROBED = 0,
+
+	/* Configured */
+	ML_CNXK_DEV_STATE_CONFIGURED,
+
+	/* Started */
+	ML_CNXK_DEV_STATE_STARTED,
+
+	/* Closed */
+	ML_CNXK_DEV_STATE_CLOSED
+};
+
+/* Device private data */
+struct cnxk_ml_dev {
+	/* RTE device */
+	struct rte_ml_dev *mldev;
+
+	/* Configuration state */
+	enum cnxk_ml_dev_state state;
+
+	/* Number of models loaded */
+	uint16_t nb_models_loaded;
+
+	/* Number of models unloaded */
+	uint16_t nb_models_unloaded;
+
+	/* Number of models started */
+	uint16_t nb_models_started;
+
+	/* Number of models stopped */
+	uint16_t nb_models_stopped;
+
+	/* CN10K device structure */
+	struct cn10k_ml_dev cn10k_mldev;
+};
+
+#endif /* _CNXK_ML_DEV_H_ */
diff --git a/drivers/ml/cnxk/meson.build b/drivers/ml/cnxk/meson.build
index 94fa4283b13..03a2d4ecf2f 100644
--- a/drivers/ml/cnxk/meson.build
+++ b/drivers/ml/cnxk/meson.build
@@ -12,6 +12,7 @@ driver_sdk_headers = files(
         'cn10k_ml_ops.h',
         'cn10k_ml_model.h',
         'cn10k_ml_ocm.h',
+        'cnxk_ml_dev.h',
 )
 
 sources = files(
@@ -19,6 +20,7 @@ sources = files(
         'cn10k_ml_ops.c',
         'cn10k_ml_model.c',
         'cn10k_ml_ocm.c',
+        'cnxk_ml_dev.c',
 )
 
 deps += ['mldev', 'common_cnxk', 'kvargs', 'hash']
-- 
2.41.0


Thread overview: 340+ messages
2023-08-30 15:58 [PATCH v1 00/34] Implemenation of revised ml/cnxk driver Srikanth Yalavarthi
2023-08-30 15:58 ` [PATCH v1 01/34] ml/cnxk: drop support for register polling Srikanth Yalavarthi
2023-08-30 15:58 ` [PATCH v1 02/34] ml/cnxk: drop use of RTE API for firmware read Srikanth Yalavarthi
2023-09-21 12:08   ` Jerin Jacob
2023-09-21 12:52     ` David Marchand
2023-09-21 13:06       ` [EXT] " Srikanth Yalavarthi
2023-09-21 13:26         ` David Marchand
2023-09-22  3:59           ` Srikanth Yalavarthi
2023-09-22  8:07             ` David Marchand
2023-09-22 16:59               ` Srikanth Yalavarthi
2023-09-27  9:38     ` David Marchand
2023-09-27 10:00       ` [EXT] " Srikanth Yalavarthi
2023-09-27 18:37     ` Srikanth Yalavarthi
2023-08-30 15:58 ` [PATCH v1 03/34] ml/cnxk: add generic cnxk device structure Srikanth Yalavarthi [this message]
2023-08-30 15:58 ` [PATCH v1 04/34] ml/cnxk: add generic model and layer structures Srikanth Yalavarthi
2023-08-30 15:58 ` [PATCH v1 05/34] ml/cnxk: add generic cnxk request structure Srikanth Yalavarthi
2023-08-30 15:58 ` [PATCH v1 06/34] ml/cnxk: add generic cnxk xstats structures Srikanth Yalavarthi
2023-08-30 15:58 ` [PATCH v1 07/34] ml/cnxk: rename cnxk ops function pointers struct Srikanth Yalavarthi
2023-08-30 15:58 ` [PATCH v1 08/34] ml/cnxk: update device handling functions Srikanth Yalavarthi
2023-08-30 15:58 ` [PATCH v1 09/34] ml/cnxk: update queue-pair " Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 10/34] ml/cnxk: update model load and unload functions Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 11/34] ml/cnxk: update model start and stop functions Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 12/34] ml/cnxk: update model utility functions Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 13/34] ml/cnxk: update data quantization functions Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 14/34] ml/cnxk: update device debug functions Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 15/34] ml/cnxk: update device stats functions Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 16/34] ml/cnxk: update device and model xstats functions Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 17/34] ml/cnxk: update fast path functions Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 18/34] ml/cnxk: move error handling to cnxk layer Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 19/34] ml/cnxk: support config and close of tvmdp library Srikanth Yalavarthi
2023-09-21 12:32   ` Jerin Jacob
2023-09-27 18:38     ` [EXT] " Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 20/34] ml/cnxk: add structures to support TVM model type Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 21/34] ml/cnxk: add support for identify " Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 22/34] ml/cnxk: add support to parse TVM model objects Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 23/34] ml/cnxk: fetch layer info and load TVM model Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 24/34] ml/cnxk: update internal info for " Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 25/34] ml/cnxk: enable model unload in tvmdp library Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 26/34] ml/cnxk: support start and stop for TVM models Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 27/34] ml/cnxk: update internal TVM model info structure Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 28/34] ml/cnxk: support device dump for TVM models Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 29/34] ml/cnxk: enable reporting model runtime as xstats Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 30/34] ml/cnxk: implement I/O alloc and free callbacks Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 31/34] ml/cnxk: add generic ML malloc and free callback Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 32/34] ml/cnxk: support quantize and dequantize callback Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 33/34] ml/cnxk: enable fast-path ops for TVM models Srikanth Yalavarthi
2023-08-30 15:59 ` [PATCH v1 34/34] ml/cnxk: enable creation of mvtvm virtual device Srikanth Yalavarthi
2023-09-20  7:24 ` [PATCH v2 00/34] Implemenation of revised ml/cnxk driver Srikanth Yalavarthi
2023-09-20  7:24   ` [PATCH v2 01/34] ml/cnxk: drop support for register polling Srikanth Yalavarthi
2023-09-20  7:24   ` [PATCH v2 02/34] ml/cnxk: drop use of RTE API for firmware read Srikanth Yalavarthi
2023-09-20  7:24   ` [PATCH v2 03/34] ml/cnxk: add generic cnxk device structure Srikanth Yalavarthi
2023-09-20  7:24   ` [PATCH v2 04/34] ml/cnxk: add generic model and layer structures Srikanth Yalavarthi
2023-09-20  7:24   ` [PATCH v2 05/34] ml/cnxk: add generic cnxk request structure Srikanth Yalavarthi
2023-09-20  7:24   ` [PATCH v2 06/34] ml/cnxk: add generic cnxk xstats structures Srikanth Yalavarthi
2023-09-20  7:24   ` [PATCH v2 07/34] ml/cnxk: rename cnxk ops function pointers struct Srikanth Yalavarthi
2023-09-20  7:24   ` [PATCH v2 08/34] ml/cnxk: update device handling functions Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 09/34] ml/cnxk: update queue-pair " Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 10/34] ml/cnxk: update model load and unload functions Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 11/34] ml/cnxk: update model start and stop functions Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 12/34] ml/cnxk: update model utility functions Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 13/34] ml/cnxk: update data quantization functions Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 14/34] ml/cnxk: update device debug functions Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 15/34] ml/cnxk: update device stats functions Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 16/34] ml/cnxk: update device and model xstats functions Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 17/34] ml/cnxk: update fast path functions Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 18/34] ml/cnxk: move error handling to cnxk layer Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 19/34] ml/cnxk: support config and close of tvmdp library Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 20/34] ml/cnxk: add structures to support TVM model type Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 21/34] ml/cnxk: add support for identify " Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 22/34] ml/cnxk: add support to parse TVM model objects Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 23/34] ml/cnxk: fetch layer info and load TVM model Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 24/34] ml/cnxk: update internal info for " Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 25/34] ml/cnxk: enable model unload in tvmdp library Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 26/34] ml/cnxk: support start and stop for TVM models Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 27/34] ml/cnxk: update internal TVM model info structure Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 28/34] ml/cnxk: support device dump for TVM models Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 29/34] ml/cnxk: enable reporting model runtime as xstats Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 30/34] ml/cnxk: implement I/O alloc and free callbacks Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 31/34] ml/cnxk: add generic ML malloc and free callback Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 32/34] ml/cnxk: support quantize and dequantize callback Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 33/34] ml/cnxk: enable fast-path ops for TVM models Srikanth Yalavarthi
2023-09-20  7:25   ` [PATCH v2 34/34] ml/cnxk: enable creation of mvtvm virtual device Srikanth Yalavarthi
2023-09-21 12:15   ` [PATCH v2 00/34] Implemenation of revised ml/cnxk driver Jerin Jacob
2023-09-27 18:39     ` [EXT] " Srikanth Yalavarthi
2023-09-27 18:30 ` [PATCH v3 00/35] " Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 01/35] ml/cnxk: drop support for register polling Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 02/35] ml/cnxk: add generic cnxk device structure Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 03/35] ml/cnxk: add generic model and layer structures Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 04/35] ml/cnxk: add generic cnxk request structure Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 05/35] ml/cnxk: add generic cnxk xstats structures Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 06/35] ml/cnxk: rename cnxk ops function pointers struct Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 07/35] ml/cnxk: update device handling functions Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 08/35] ml/cnxk: update queue-pair " Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 09/35] ml/cnxk: update model load and unload functions Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 10/35] ml/cnxk: update model start and stop functions Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 11/35] ml/cnxk: update model utility functions Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 12/35] ml/cnxk: update data quantization functions Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 13/35] ml/cnxk: update device debug functions Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 14/35] ml/cnxk: update device stats functions Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 15/35] ml/cnxk: update device and model xstats functions Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 16/35] ml/cnxk: update fast path functions Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 17/35] ml/cnxk: move error handling to cnxk layer Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 18/35] ml/cnxk: support config and close of tvmdp library Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 19/35] ml/cnxk: add structures to support TVM model type Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 20/35] ml/cnxk: add support for identify " Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 21/35] ml/cnxk: add support to parse TVM model objects Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 22/35] ml/cnxk: fetch layer info and load TVM model Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 23/35] ml/cnxk: update internal info for " Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 24/35] ml/cnxk: enable model unload in tvmdp library Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 25/35] ml/cnxk: support start and stop for TVM models Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 26/35] ml/cnxk: update internal TVM model info structure Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 27/35] ml/cnxk: support device dump for TVM models Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 28/35] ml/cnxk: enable reporting model runtime as xstats Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 29/35] ml/cnxk: implement I/O alloc and free callbacks Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 30/35] ml/cnxk: add generic ML malloc and free callback Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 31/35] ml/cnxk: support quantize and dequantize callback Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 32/35] ml/cnxk: enable fast-path ops for TVM models Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 33/35] ml/cnxk: enable creation of mvtvm virtual device Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 34/35] ml/cnxk: update dependency info in driver docs Srikanth Yalavarthi
2023-09-28  4:12     ` Jerin Jacob
2023-10-01  0:32       ` [EXT] " Srikanth Yalavarthi
2023-10-17 17:03       ` Srikanth Yalavarthi
2023-09-27 18:30   ` [PATCH v3 35/35] ml/cnxk: update release notes for 23.11 Srikanth Yalavarthi
2023-10-17 16:59 ` [PATCH v4 00/34] Implementation of revised ml/cnxk driver Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 01/34] ml/cnxk: drop support for register polling Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 02/34] ml/cnxk: add generic cnxk device structure Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 03/34] ml/cnxk: add generic model and layer structures Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 04/34] ml/cnxk: add generic cnxk request structure Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 05/34] ml/cnxk: add generic cnxk xstats structures Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 06/34] ml/cnxk: rename cnxk ops function pointers struct Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 07/34] ml/cnxk: update device handling functions Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 08/34] ml/cnxk: update queue-pair " Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 09/34] ml/cnxk: update model load and unload functions Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 10/34] ml/cnxk: enable OCM check for multilayer TVM model Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 11/34] ml/cnxk: update model start and stop functions Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 12/34] ml/cnxk: update model utility functions Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 13/34] ml/cnxk: update data quantization functions Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 14/34] ml/cnxk: update device debug functions Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 15/34] ml/cnxk: update device stats functions Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 16/34] ml/cnxk: update device and model xstats functions Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 17/34] ml/cnxk: update fast path functions Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 18/34] ml/cnxk: move error handling to cnxk layer Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 19/34] ml/cnxk: support config and close of tvmdp library Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 20/34] ml/cnxk: add structures to support TVM model type Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 21/34] ml/cnxk: add support for identify " Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 22/34] ml/cnxk: add support to parse TVM model objects Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 23/34] ml/cnxk: fetch layer info and load TVM model Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 24/34] ml/cnxk: update internal info for " Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 25/34] ml/cnxk: enable model unload in tvmdp library Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 26/34] ml/cnxk: support start and stop for TVM models Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 27/34] ml/cnxk: update internal TVM model info structure Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 28/34] ml/cnxk: support device dump for TVM models Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 29/34] ml/cnxk: enable reporting model runtime as xstats Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 30/34] ml/cnxk: implement I/O alloc and free callbacks Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 31/34] ml/cnxk: add generic ML malloc and free callback Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 32/34] ml/cnxk: support quantize and dequantize callback Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 33/34] ml/cnxk: enable fast-path ops for TVM models Srikanth Yalavarthi
2023-10-17 16:59   ` [PATCH v4 34/34] ml/cnxk: enable creation of mvtvm virtual device Srikanth Yalavarthi
2023-10-18  1:56   ` [PATCH v4 00/34] Implementation of revised ml/cnxk driver Jerin Jacob
2023-10-18  6:55     ` [EXT] " Srikanth Yalavarthi
2023-10-18  6:47 ` [PATCH v5 " Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 01/34] ml/cnxk: drop support for register polling Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 02/34] ml/cnxk: add generic cnxk device structure Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 03/34] ml/cnxk: add generic model and layer structures Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 04/34] ml/cnxk: add generic cnxk request structure Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 05/34] ml/cnxk: add generic cnxk xstats structures Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 06/34] ml/cnxk: rename cnxk ops function pointers struct Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 07/34] ml/cnxk: update device handling functions Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 08/34] ml/cnxk: update queue-pair " Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 09/34] ml/cnxk: update model load and unload functions Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 10/34] ml/cnxk: update model start and stop functions Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 11/34] ml/cnxk: update model utility functions Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 12/34] ml/cnxk: update data quantization functions Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 13/34] ml/cnxk: update device debug functions Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 14/34] ml/cnxk: update device stats functions Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 15/34] ml/cnxk: update device and model xstats functions Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 16/34] ml/cnxk: update fast path functions Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 17/34] ml/cnxk: move error handling to cnxk layer Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 18/34] ml/cnxk: support config and close of tvmdp library Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 19/34] ml/cnxk: add structures to support TVM model type Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 20/34] ml/cnxk: add support for identify " Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 21/34] ml/cnxk: add support to parse TVM model objects Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 22/34] ml/cnxk: fetch layer info and load TVM model Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 23/34] ml/cnxk: update internal info for " Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 24/34] ml/cnxk: enable model unload in tvmdp library Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 25/34] ml/cnxk: enable OCM check for multilayer TVM model Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 26/34] ml/cnxk: support start and stop for TVM models Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 27/34] ml/cnxk: update internal TVM model info structure Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 28/34] ml/cnxk: support device dump for TVM models Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 29/34] ml/cnxk: enable reporting model runtime as xstats Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 30/34] ml/cnxk: implement I/O alloc and free callbacks Srikanth Yalavarthi
2023-10-18  6:47   ` [PATCH v5 31/34] ml/cnxk: add generic ML malloc and free callback Srikanth Yalavarthi
2023-10-18  6:48   ` [PATCH v5 32/34] ml/cnxk: support quantize and dequantize callback Srikanth Yalavarthi
2023-10-18  6:48   ` [PATCH v5 33/34] ml/cnxk: enable fast-path ops for TVM models Srikanth Yalavarthi
2023-10-18  6:48   ` [PATCH v5 34/34] ml/cnxk: enable creation of mvtvm virtual device Srikanth Yalavarthi
2023-10-18 13:53 ` [PATCH v6 00/34] Implementation of revised ml/cnxk driver Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 01/34] ml/cnxk: drop support for register polling Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 02/34] ml/cnxk: add generic cnxk device structure Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 03/34] ml/cnxk: add generic model and layer structures Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 04/34] ml/cnxk: add generic cnxk request structure Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 05/34] ml/cnxk: add generic cnxk xstats structures Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 06/34] ml/cnxk: rename cnxk ops function pointers struct Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 07/34] ml/cnxk: update device handling functions Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 08/34] ml/cnxk: update queue-pair " Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 09/34] ml/cnxk: update model load and unload functions Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 10/34] ml/cnxk: update model start and stop functions Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 11/34] ml/cnxk: update model utility functions Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 12/34] ml/cnxk: update data quantization functions Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 13/34] ml/cnxk: update device debug functions Srikanth Yalavarthi
2023-10-18 13:53   ` [PATCH v6 14/34] ml/cnxk: update device stats functions Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 15/34] ml/cnxk: update device and model xstats functions Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 16/34] ml/cnxk: update fast path functions Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 17/34] ml/cnxk: move error handling to cnxk layer Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 18/34] ml/cnxk: support config and close of tvmdp library Srikanth Yalavarthi
2023-10-18 18:34     ` Jerin Jacob
2023-10-19  6:44       ` [EXT] " Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 19/34] ml/cnxk: add structures to support TVM model type Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 20/34] ml/cnxk: add support for identify " Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 21/34] ml/cnxk: add support to parse TVM model objects Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 22/34] ml/cnxk: fetch layer info and load TVM model Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 23/34] ml/cnxk: update internal info for " Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 24/34] ml/cnxk: enable model unload in tvmdp library Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 25/34] ml/cnxk: enable OCM check for multilayer TVM model Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 26/34] ml/cnxk: support start and stop for TVM models Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 27/34] ml/cnxk: update internal TVM model info structure Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 28/34] ml/cnxk: support device dump for TVM models Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 29/34] ml/cnxk: enable reporting model runtime as xstats Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 30/34] ml/cnxk: implement I/O alloc and free callbacks Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 31/34] ml/cnxk: add generic ML malloc and free callback Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 32/34] ml/cnxk: support quantize and dequantize callback Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 33/34] ml/cnxk: enable fast-path ops for TVM models Srikanth Yalavarthi
2023-10-18 13:54   ` [PATCH v6 34/34] ml/cnxk: enable creation of mvtvm virtual device Srikanth Yalavarthi
2023-10-18 14:20   ` [PATCH v6 00/34] Implementation of revised ml/cnxk driver Jerin Jacob
2023-10-19  6:41     ` [EXT] " Srikanth Yalavarthi
2023-10-19  4:16 ` [PATCH v7 " Srikanth Yalavarthi
2023-10-19  4:16   ` [PATCH v7 01/34] ml/cnxk: drop support for register polling Srikanth Yalavarthi
2023-10-19  4:16   ` [PATCH v7 02/34] ml/cnxk: add generic cnxk device structure Srikanth Yalavarthi
2023-10-19  4:16   ` [PATCH v7 03/34] ml/cnxk: add generic model and layer structures Srikanth Yalavarthi
2023-10-19  4:16   ` [PATCH v7 04/34] ml/cnxk: add generic cnxk request structure Srikanth Yalavarthi
2023-10-19  4:16   ` [PATCH v7 05/34] ml/cnxk: add generic cnxk xstats structures Srikanth Yalavarthi
2023-10-19  4:16   ` [PATCH v7 06/34] ml/cnxk: rename cnxk ops function pointers struct Srikanth Yalavarthi
2023-10-19  4:16   ` [PATCH v7 07/34] ml/cnxk: update device handling functions Srikanth Yalavarthi
2023-10-19  4:16   ` [PATCH v7 08/34] ml/cnxk: update queue-pair " Srikanth Yalavarthi
2023-10-19  4:16   ` [PATCH v7 09/34] ml/cnxk: update model load and unload functions Srikanth Yalavarthi
2023-10-19  4:16   ` [PATCH v7 10/34] ml/cnxk: update model start and stop functions Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 11/34] ml/cnxk: update model utility functions Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 12/34] ml/cnxk: update data quantization functions Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 13/34] ml/cnxk: update device debug functions Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 14/34] ml/cnxk: update device stats functions Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 15/34] ml/cnxk: update device and model xstats functions Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 16/34] ml/cnxk: update fast path functions Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 17/34] ml/cnxk: move error handling to cnxk layer Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 18/34] ml/cnxk: support config and close of tvmdp library Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 19/34] ml/cnxk: add structures to support TVM model type Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 20/34] ml/cnxk: add support for identify " Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 21/34] ml/cnxk: add support to parse TVM model objects Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 22/34] ml/cnxk: fetch layer info and load TVM model Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 23/34] ml/cnxk: update internal info for " Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 24/34] ml/cnxk: enable model unload in tvmdp library Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 25/34] ml/cnxk: enable OCM check for multilayer TVM model Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 26/34] ml/cnxk: support start and stop for TVM models Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 27/34] ml/cnxk: update internal TVM model info structure Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 28/34] ml/cnxk: support device dump for TVM models Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 29/34] ml/cnxk: enable reporting model runtime as xstats Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 30/34] ml/cnxk: implement I/O alloc and free callbacks Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 31/34] ml/cnxk: add generic ML malloc and free callback Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 32/34] ml/cnxk: support quantize and dequantize callback Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 33/34] ml/cnxk: enable fast-path ops for TVM models Srikanth Yalavarthi
2023-10-19  4:17   ` [PATCH v7 34/34] ml/cnxk: enable creation of mvtvm virtual device Srikanth Yalavarthi
2023-10-23  4:41 ` [PATCH v8 00/34] Implementation of revised ml/cnxk driver Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 01/34] ml/cnxk: drop support for register polling Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 02/34] ml/cnxk: add generic cnxk device structure Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 03/34] ml/cnxk: add generic model and layer structures Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 04/34] ml/cnxk: add generic cnxk request structure Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 05/34] ml/cnxk: add generic cnxk xstats structures Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 06/34] ml/cnxk: rename cnxk ops function pointers struct Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 07/34] ml/cnxk: update device handling functions Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 08/34] ml/cnxk: update queue-pair " Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 09/34] ml/cnxk: update model load and unload functions Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 10/34] ml/cnxk: update model start and stop functions Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 11/34] ml/cnxk: update model utility functions Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 12/34] ml/cnxk: update data quantization functions Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 13/34] ml/cnxk: update device debug functions Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 14/34] ml/cnxk: update device stats functions Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 15/34] ml/cnxk: update device and model xstats functions Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 16/34] ml/cnxk: update fast path functions Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 17/34] ml/cnxk: move error handling to cnxk layer Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 18/34] ml/cnxk: support config and close of tvmdp library Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 19/34] ml/cnxk: add structures to support TVM model type Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 20/34] ml/cnxk: add support for identify " Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 21/34] ml/cnxk: add support to parse TVM model objects Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 22/34] ml/cnxk: fetch layer info and load TVM model Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 23/34] ml/cnxk: update internal info for " Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 24/34] ml/cnxk: enable model unload in tvmdp library Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 25/34] ml/cnxk: enable OCM check for multilayer TVM model Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 26/34] ml/cnxk: support start and stop for TVM models Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 27/34] ml/cnxk: update internal TVM model info structure Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 28/34] ml/cnxk: support device dump for TVM models Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 29/34] ml/cnxk: enable reporting model runtime as xstats Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 30/34] ml/cnxk: implement I/O alloc and free callbacks Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 31/34] ml/cnxk: add generic ML malloc and free callback Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 32/34] ml/cnxk: support quantize and dequantize callback Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 33/34] ml/cnxk: enable fast-path ops for TVM models Srikanth Yalavarthi
2023-10-23  4:41   ` [PATCH v8 34/34] ml/cnxk: enable creation of mvtvm virtual device Srikanth Yalavarthi
2023-10-26 12:43 ` [PATCH v9 00/34] Implementation of revised ml/cnxk driver Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 01/34] ml/cnxk: drop support for register polling Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 02/34] ml/cnxk: add generic cnxk device structure Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 03/34] ml/cnxk: add generic model and layer structures Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 04/34] ml/cnxk: add generic cnxk request structure Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 05/34] ml/cnxk: add generic cnxk xstats structures Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 06/34] ml/cnxk: rename cnxk ops function pointers struct Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 07/34] ml/cnxk: update device handling functions Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 08/34] ml/cnxk: update queue-pair " Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 09/34] ml/cnxk: update model load and unload functions Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 10/34] ml/cnxk: update model start and stop functions Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 11/34] ml/cnxk: update model utility functions Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 12/34] ml/cnxk: update data quantization functions Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 13/34] ml/cnxk: update device debug functions Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 14/34] ml/cnxk: update device stats functions Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 15/34] ml/cnxk: update device and model xstats functions Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 16/34] ml/cnxk: update fast path functions Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 17/34] ml/cnxk: move error handling to cnxk layer Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 18/34] ml/cnxk: support config and close of tvmdp library Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 19/34] ml/cnxk: add structures to support TVM model type Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 20/34] ml/cnxk: add support for identify " Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 21/34] ml/cnxk: add support to parse TVM model objects Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 22/34] ml/cnxk: fetch layer info and load TVM model Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 23/34] ml/cnxk: update internal info for " Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 24/34] ml/cnxk: enable model unload in tvmdp library Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 25/34] ml/cnxk: enable OCM check for multilayer TVM model Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 26/34] ml/cnxk: support start and stop for TVM models Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 27/34] ml/cnxk: update internal TVM model info structure Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 28/34] ml/cnxk: support device dump for TVM models Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 29/34] ml/cnxk: enable reporting model runtime as xstats Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 30/34] ml/cnxk: implement I/O alloc and free callbacks Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 31/34] ml/cnxk: add generic ML malloc and free callback Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 32/34] ml/cnxk: support quantize and dequantize callback Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 33/34] ml/cnxk: enable fast-path ops for TVM models Srikanth Yalavarthi
2023-10-26 12:43   ` [PATCH v9 34/34] ml/cnxk: enable creation of mvtvm virtual device Srikanth Yalavarthi
2023-10-29 12:53   ` [PATCH v9 00/34] Implementation of revised ml/cnxk driver Jerin Jacob

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the raw thread as an mbox file (via the archive's mbox download
  link), import it into your mail client, and reply-to-all from there
  (see the sketch after these instructions)

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230830155927.3566-4-syalavarthi@marvell.com \
    --to=syalavarthi@marvell.com \
    --cc=aprabhu@marvell.com \
    --cc=dev@dpdk.org \
    --cc=ptakkar@marvell.com \
    --cc=sshankarnara@marvell.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link on the archive page

Be sure your reply has a Subject: header at the top and a blank line
before the message body.
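
As a concrete sketch of the options above (the mutt invocation, file
path, and SMTP values are illustrative placeholders, not details taken
from this thread or from DPDK infrastructure):

  # Open a saved thread mbox directly in mutt and reply-to-all from
  # there; any mbox-capable client works, and the path is hypothetical:
  mutt -f /path/to/thread.mbox

  # One-time setup so git-send-email(1) can reach an SMTP server; all
  # values are placeholders for your own mail provider:
  git config --global sendemail.smtpServer smtp.example.com
  git config --global sendemail.smtpServerPort 587
  git config --global sendemail.smtpEncryption tls
  git config --global sendemail.smtpUser you@example.com

  # A mailto: link carrying In-Reply-To for this message would look
  # roughly like this (angle brackets percent-encoded; client support
  # for extra headers varies):
  mailto:syalavarthi@marvell.com?In-Reply-To=%3C20230830155927.3566-4-syalavarthi@marvell.com%3E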
This is a public inbox; see the mirroring instructions for how to clone
and mirror all data and code used for this inbox, as well as URLs for
the NNTP newsgroup(s).