DPDK patches and discussions
From: Srikanth Yalavarthi <syalavarthi@marvell.com>
To: Srikanth Yalavarthi <syalavarthi@marvell.com>,
	Anup Prabhu <aprabhu@marvell.com>
Cc: <dev@dpdk.org>, <sshankarnara@marvell.com>, <ptakkar@marvell.com>
Subject: [PATCH v5] app/mldev: add internal function for file read
Date: Wed, 7 Jun 2023 09:20:30 -0700
Message-ID: <20230607162030.3004-1-syalavarthi@marvell.com>
In-Reply-To: <20230323152801.27666-1-syalavarthi@marvell.com>

Added an internal function to read the model, input and reference
files with the required error checks. This change fixes the
unchecked return value and improper use of negative value issues
reported by Coverity Scan for the file read operations.

Coverity issue: 383742, 383743
Fixes: f6661e6d9a3a ("app/mldev: validate model operations")
Fixes: da6793390596 ("app/mldev: support inference validation")

Signed-off-by: Srikanth Yalavarthi <syalavarthi@marvell.com>
---
v5:
* Updated as per review comments

v4:
* Drop use of fread and replace with system calls

v3:
* Replace incorrect use of rte_free with free

v2:
* Replace rte_malloc in ml_read_file with malloc
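
For reviewers, a minimal usage sketch of the new helper (a hypothetical
caller, not part of the patch; opt and fid are taken from the existing
call sites in the diff below):

	size_t size = 0;
	char *buffer = NULL;
	int ret;

	/* ml_read_file() logs the failure itself and returns a negative
	 * errno value (or -ENOMEM); on success it returns 0 and hands back
	 * a buffer allocated with malloc().
	 */
	ret = ml_read_file(opt->filelist[fid].model, &size, &buffer);
	if (ret != 0)
		return ret;

	/* ... use buffer[0 .. size - 1] ... */

	free(buffer);	/* plain free(), not rte_free() */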

 app/test-mldev/test_common.c           | 59 +++++++++++++++++++++++++++
 app/test-mldev/test_common.h           |  2 +
 app/test-mldev/test_inference_common.c | 57 +++++++++++-----------------
 app/test-mldev/test_model_common.c     | 39 +++++-------------
 4 files changed, 93 insertions(+), 64 deletions(-)

diff --git a/app/test-mldev/test_common.c b/app/test-mldev/test_common.c
index 016b31c6ba..cc38e8af05 100644
--- a/app/test-mldev/test_common.c
+++ b/app/test-mldev/test_common.c
@@ -5,12 +5,71 @@
 #include <errno.h>
 
 #include <rte_common.h>
+#include <rte_malloc.h>
 #include <rte_memory.h>
 #include <rte_mldev.h>
 
 #include "ml_common.h"
 #include "test_common.h"
 
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+int
+ml_read_file(char *file, size_t *size, char **buffer)
+{
+	char *file_buffer = NULL;
+	struct stat file_stat;
+	char *file_map;
+	int ret;
+	int fd;
+
+	fd = open(file, O_RDONLY);
+	if (fd == -1) {
+		ml_err("Failed to open file: %s\n", file);
+		return -errno;
+	}
+
+	if (fstat(fd, &file_stat) != 0) {
+		ml_err("fstat failed for file: %s\n", file);
+		ret = -errno;
+		close(fd);
+		return ret;
+	}
+
+	file_buffer = malloc(file_stat.st_size);
+	if (file_buffer == NULL) {
+		ml_err("Failed to allocate memory: %s\n", file);
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	file_map = mmap(0, file_stat.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
+	if (file_map == MAP_FAILED) {
+		ml_err("Failed to map file: %s\n", file);
+		ret = -errno;
+		goto error;
+	}
+
+	rte_memcpy(file_buffer, file_map, file_stat.st_size);
+	munmap(file_map, file_stat.st_size);
+	close(fd);
+
+	*size = file_stat.st_size;
+	*buffer = file_buffer;
+
+	return 0;
+
+error:
+	free(file_buffer);
+
+	close(fd);
+
+	return ret;
+}
+
 bool
 ml_test_cap_check(struct ml_options *opt)
 {
diff --git a/app/test-mldev/test_common.h b/app/test-mldev/test_common.h
index def108d5b2..793841a917 100644
--- a/app/test-mldev/test_common.h
+++ b/app/test-mldev/test_common.h
@@ -27,4 +27,6 @@ int ml_test_device_close(struct ml_test *test, struct ml_options *opt);
 int ml_test_device_start(struct ml_test *test, struct ml_options *opt);
 int ml_test_device_stop(struct ml_test *test, struct ml_options *opt);
 
+int ml_read_file(char *file, size_t *size, char **buffer);
+
 #endif /* TEST_COMMON_H */
diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c
index d929ff6e61..8a6e4725df 100644
--- a/app/test-mldev/test_inference_common.c
+++ b/app/test-mldev/test_inference_common.c
@@ -599,10 +599,10 @@ ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t
 	char mp_name[RTE_MEMPOOL_NAMESIZE];
 	const struct rte_memzone *mz;
 	uint64_t nb_buffers;
+	char *buffer = NULL;
 	uint32_t buff_size;
 	uint32_t mz_size;
-	uint32_t fsize;
-	FILE *fp;
+	size_t fsize;
 	int ret;
 
 	/* get input buffer size */
@@ -642,51 +642,37 @@ ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t
 		t->model[fid].reference = NULL;
 
 	/* load input file */
-	fp = fopen(opt->filelist[fid].input, "r");
-	if (fp == NULL) {
-		ml_err("Failed to open input file : %s\n", opt->filelist[fid].input);
-		ret = -errno;
+	ret = ml_read_file(opt->filelist[fid].input, &fsize, &buffer);
+	if (ret != 0)
 		goto error;
-	}
 
-	fseek(fp, 0, SEEK_END);
-	fsize = ftell(fp);
-	fseek(fp, 0, SEEK_SET);
-	if (fsize != t->model[fid].inp_dsize) {
-		ml_err("Invalid input file, size = %u (expected size = %" PRIu64 ")\n", fsize,
+	if (fsize == t->model[fid].inp_dsize) {
+		rte_memcpy(t->model[fid].input, buffer, fsize);
+		free(buffer);
+	} else {
+		ml_err("Invalid input file, size = %zu (expected size = %" PRIu64 ")\n", fsize,
 		       t->model[fid].inp_dsize);
 		ret = -EINVAL;
-		fclose(fp);
-		goto error;
-	}
-
-	if (fread(t->model[fid].input, 1, t->model[fid].inp_dsize, fp) != t->model[fid].inp_dsize) {
-		ml_err("Failed to read input file : %s\n", opt->filelist[fid].input);
-		ret = -errno;
-		fclose(fp);
 		goto error;
 	}
-	fclose(fp);
 
 	/* load reference file */
+	buffer = NULL;
 	if (t->model[fid].reference != NULL) {
-		fp = fopen(opt->filelist[fid].reference, "r");
-		if (fp == NULL) {
-			ml_err("Failed to open reference file : %s\n",
-			       opt->filelist[fid].reference);
-			ret = -errno;
+		ret = ml_read_file(opt->filelist[fid].reference, &fsize, &buffer);
+		if (ret != 0)
 			goto error;
-		}
 
-		if (fread(t->model[fid].reference, 1, t->model[fid].out_dsize, fp) !=
-		    t->model[fid].out_dsize) {
-			ml_err("Failed to read reference file : %s\n",
-			       opt->filelist[fid].reference);
-			ret = -errno;
-			fclose(fp);
+		if (fsize == t->model[fid].out_dsize) {
+			rte_memcpy(t->model[fid].reference, buffer, fsize);
+			free(buffer);
+			buffer = NULL;
+		} else {
+			ml_err("Invalid reference file, size = %zu (expected size = %" PRIu64 ")\n",
+			       fsize, t->model[fid].out_dsize);
+			ret = -EINVAL;
 			goto error;
 		}
-		fclose(fp);
 	}
 
 	/* create mempool for quantized input and output buffers. ml_request_initialize is
@@ -718,6 +703,9 @@ ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t
 		t->model[fid].io_pool = NULL;
 	}
 
+	if (buffer)
+		free(buffer);
+
 	return ret;
 }
 
diff --git a/app/test-mldev/test_model_common.c b/app/test-mldev/test_model_common.c
index c28e452f29..8dbb0ff89f 100644
--- a/app/test-mldev/test_model_common.c
+++ b/app/test-mldev/test_model_common.c
@@ -14,11 +14,11 @@
 int
 ml_model_load(struct ml_test *test, struct ml_options *opt, struct ml_model *model, uint16_t fid)
 {
-	struct test_common *t = ml_test_priv(test);
 	struct rte_ml_model_params model_params;
-	FILE *fp;
 	int ret;
 
+	RTE_SET_USED(test);
+
 	if (model->state == MODEL_LOADED)
 		return 0;
 
@@ -26,43 +26,22 @@ ml_model_load(struct ml_test *test, struct ml_options *opt, struct ml_model *mod
 		return -EINVAL;
 
 	/* read model binary */
-	fp = fopen(opt->filelist[fid].model, "r");
-	if (fp == NULL) {
-		ml_err("Failed to open model file : %s\n", opt->filelist[fid].model);
-		return -1;
-	}
-
-	fseek(fp, 0, SEEK_END);
-	model_params.size = ftell(fp);
-	fseek(fp, 0, SEEK_SET);
-
-	model_params.addr = rte_malloc_socket("ml_model", model_params.size,
-					      t->dev_info.min_align_size, opt->socket_id);
-	if (model_params.addr == NULL) {
-		ml_err("Failed to allocate memory for model: %s\n", opt->filelist[fid].model);
-		fclose(fp);
-		return -ENOMEM;
-	}
-
-	if (fread(model_params.addr, 1, model_params.size, fp) != model_params.size) {
-		ml_err("Failed to read model file : %s\n", opt->filelist[fid].model);
-		rte_free(model_params.addr);
-		fclose(fp);
-		return -1;
-	}
-	fclose(fp);
+	ret = ml_read_file(opt->filelist[fid].model, &model_params.size,
+			   (char **)&model_params.addr);
+	if (ret != 0)
+		return ret;
 
 	/* load model to device */
 	ret = rte_ml_model_load(opt->dev_id, &model_params, &model->id);
 	if (ret != 0) {
 		ml_err("Failed to load model : %s\n", opt->filelist[fid].model);
 		model->state = MODEL_ERROR;
-		rte_free(model_params.addr);
+		free(model_params.addr);
 		return ret;
 	}
 
-	/* release mz */
-	rte_free(model_params.addr);
+	/* release buffer */
+	free(model_params.addr);
 
 	/* get model info */
 	ret = rte_ml_model_info_get(opt->dev_id, model->id, &model->info);
-- 
2.17.1



Thread overview: 17+ messages
2023-03-23 15:28 [PATCH 1/1] " Srikanth Yalavarthi
2023-03-28 15:52 ` Stephen Hemminger
2023-04-12  8:48   ` [EXT] " Srikanth Yalavarthi
2023-04-23  4:55 ` [PATCH v2] " Srikanth Yalavarthi
2023-05-03  8:56 ` [PATCH v3] " Srikanth Yalavarthi
2023-05-03 14:54   ` Stephen Hemminger
2023-05-03 14:59     ` [EXT] " Srikanth Yalavarthi
2023-05-03 18:28       ` Stephen Hemminger
2023-05-03 23:04         ` Tyler Retzlaff
2023-06-07 11:35 ` [PATCH v4] " Srikanth Yalavarthi
2023-06-07 15:02   ` Stephen Hemminger
2023-06-07 16:21     ` [EXT] " Srikanth Yalavarthi
2023-06-07 16:20 ` Srikanth Yalavarthi [this message]
2023-06-07 16:49   ` [PATCH v5] " Stephen Hemminger
2023-06-07 17:24     ` [EXT] " Srikanth Yalavarthi
2023-06-07 17:24 ` [PATCH v6] " Srikanth Yalavarthi
2023-07-07  8:06   ` Thomas Monjalon
