From: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
To: <dev@dpdk.org>
Cc: <anoobj@marvell.com>, Cheng Jiang <cheng1.jiang@intel.com>,
Kevin Laatz <kevin.laatz@intel.com>,
Bruce Richardson <bruce.richardson@intel.com>,
Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
Subject: [PATCH v3 2/2] app/dma-perf: add SG copy support
Date: Thu, 10 Aug 2023 18:31:37 +0530 [thread overview]
Message-ID: <20230810130137.2529-3-gmuthukrishn@marvell.com> (raw)
In-Reply-To: <20230810130137.2529-1-gmuthukrishn@marvell.com>
Add scatter-gather (SG) copy support to the dma-perf application. The
number of source and destination segments per copy operation can be
configured per test case through the new dma_ptrs_src and dma_ptrs_dst
config options, and a dedicated SG worker path submits copies via
rte_dma_copy_sg().
Signed-off-by: Gowrishankar Muthukrishnan <gmuthukrishn@marvell.com>
---
app/test-dma-perf/benchmark.c | 204 +++++++++++++++++++++++++++++-----
app/test-dma-perf/config.ini | 17 +++
app/test-dma-perf/main.c | 35 +++++-
app/test-dma-perf/main.h | 5 +-
4 files changed, 231 insertions(+), 30 deletions(-)
diff --git a/app/test-dma-perf/benchmark.c b/app/test-dma-perf/benchmark.c
index 9e5b5dc770..5f03f99b7b 100644
--- a/app/test-dma-perf/benchmark.c
+++ b/app/test-dma-perf/benchmark.c
@@ -46,6 +46,10 @@ struct lcore_params {
uint16_t test_secs;
struct rte_mbuf **srcs;
struct rte_mbuf **dsts;
+ struct rte_dma_sge **src_sges;
+ struct rte_dma_sge **dst_sges;
+ uint8_t src_ptrs;
+ uint8_t dst_ptrs;
volatile struct worker_info worker_info;
};
@@ -86,21 +90,31 @@ calc_result(uint32_t buf_size, uint32_t nr_buf, uint16_t nb_workers, uint16_t te
}
static void
-output_result(uint8_t scenario_id, uint32_t lcore_id, char *dma_name, uint16_t ring_size,
- uint16_t kick_batch, uint64_t ave_cycle, uint32_t buf_size, uint32_t nr_buf,
- float memory, float bandwidth, float mops, bool is_dma)
+output_result(struct test_configure *cfg, struct lcore_params *para,
+ uint16_t kick_batch, uint64_t ave_cycle, uint32_t buf_size,
+ uint32_t nr_buf, float memory, float bandwidth, float mops)
{
- if (is_dma)
- printf("lcore %u, DMA %s, DMA Ring Size: %u, Kick Batch Size: %u.\n",
- lcore_id, dma_name, ring_size, kick_batch);
- else
+ uint16_t ring_size = cfg->ring_size.cur;
+ uint8_t scenario_id = cfg->scenario_id;
+ uint32_t lcore_id = para->lcore_id;
+ char *dma_name = para->dma_name;
+
+ if (cfg->is_dma) {
+ printf("lcore %u, DMA %s, DMA Ring Size: %u, Kick Batch Size: %u", lcore_id,
+ dma_name, ring_size, kick_batch);
+ if (cfg->is_sg)
+ printf(" DMA src ptrs: %u, dst ptrs: %u",
+ para->src_ptrs, para->dst_ptrs);
+ printf(".\n");
+ } else {
printf("lcore %u\n", lcore_id);
+ }
printf("Average Cycles/op: %" PRIu64 ", Buffer Size: %u B, Buffer Number: %u, Memory: %.2lf MB, Frequency: %.3lf Ghz.\n",
ave_cycle, buf_size, nr_buf, memory, rte_get_timer_hz()/1000000000.0);
printf("Average Bandwidth: %.3lf Gbps, MOps: %.3lf\n", bandwidth, mops);
- if (is_dma)
+ if (cfg->is_dma)
snprintf(output_str[lcore_id], MAX_OUTPUT_STR_LEN, CSV_LINE_DMA_FMT,
scenario_id, lcore_id, dma_name, ring_size, kick_batch, buf_size,
nr_buf, memory, ave_cycle, bandwidth, mops);
@@ -130,7 +144,7 @@ cache_flush_buf(__rte_unused struct rte_mbuf **array,
/* Configuration of device. */
static void
-configure_dmadev_queue(uint32_t dev_id, uint32_t ring_size)
+configure_dmadev_queue(uint32_t dev_id, uint32_t ring_size, uint8_t ptrs_max)
{
uint16_t vchan = 0;
struct rte_dma_info info;
@@ -153,6 +167,10 @@ configure_dmadev_queue(uint32_t dev_id, uint32_t ring_size)
rte_exit(EXIT_FAILURE, "Error, no configured queues reported on device id. %u\n",
dev_id);
+ if (info.max_sges < ptrs_max)
+ rte_exit(EXIT_FAILURE, "Error, DMA ptrs more than supported by device id %u.\n",
+ dev_id);
+
if (rte_dma_start(dev_id) != 0)
rte_exit(EXIT_FAILURE, "Error with dma start.\n");
}
@@ -166,8 +184,12 @@ config_dmadevs(struct test_configure *cfg)
uint32_t i;
int dev_id;
uint16_t nb_dmadevs = 0;
+ uint8_t ptrs_max = 0;
char *dma_name;
+ if (cfg->is_sg)
+ ptrs_max = RTE_MAX(cfg->src_ptrs, cfg->dst_ptrs);
+
for (i = 0; i < ldm->cnt; i++) {
dma_name = ldm->dma_names[i];
dev_id = rte_dma_get_dev_id_by_name(dma_name);
@@ -177,7 +199,7 @@ config_dmadevs(struct test_configure *cfg)
}
ldm->dma_ids[i] = dev_id;
- configure_dmadev_queue(dev_id, ring_size);
+ configure_dmadev_queue(dev_id, ring_size, ptrs_max);
++nb_dmadevs;
}
@@ -217,7 +239,7 @@ do_dma_submit_and_poll(uint16_t dev_id, uint64_t *async_cnt,
}
static inline int
-do_dma_mem_copy(void *p)
+do_dma_plain_mem_copy(void *p)
{
struct lcore_params *para = (struct lcore_params *)p;
volatile struct worker_info *worker_info = &(para->worker_info);
@@ -270,6 +292,61 @@ do_dma_mem_copy(void *p)
return 0;
}
+static inline int
+do_dma_sg_mem_copy(void *p)
+{
+ struct lcore_params *para = (struct lcore_params *)p;
+ volatile struct worker_info *worker_info = &(para->worker_info);
+ struct rte_dma_sge **src_sges = para->src_sges;
+ struct rte_dma_sge **dst_sges = para->dst_sges;
+ const uint16_t dev_id = para->dev_id;
+ const uint32_t nr_buf = para->nr_buf;
+ const uint16_t kick_batch = para->kick_batch;
+ const uint8_t src_ptrs = para->src_ptrs;
+ const uint8_t dst_ptrs = para->dst_ptrs;
+ uint16_t nr_cpl;
+ uint64_t async_cnt = 0;
+ uint32_t i;
+ uint32_t poll_cnt = 0;
+ int ret;
+
+ worker_info->stop_flag = false;
+ worker_info->ready_flag = true;
+
+ while (!worker_info->start_flag)
+ ;
+
+ while (1) {
+ for (i = 0; i < nr_buf; i++) {
+dma_copy:
+ ret = rte_dma_copy_sg(dev_id, 0, src_sges[i], dst_sges[i],
+ src_ptrs, dst_ptrs, 0);
+ if (unlikely(ret < 0)) {
+ if (ret == -ENOSPC) {
+ do_dma_submit_and_poll(dev_id, &async_cnt, worker_info);
+ goto dma_copy;
+ } else
+ error_exit(dev_id);
+ }
+ async_cnt++;
+
+ if ((async_cnt % kick_batch) == 0)
+ do_dma_submit_and_poll(dev_id, &async_cnt, worker_info);
+ }
+
+ if (worker_info->stop_flag)
+ break;
+ }
+
+ rte_dma_submit(dev_id, 0);
+ while ((async_cnt > 0) && (poll_cnt++ < POLL_MAX)) {
+ nr_cpl = rte_dma_completed(dev_id, 0, MAX_DMA_CPL_NB, NULL, NULL);
+ async_cnt -= nr_cpl;
+ }
+
+ return 0;
+}
+
static inline int
do_cpu_mem_copy(void *p)
{
@@ -303,8 +380,9 @@ do_cpu_mem_copy(void *p)
}
static int
-setup_memory_env(struct test_configure *cfg, struct rte_mbuf ***srcs,
- struct rte_mbuf ***dsts)
+setup_memory_env(struct test_configure *cfg,
+ struct rte_mbuf ***srcs, struct rte_mbuf ***dsts,
+ struct rte_dma_sge ***src_sges, struct rte_dma_sge ***dst_sges)
{
unsigned int buf_size = cfg->buf_size.cur;
unsigned int nr_sockets, i;
@@ -366,15 +444,69 @@ setup_memory_env(struct test_configure *cfg, struct rte_mbuf ***srcs,
memset(rte_pktmbuf_mtod((*dsts)[i], void *), 0, buf_size);
}
+ if (cfg->is_sg) {
+ uint8_t src_ptrs = cfg->src_ptrs;
+ uint8_t dst_ptrs = cfg->dst_ptrs;
+ uint32_t sglen_src, sglen_dst;
+ uint32_t nr_buf = cfg->nr_buf;
+ uint8_t j;
+
+ *src_sges = rte_malloc(NULL, nr_buf * sizeof(struct rte_dma_sge **), 0);
+ if (*src_sges == NULL) {
+ printf("Error: src_sges array malloc failed.\n");
+ return -1;
+ }
+
+ for (i = 0; i < nr_buf; i++) {
+ (*src_sges)[i] = rte_malloc(NULL, src_ptrs * sizeof(struct rte_dma_sge), 0);
+ if ((*src_sges)[i] == NULL) {
+ printf("Error: src_sges malloc failed.\n");
+ return -1;
+ }
+ }
+
+ *dst_sges = rte_malloc(NULL, nr_buf * sizeof(struct rte_dma_sge **), 0);
+ if (*dst_sges == NULL) {
+ printf("Error: dst_sges array malloc failed.\n");
+ return -1;
+ }
+
+ for (i = 0; i < nr_buf; i++) {
+ (*dst_sges)[i] = rte_malloc(NULL, dst_ptrs * sizeof(struct rte_dma_sge), 0);
+ if ((*dst_sges)[i] == NULL) {
+ printf("Error: dst_sges malloc failed.\n");
+ return -1;
+ }
+ }
+
+ sglen_src = buf_size / src_ptrs;
+ sglen_dst = buf_size / dst_ptrs;
+ for (i = 0; i < nr_buf; i++) {
+ for (j = 0; j < src_ptrs; j++) {
+ (*src_sges)[i][j].addr = rte_pktmbuf_iova((*srcs)[i]) +
+ sglen_src * j;
+ (*src_sges)[i][j].length = sglen_src;
+ }
+ (*src_sges)[i][j-1].length += buf_size % src_ptrs;
+
+ for (j = 0; j < dst_ptrs; j++) {
+ (*dst_sges)[i][j].addr = rte_pktmbuf_iova((*dsts)[i]) +
+ sglen_dst * j;
+ (*dst_sges)[i][j].length = sglen_dst;
+ }
+ (*dst_sges)[i][j-1].length += buf_size % dst_ptrs;
+ }
+ }
return 0;
}
int
-mem_copy_benchmark(struct test_configure *cfg, bool is_dma)
+mem_copy_benchmark(struct test_configure *cfg)
{
uint16_t i;
uint32_t offset;
unsigned int lcore_id = 0;
+ struct rte_dma_sge **src_sges = NULL, **dst_sges = NULL;
struct rte_mbuf **srcs = NULL, **dsts = NULL;
struct lcore_dma_map_t *ldm = &cfg->lcore_dma_map;
unsigned int buf_size = cfg->buf_size.cur;
@@ -389,10 +521,10 @@ mem_copy_benchmark(struct test_configure *cfg, bool is_dma)
float bandwidth, bandwidth_total;
int ret = 0;
- if (setup_memory_env(cfg, &srcs, &dsts) < 0)
+ if (setup_memory_env(cfg, &srcs, &dsts, &src_sges, &dst_sges) < 0)
goto out;
- if (is_dma)
+ if (cfg->is_dma)
if (config_dmadevs(cfg) < 0)
goto out;
@@ -412,7 +544,7 @@ mem_copy_benchmark(struct test_configure *cfg, bool is_dma)
printf("lcore parameters malloc failure for lcore %d\n", lcore_id);
break;
}
- if (is_dma) {
+ if (cfg->is_dma) {
lcores[i]->dma_name = ldm->dma_names[i];
lcores[i]->dev_id = ldm->dma_ids[i];
lcores[i]->kick_batch = kick_batch;
@@ -426,10 +558,23 @@ mem_copy_benchmark(struct test_configure *cfg, bool is_dma)
lcores[i]->scenario_id = cfg->scenario_id;
lcores[i]->lcore_id = lcore_id;
- if (is_dma)
- rte_eal_remote_launch(do_dma_mem_copy, (void *)(lcores[i]), lcore_id);
- else
+ if (cfg->is_sg) {
+ lcores[i]->src_ptrs = cfg->src_ptrs;
+ lcores[i]->dst_ptrs = cfg->dst_ptrs;
+ lcores[i]->src_sges = src_sges + offset * cfg->src_ptrs;
+ lcores[i]->dst_sges = dst_sges + offset * cfg->dst_ptrs;
+ }
+
+ if (cfg->is_dma) {
+ if (!cfg->is_sg)
+ rte_eal_remote_launch(do_dma_plain_mem_copy, (void *)(lcores[i]),
+ lcore_id);
+ else
+ rte_eal_remote_launch(do_dma_sg_mem_copy, (void *)(lcores[i]),
+ lcore_id);
+ } else {
rte_eal_remote_launch(do_cpu_mem_copy, (void *)(lcores[i]), lcore_id);
+ }
}
while (1) {
@@ -478,10 +623,8 @@ mem_copy_benchmark(struct test_configure *cfg, bool is_dma)
calc_result(buf_size, nr_buf, nb_workers, test_secs,
lcores[i]->worker_info.test_cpl,
&memory, &avg_cycles, &bandwidth, &mops);
- output_result(cfg->scenario_id, lcores[i]->lcore_id,
- lcores[i]->dma_name, cfg->ring_size.cur, kick_batch,
- avg_cycles, buf_size, nr_buf / nb_workers, memory,
- bandwidth, mops, is_dma);
+ output_result(cfg, lcores[i], kick_batch, avg_cycles, buf_size,
+ nr_buf / nb_workers, memory, bandwidth, mops);
mops_total += mops;
bandwidth_total += bandwidth;
avg_cycles_total += avg_cycles;
@@ -510,13 +653,24 @@ mem_copy_benchmark(struct test_configure *cfg, bool is_dma)
rte_mempool_free(dst_pool);
dst_pool = NULL;
+ /* free sges for mbufs */
+ for (i = 0; i < nr_buf; i++) {
+ rte_free(src_sges[i]);
+ rte_free(dst_sges[i]);
+ }
+
+ rte_free(src_sges);
+ src_sges = NULL;
+
+ rte_free(dst_sges);
+ dst_sges = NULL;
/* free the worker parameters */
for (i = 0; i < nb_workers; i++) {
rte_free(lcores[i]);
lcores[i] = NULL;
}
- if (is_dma) {
+ if (cfg->is_dma) {
for (i = 0; i < nb_workers; i++) {
printf("Stopping dmadev %d\n", ldm->dma_ids[i]);
rte_dma_stop(ldm->dma_ids[i]);
diff --git a/app/test-dma-perf/config.ini b/app/test-dma-perf/config.ini
index b550f4b23f..f1b268a384 100644
--- a/app/test-dma-perf/config.ini
+++ b/app/test-dma-perf/config.ini
@@ -9,6 +9,8 @@
; "buf_size" denotes the memory size of a single operation.
; "dma_ring_size" denotes the dma ring buffer size. It should be must be a power of two, and between
; 64 and 4096.
+; "dma_ptrs_src" denotes the number of source scatter-gather segments per copy operation.
+; "dma_ptrs_dst" denotes the number of destination scatter-gather segments per copy operation.
; "kick_batch" denotes the dma operation batch size, and should be greater than 1 normally.
; The format for variables is variable=first,last,increment,ADD|MUL.
@@ -50,6 +52,21 @@ lcore_dma=lcore10@0000:00:04.2, lcore11@0000:00:04.3
eal_args=--in-memory --file-prefix=test
[case2]
+type=DMA_MEM_COPY
+mem_size=10
+buf_size=64,8192,2,MUL
+dma_ring_size=1024
+dma_ptrs_src=4
+dma_ptrs_dst=1
+kick_batch=32
+src_numa_node=0
+dst_numa_node=0
+cache_flush=0
+test_seconds=2
+lcore_dma=lcore10@0000:00:04.2, lcore11@0000:00:04.3
+eal_args=--in-memory --file-prefix=test
+
+[case3]
type=CPU_MEM_COPY
mem_size=10
buf_size=64,8192,2,MUL
diff --git a/app/test-dma-perf/main.c b/app/test-dma-perf/main.c
index f917be4216..f1779a166b 100644
--- a/app/test-dma-perf/main.c
+++ b/app/test-dma-perf/main.c
@@ -93,10 +93,8 @@ run_test_case(struct test_configure *case_cfg)
switch (case_cfg->test_type) {
case TEST_TYPE_DMA_MEM_COPY:
- ret = mem_copy_benchmark(case_cfg, true);
- break;
case TEST_TYPE_CPU_MEM_COPY:
- ret = mem_copy_benchmark(case_cfg, false);
+ ret = mem_copy_benchmark(case_cfg);
break;
default:
printf("Unknown test type. %s\n", case_cfg->test_type_str);
@@ -325,7 +323,8 @@ load_configs(const char *path)
char section_name[CFG_NAME_LEN];
const char *case_type;
const char *lcore_dma;
- const char *mem_size_str, *buf_size_str, *ring_size_str, *kick_batch_str;
+ const char *mem_size_str, *buf_size_str, *ring_size_str, *kick_batch_str,
+ *src_ptrs_str, *dst_ptrs_str;
int args_nr, nb_vp;
bool is_dma;
@@ -361,12 +360,14 @@ load_configs(const char *path)
test_case->test_type = TEST_TYPE_CPU_MEM_COPY;
test_case->test_type_str = CPU_MEM_COPY;
is_dma = false;
+
} else {
printf("Error: Wrong test case type %s in case%d.\n", case_type, i + 1);
test_case->is_valid = false;
continue;
}
+ test_case->is_dma = is_dma;
test_case->src_numa_node = (int)atoi(rte_cfgfile_get_entry(cfgfile,
section_name, "src_numa_node"));
test_case->dst_numa_node = (int)atoi(rte_cfgfile_get_entry(cfgfile,
@@ -401,6 +402,32 @@ load_configs(const char *path)
} else if (args_nr == 4)
nb_vp++;
+ src_ptrs_str = rte_cfgfile_get_entry(cfgfile, section_name,
+ "dma_ptrs_src");
+ if (src_ptrs_str != NULL) {
+ test_case->src_ptrs = (int)atoi(rte_cfgfile_get_entry(cfgfile,
+ section_name, "dma_ptrs_src"));
+ }
+
+ dst_ptrs_str = rte_cfgfile_get_entry(cfgfile, section_name,
+ "dma_ptrs_dst");
+ if (dst_ptrs_str != NULL) {
+ test_case->dst_ptrs = (int)atoi(rte_cfgfile_get_entry(cfgfile,
+ section_name, "dma_ptrs_dst"));
+ }
+
+ if ((src_ptrs_str != NULL && dst_ptrs_str == NULL) ||
+ (src_ptrs_str == NULL && dst_ptrs_str != NULL)) {
+ printf("parse dma_ptrs_src, dma_ptrs_dst error in case %d.\n",
+ i + 1);
+ test_case->is_valid = false;
+ continue;
+ } else if (src_ptrs_str != NULL && dst_ptrs_str != NULL) {
+ test_case->is_sg = true;
+ } else {
+ test_case->is_sg = false;
+ }
+
kick_batch_str = rte_cfgfile_get_entry(cfgfile, section_name, "kick_batch");
args_nr = parse_entry(kick_batch_str, &test_case->kick_batch);
if (args_nr < 0) {
diff --git a/app/test-dma-perf/main.h b/app/test-dma-perf/main.h
index 658f22f673..b240bb5497 100644
--- a/app/test-dma-perf/main.h
+++ b/app/test-dma-perf/main.h
@@ -47,11 +47,14 @@ struct test_configure {
uint16_t dst_numa_node;
uint16_t opcode;
bool is_dma;
+ bool is_sg;
struct lcore_dma_map_t lcore_dma_map;
struct test_configure_entry mem_size;
struct test_configure_entry buf_size;
struct test_configure_entry ring_size;
struct test_configure_entry kick_batch;
+ uint8_t src_ptrs;
+ uint8_t dst_ptrs;
uint8_t cache_flush;
uint32_t nr_buf;
uint16_t test_secs;
@@ -59,6 +62,6 @@ struct test_configure {
uint8_t scenario_id;
};
-int mem_copy_benchmark(struct test_configure *cfg, bool is_dma);
+int mem_copy_benchmark(struct test_configure *cfg);
#endif /* MAIN_H */
--
2.25.1
next prev parent reply other threads:[~2023-08-10 13:02 UTC|newest]
Thread overview: 79+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-08-10 10:57 [PATCH v2] " Gowrishankar Muthukrishnan
2023-08-10 13:01 ` [PATCH v3 0/2] " Gowrishankar Muthukrishnan
2023-08-10 13:01 ` [PATCH v3 1/2] app/dma-perf: validate copied memory Gowrishankar Muthukrishnan
2023-08-23 11:46 ` [EXT] " Pavan Nikhilesh Bhagavatula
2023-08-10 13:01 ` Gowrishankar Muthukrishnan [this message]
2023-09-21 3:02 ` [PATCH v3 0/2] app/dma-perf: add SG copy support Jiang, Cheng1
2023-09-24 9:32 ` [PATCH v4 " Gowrishankar Muthukrishnan
2023-09-24 9:32 ` [PATCH v4 1/2] app/dma-perf: validate copied memory Gowrishankar Muthukrishnan
2023-09-24 9:32 ` [PATCH v4 2/2] app/dma-perf: add SG copy support Gowrishankar Muthukrishnan
2023-09-28 21:12 ` Pavan Nikhilesh Bhagavatula
2023-10-26 18:31 ` [PATCH v5 0/4] app/dma-perf: PCI Dev and " Gowrishankar Muthukrishnan
2023-10-26 18:31 ` [PATCH v5 1/4] app/dma-perf: add skip support Gowrishankar Muthukrishnan
2023-11-10 9:03 ` Anoob Joseph
2023-10-26 18:31 ` [PATCH v5 2/4] app/dma-perf: add PCI device support Gowrishankar Muthukrishnan
2023-11-10 9:04 ` Anoob Joseph
2023-10-26 18:31 ` [PATCH v5 3/4] app/dma-perf: validate copied memory Gowrishankar Muthukrishnan
2023-11-10 9:05 ` Anoob Joseph
2023-10-26 18:31 ` [PATCH v5 4/4] app/dma-perf: add SG copy support Gowrishankar Muthukrishnan
2023-11-10 9:07 ` Anoob Joseph
2023-11-13 4:41 ` [PATCH v6 0/4] PCI Dev and " Gowrishankar Muthukrishnan
2023-11-13 4:41 ` [PATCH v6 1/4] app/dma-perf: add skip support Gowrishankar Muthukrishnan
2023-11-13 4:41 ` [PATCH v6 2/4] app/dma-perf: add PCI device support Gowrishankar Muthukrishnan
2023-11-13 4:41 ` [PATCH v6 3/4] app/dma-perf: validate copied memory Gowrishankar Muthukrishnan
2023-11-13 4:41 ` [PATCH v6 4/4] app/dma-perf: add SG copy support Gowrishankar Muthukrishnan
2023-11-17 12:15 ` [PATCH v7 0/4] PCI Dev and " Gowrishankar Muthukrishnan
2023-11-17 12:15 ` [PATCH v7 1/4] app/dma-perf: add skip support Gowrishankar Muthukrishnan
2023-11-20 2:54 ` fengchengwen
2023-11-22 12:01 ` [EXT] " Amit Prakash Shukla
2023-11-17 12:15 ` [PATCH v7 2/4] app/dma-perf: add PCI device support Gowrishankar Muthukrishnan
2023-11-17 12:15 ` [PATCH v7 3/4] app/dma-perf: validate copied memory Gowrishankar Muthukrishnan
2023-11-17 12:15 ` [PATCH v7 4/4] app/dma-perf: add SG copy support Gowrishankar Muthukrishnan
2023-11-22 11:06 ` [PATCH v8 0/4] PCI Dev and " Gowrishankar Muthukrishnan
2023-11-22 11:06 ` [PATCH v8 1/4] app/dma-perf: add skip support Gowrishankar Muthukrishnan
2023-11-22 11:06 ` [PATCH v8 2/4] app/dma-perf: add PCI device support Gowrishankar Muthukrishnan
2023-11-23 1:12 ` fengchengwen
2024-02-21 3:26 ` fengchengwen
2024-02-27 9:27 ` [EXT] " Amit Prakash Shukla
2023-11-22 11:06 ` [PATCH v8 3/4] app/dma-perf: validate copied memory Gowrishankar Muthukrishnan
2023-11-23 1:14 ` fengchengwen
2023-11-22 11:06 ` [PATCH v8 4/4] app/dma-perf: add SG copy support Gowrishankar Muthukrishnan
2024-01-25 12:44 ` fengchengwen
2024-02-21 3:52 ` fengchengwen
2024-02-27 16:09 ` [EXT] " Gowrishankar Muthukrishnan
2023-12-07 10:11 ` [PATCH v8 0/4] PCI Dev and " Gowrishankar Muthukrishnan
2024-02-05 10:37 ` Gowrishankar Muthukrishnan
2024-02-27 16:00 ` [PATCH v9 " Amit Prakash Shukla
2024-02-27 16:00 ` [PATCH v9 1/4] app/dma-perf: add skip support Amit Prakash Shukla
2024-02-27 16:00 ` [PATCH v9 2/4] app/dma-perf: add PCI device support Amit Prakash Shukla
2024-02-27 16:00 ` [PATCH v9 3/4] app/dma-perf: validate copied memory Amit Prakash Shukla
2024-02-27 16:00 ` [PATCH v9 4/4] app/dma-perf: add SG copy support Amit Prakash Shukla
2024-02-27 18:35 ` [PATCH v10 0/4] PCI Dev and " Amit Prakash Shukla
2024-02-27 18:35 ` [PATCH v10 1/4] app/dma-perf: add skip support Amit Prakash Shukla
2024-02-27 18:35 ` [PATCH v10 2/4] app/dma-perf: add PCI device support Amit Prakash Shukla
2024-02-27 18:35 ` [PATCH v10 3/4] app/dma-perf: validate copied memory Amit Prakash Shukla
2024-02-28 8:10 ` fengchengwen
2024-02-28 9:09 ` [EXT] " Gowrishankar Muthukrishnan
2024-02-29 13:48 ` [v11 0/4] PCI Dev and SG copy support Gowrishankar Muthukrishnan
2024-02-29 13:48 ` [v11 1/4] app/dma-perf: add skip support Gowrishankar Muthukrishnan
2024-02-29 13:48 ` [v11 2/4] app/dma-perf: add PCI device support Gowrishankar Muthukrishnan
2024-02-29 13:48 ` [v11 3/4] app/dma-perf: validate copied memory Gowrishankar Muthukrishnan
2024-02-29 13:48 ` [v11 4/4] app/dma-perf: add SG copy support Gowrishankar Muthukrishnan
2024-03-06 19:50 ` [v11 0/4] PCI Dev and " Thomas Monjalon
2024-03-07 13:48 ` fengchengwen
2024-03-07 13:55 ` [EXTERNAL] " Gowrishankar Muthukrishnan
2024-03-12 9:15 ` Thomas Monjalon
2024-03-12 12:05 ` fengchengwen
2024-03-12 12:24 ` Gowrishankar Muthukrishnan
2024-03-13 7:26 ` fengchengwen
2024-03-13 8:22 ` Gowrishankar Muthukrishnan
2024-03-15 7:30 ` Gowrishankar Muthukrishnan
2024-03-15 13:09 ` Thomas Monjalon
2024-03-18 7:32 ` Gowrishankar Muthukrishnan
2024-03-07 13:48 ` Gowrishankar Muthukrishnan
2024-02-27 18:56 ` [PATCH v10 4/4] app/dma-perf: add " Amit Prakash Shukla
2024-02-28 9:31 ` fengchengwen
2024-02-29 13:16 ` [EXT] " Gowrishankar Muthukrishnan
2024-03-01 2:07 ` fengchengwen
2024-03-01 8:06 ` [EXTERNAL] " Gowrishankar Muthukrishnan
2024-03-01 9:45 ` fengchengwen
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230810130137.2529-3-gmuthukrishn@marvell.com \
--to=gmuthukrishn@marvell.com \
--cc=anoobj@marvell.com \
--cc=bruce.richardson@intel.com \
--cc=cheng1.jiang@intel.com \
--cc=dev@dpdk.org \
--cc=kevin.laatz@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).