From: Pablo de Lara <pablo.de.lara.guarch@intel.com>
To: declan.doherty@intel.com, fiona.trahe@intel.com,
deepak.k.jain@intel.com, john.griffin@intel.com,
jerin.jacob@caviumnetworks.com, akhil.goyal@nxp.com,
hemant.agrawal@nxp.com
Cc: dev@dpdk.org, Pablo de Lara <pablo.de.lara.guarch@intel.com>
Subject: [dpdk-dev] [PATCH 3/6] app/crypto-perf: parse segment size
Date: Fri, 18 Aug 2017 09:05:17 +0100 [thread overview]
Message-ID: <20170818080520.43088-4-pablo.de.lara.guarch@intel.com> (raw)
In-Reply-To: <20170818080520.43088-1-pablo.de.lara.guarch@intel.com>
Instead of parsing the number of segments from the command line,
parse the segment size, as it is more usual to keep the segment
size fixed; different packet sizes then simply require a
different number of segments.
Signed-off-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
---
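For reference, the segment count that the test constructors now derive from a
fixed segment size can be sketched as a standalone helper; the function name
and the example sizes below are illustrative only and not part of the patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Number of segments needed for one packet: total data (buffer + digest)
 * divided by the fixed segment size, rounded up.
 */
static uint32_t
segments_needed(uint32_t max_buffer_size, uint32_t digest_sz,
		uint32_t segment_sz)
{
	uint32_t max_size = max_buffer_size + digest_sz;

	return (max_size % segment_sz) ?
			(max_size / segment_sz) + 1 :
			max_size / segment_sz;
}

int
main(void)
{
	/* 2048-byte buffer + 16-byte digest in 512-byte segments -> 5 */
	printf("%" PRIu32 "\n", segments_needed(2048, 16, 512));
	return 0;
}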
app/test-crypto-perf/cperf_ops.c | 24 ++++++++
app/test-crypto-perf/cperf_options.h | 4 +-
app/test-crypto-perf/cperf_options_parsing.c | 38 +++++++++----
app/test-crypto-perf/cperf_test_latency.c | 82 +++++++++++++++++-----------
app/test-crypto-perf/cperf_test_throughput.c | 82 +++++++++++++++++-----------
app/test-crypto-perf/cperf_test_verify.c | 82 +++++++++++++++++-----------
doc/guides/tools/cryptoperf.rst | 6 +-
7 files changed, 207 insertions(+), 111 deletions(-)
diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index 5be20d9..ad32065 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -175,6 +175,14 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ /*
+ * If there is not enough room in segment,
+ * place the digest in the next segment
+ */
+ if ((tbuf->data_len - offset) < options->digest_sz) {
+ tbuf = tbuf->next;
+ offset = 0;
+ }
buf = tbuf;
}
@@ -256,6 +264,14 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ /*
+ * If there is not enough room in segment,
+ * place the digest in the next segment
+ */
+ if ((tbuf->data_len - offset) < options->digest_sz) {
+ tbuf = tbuf->next;
+ offset = 0;
+ }
buf = tbuf;
}
@@ -346,6 +362,14 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
offset -= tbuf->data_len;
tbuf = tbuf->next;
}
+ /*
+ * If there is not enough room in segment,
+ * place the digest in the next segment
+ */
+ if ((tbuf->data_len - offset) < options->digest_sz) {
+ tbuf = tbuf->next;
+ offset = 0;
+ }
buf = tbuf;
}
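The digest placement added above can be illustrated with a simplified,
mbuf-free sketch; the struct and helper below are illustrative only and do
not use the DPDK API:

#include <stddef.h>
#include <stdint.h>

/* Simplified segment descriptor standing in for a chained rte_mbuf. */
struct seg {
	uint8_t *data;
	uint32_t data_len;
	struct seg *next;
};

/*
 * Walk the chain to the segment containing 'offset'; if the tail of that
 * segment cannot hold the whole digest, spill over to the start of the
 * next segment, as the checks above do for each crypto operation.
 */
static uint8_t *
digest_location(struct seg *s, uint32_t offset, uint32_t digest_sz)
{
	while (s != NULL && offset >= s->data_len) {
		offset -= s->data_len;
		s = s->next;
	}
	if (s == NULL)
		return NULL;
	if ((s->data_len - offset) < digest_sz && s->next != NULL) {
		s = s->next;
		offset = 0;
	}
	return s->data + offset;
}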
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
index 10cd2d8..5f2b28b 100644
--- a/app/test-crypto-perf/cperf_options.h
+++ b/app/test-crypto-perf/cperf_options.h
@@ -11,7 +11,7 @@
#define CPERF_TOTAL_OPS ("total-ops")
#define CPERF_BURST_SIZE ("burst-sz")
#define CPERF_BUFFER_SIZE ("buffer-sz")
-#define CPERF_SEGMENTS_NB ("segments-nb")
+#define CPERF_SEGMENT_SIZE ("segment-sz")
#define CPERF_DEVTYPE ("devtype")
#define CPERF_OPTYPE ("optype")
@@ -66,7 +66,7 @@ struct cperf_options {
uint32_t pool_sz;
uint32_t total_ops;
- uint32_t segments_nb;
+ uint32_t segment_sz;
uint32_t test_buffer_size;
uint32_t sessionless:1;
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
index 085aa8f..dbe87df 100644
--- a/app/test-crypto-perf/cperf_options_parsing.c
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -322,17 +322,17 @@ parse_buffer_sz(struct cperf_options *opts, const char *arg)
}
static int
-parse_segments_nb(struct cperf_options *opts, const char *arg)
+parse_segment_sz(struct cperf_options *opts, const char *arg)
{
- int ret = parse_uint32_t(&opts->segments_nb, arg);
+ int ret = parse_uint32_t(&opts->segment_sz, arg);
if (ret) {
- RTE_LOG(ERR, USER1, "failed to parse segments number\n");
+ RTE_LOG(ERR, USER1, "failed to parse segment size\n");
return -1;
}
- if ((opts->segments_nb == 0) || (opts->segments_nb > 255)) {
- RTE_LOG(ERR, USER1, "invalid segments number specified\n");
+ if (opts->segment_sz == 0) {
+ RTE_LOG(ERR, USER1, "Segment size has to be bigger than 0\n");
return -1;
}
@@ -640,7 +640,7 @@ static struct option lgopts[] = {
{ CPERF_TOTAL_OPS, required_argument, 0, 0 },
{ CPERF_BURST_SIZE, required_argument, 0, 0 },
{ CPERF_BUFFER_SIZE, required_argument, 0, 0 },
- { CPERF_SEGMENTS_NB, required_argument, 0, 0 },
+ { CPERF_SEGMENT_SIZE, required_argument, 0, 0 },
{ CPERF_DEVTYPE, required_argument, 0, 0 },
{ CPERF_OPTYPE, required_argument, 0, 0 },
@@ -697,7 +697,11 @@ cperf_options_default(struct cperf_options *opts)
opts->min_burst_size = 32;
opts->inc_burst_size = 0;
- opts->segments_nb = 1;
+ /*
+ * Will be parsed from command line or set to
+ * maximum buffer size + digest, later
+ */
+ opts->segment_sz = 0;
strncpy(opts->device_type, "crypto_aesni_mb",
sizeof(opts->device_type));
@@ -739,7 +743,7 @@ cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
{ CPERF_TOTAL_OPS, parse_total_ops },
{ CPERF_BURST_SIZE, parse_burst_sz },
{ CPERF_BUFFER_SIZE, parse_buffer_sz },
- { CPERF_SEGMENTS_NB, parse_segments_nb },
+ { CPERF_SEGMENT_SIZE, parse_segment_sz },
{ CPERF_DEVTYPE, parse_device_type },
{ CPERF_OPTYPE, parse_op_type },
{ CPERF_SESSIONLESS, parse_sessionless },
@@ -847,9 +851,21 @@ check_cipher_buffer_length(struct cperf_options *options)
int
cperf_options_check(struct cperf_options *options)
{
- if (options->segments_nb > options->min_buffer_size) {
+ if (options->op_type == CPERF_CIPHER_ONLY)
+ options->digest_sz = 0;
+
+ /*
+ * If segment size is not set, assume only one segment,
+ * big enough to contain the largest buffer and the digest
+ */
+ if (options->segment_sz == 0)
+ options->segment_sz = options->max_buffer_size +
+ options->digest_sz;
+
+ if (options->segment_sz < options->digest_sz) {
RTE_LOG(ERR, USER1,
- "Segments number greater than buffer size.\n");
+ "Segment size should be at least "
+ "the size of the digest\n");
return -EINVAL;
}
@@ -965,7 +981,7 @@ cperf_options_dump(struct cperf_options *opts)
printf("%u ", opts->burst_size_list[size_idx]);
printf("\n");
}
- printf("\n# segments per buffer: %u\n", opts->segments_nb);
+ printf("\n# segment size: %u\n", opts->segment_sz);
printf("#\n");
printf("# cryptodev type: %s\n", opts->device_type);
printf("#\n");
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 2a46af9..b272bb1 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -116,18 +116,18 @@ cperf_latency_test_free(struct cperf_latency_ctx *ctx, uint32_t mbuf_nb)
static struct rte_mbuf *
cperf_mbuf_create(struct rte_mempool *mempool,
- uint32_t segments_nb,
+ uint32_t segment_sz,
+ uint32_t segment_nb,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector)
{
struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->max_buffer_size / segments_nb;
- uint32_t last_sz = options->max_buffer_size % segments_nb;
uint8_t *mbuf_data;
uint8_t *test_data =
(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
test_vector->plaintext.data :
test_vector->ciphertext.data;
+ uint32_t remaining_bytes = options->max_buffer_size;
mbuf = rte_pktmbuf_alloc(mempool);
if (mbuf == NULL)
@@ -137,11 +137,18 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (mbuf_data == NULL)
goto error;
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
+ if (options->max_buffer_size <= segment_sz) {
+ memcpy(mbuf_data, test_data, options->max_buffer_size);
+ test_data += options->max_buffer_size;
+ remaining_bytes = 0;
+ } else {
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ remaining_bytes -= segment_sz;
+ }
+ segment_nb--;
- while (segments_nb) {
+ while (remaining_bytes) {
struct rte_mbuf *m;
m = rte_pktmbuf_alloc(mempool);
@@ -154,22 +161,32 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (mbuf_data == NULL)
goto error;
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
+ if (remaining_bytes <= segment_sz) {
+ memcpy(mbuf_data, test_data, remaining_bytes);
+ remaining_bytes = 0;
+ test_data += remaining_bytes;
+ } else {
+ memcpy(mbuf_data, test_data, segment_sz);
+ remaining_bytes -= segment_sz;
+ test_data += segment_sz;
+ }
+ segment_nb--;
}
- if (last_sz) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
- if (mbuf_data == NULL)
- goto error;
+ /*
+ * If there was not enough room for the digest at the end
+ * of the last segment, allocate a new one
+ */
+ if (segment_nb != 0) {
+ struct rte_mbuf *m;
- memcpy(mbuf_data, test_data, last_sz);
- }
+ m = rte_pktmbuf_alloc(mempool);
+
+ if (m == NULL)
+ goto error;
- if (options->op_type != CPERF_CIPHER_ONLY) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->digest_sz);
+ rte_pktmbuf_chain(mbuf, m);
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
if (mbuf_data == NULL)
goto error;
}
@@ -217,13 +234,14 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
dev_id);
+ uint32_t max_size = options->max_buffer_size + options->digest_sz;
+ uint32_t segment_nb = (max_size % options->segment_sz) ?
+ (max_size / options->segment_sz) + 1 :
+ max_size / options->segment_sz;
+
ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
- options->pool_sz * options->segments_nb, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- (options->max_buffer_size / options->segments_nb) +
- (options->max_buffer_size % options->segments_nb) +
- options->digest_sz),
+ options->pool_sz * segment_nb, 0, 0,
+ RTE_PKTMBUF_HEADROOM + options->segment_sz,
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
@@ -236,7 +254,9 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_in, options->segments_nb,
+ ctx->pkt_mbuf_pool_in,
+ options->segment_sz,
+ segment_nb,
options, test_vector);
if (ctx->mbufs_in[mbuf_idx] == NULL)
goto err;
@@ -251,9 +271,7 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
pool_name, options->pool_sz, 0, 0,
RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- options->max_buffer_size +
- options->digest_sz),
+ max_size,
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
@@ -267,8 +285,8 @@ cperf_latency_test_constructor(struct rte_mempool *sess_mp,
for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
if (options->out_of_place == 1) {
ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_out, 1,
- options, test_vector);
+ ctx->pkt_mbuf_pool_out, max_size,
+ 1, options, test_vector);
if (ctx->mbufs_out[mbuf_idx] == NULL)
goto err;
} else {
@@ -339,7 +357,7 @@ cperf_latency_test_runner(void *arg)
int linearize = 0;
/* Check if source mbufs require coalescing */
- if (ctx->options->segments_nb > 1) {
+ if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
rte_cryptodev_info_get(ctx->dev_id, &dev_info);
if ((dev_info.feature_flags &
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index 07aea6a..d5e93f7 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -100,18 +100,18 @@ cperf_throughput_test_free(struct cperf_throughput_ctx *ctx, uint32_t mbuf_nb)
static struct rte_mbuf *
cperf_mbuf_create(struct rte_mempool *mempool,
- uint32_t segments_nb,
+ uint32_t segment_sz,
+ uint32_t segment_nb,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector)
{
struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->max_buffer_size / segments_nb;
- uint32_t last_sz = options->max_buffer_size % segments_nb;
uint8_t *mbuf_data;
uint8_t *test_data =
(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
test_vector->plaintext.data :
test_vector->ciphertext.data;
+ uint32_t remaining_bytes = options->max_buffer_size;
mbuf = rte_pktmbuf_alloc(mempool);
if (mbuf == NULL)
@@ -121,11 +121,18 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (mbuf_data == NULL)
goto error;
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
+ if (options->max_buffer_size <= segment_sz) {
+ memcpy(mbuf_data, test_data, options->max_buffer_size);
+ test_data += options->max_buffer_size;
+ remaining_bytes = 0;
+ } else {
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ remaining_bytes -= segment_sz;
+ }
+ segment_nb--;
- while (segments_nb) {
+ while (remaining_bytes) {
struct rte_mbuf *m;
m = rte_pktmbuf_alloc(mempool);
@@ -138,22 +145,32 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (mbuf_data == NULL)
goto error;
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
+ if (remaining_bytes <= segment_sz) {
+ memcpy(mbuf_data, test_data, remaining_bytes);
+ remaining_bytes = 0;
+ test_data += remaining_bytes;
+ } else {
+ memcpy(mbuf_data, test_data, segment_sz);
+ remaining_bytes -= segment_sz;
+ test_data += segment_sz;
+ }
+ segment_nb--;
}
- if (last_sz) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
- if (mbuf_data == NULL)
- goto error;
+ /*
+ * If there was not enough room for the digest at the end
+ * of the last segment, allocate a new one
+ */
+ if (segment_nb != 0) {
+ struct rte_mbuf *m;
- memcpy(mbuf_data, test_data, last_sz);
- }
+ m = rte_pktmbuf_alloc(mempool);
+
+ if (m == NULL)
+ goto error;
- if (options->op_type != CPERF_CIPHER_ONLY) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->digest_sz);
+ rte_pktmbuf_chain(mbuf, m);
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
if (mbuf_data == NULL)
goto error;
}
@@ -200,13 +217,14 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
dev_id);
+ uint32_t max_size = options->max_buffer_size + options->digest_sz;
+ uint32_t segment_nb = (max_size % options->segment_sz) ?
+ (max_size / options->segment_sz) + 1 :
+ max_size / options->segment_sz;
+
ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
- options->pool_sz * options->segments_nb, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- (options->max_buffer_size / options->segments_nb) +
- (options->max_buffer_size % options->segments_nb) +
- options->digest_sz),
+ options->pool_sz * segment_nb, 0, 0,
+ RTE_PKTMBUF_HEADROOM + options->segment_sz,
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
@@ -218,7 +236,9 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_in, options->segments_nb,
+ ctx->pkt_mbuf_pool_in,
+ options->segment_sz,
+ segment_nb,
options, test_vector);
if (ctx->mbufs_in[mbuf_idx] == NULL)
goto err;
@@ -232,9 +252,7 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
pool_name, options->pool_sz, 0, 0,
RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- options->max_buffer_size +
- options->digest_sz),
+ max_size,
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
@@ -248,8 +266,8 @@ cperf_throughput_test_constructor(struct rte_mempool *sess_mp,
for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
if (options->out_of_place == 1) {
ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_out, 1,
- options, test_vector);
+ ctx->pkt_mbuf_pool_out, max_size,
+ 1, options, test_vector);
if (ctx->mbufs_out[mbuf_idx] == NULL)
goto err;
} else {
@@ -297,7 +315,7 @@ cperf_throughput_test_runner(void *test_ctx)
int linearize = 0;
/* Check if source mbufs require coalescing */
- if (ctx->options->segments_nb > 1) {
+ if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
rte_cryptodev_info_get(ctx->dev_id, &dev_info);
if ((dev_info.feature_flags &
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
index bc07eb6..6f790ce 100644
--- a/app/test-crypto-perf/cperf_test_verify.c
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -104,18 +104,18 @@ cperf_verify_test_free(struct cperf_verify_ctx *ctx, uint32_t mbuf_nb)
static struct rte_mbuf *
cperf_mbuf_create(struct rte_mempool *mempool,
- uint32_t segments_nb,
+ uint32_t segment_sz,
+ uint32_t segment_nb,
const struct cperf_options *options,
const struct cperf_test_vector *test_vector)
{
struct rte_mbuf *mbuf;
- uint32_t segment_sz = options->max_buffer_size / segments_nb;
- uint32_t last_sz = options->max_buffer_size % segments_nb;
uint8_t *mbuf_data;
uint8_t *test_data =
(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
test_vector->plaintext.data :
test_vector->ciphertext.data;
+ uint32_t remaining_bytes = options->max_buffer_size;
mbuf = rte_pktmbuf_alloc(mempool);
if (mbuf == NULL)
@@ -125,11 +125,18 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (mbuf_data == NULL)
goto error;
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
+ if (options->max_buffer_size <= segment_sz) {
+ memcpy(mbuf_data, test_data, options->max_buffer_size);
+ test_data += options->max_buffer_size;
+ remaining_bytes = 0;
+ } else {
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ remaining_bytes -= segment_sz;
+ }
+ segment_nb--;
- while (segments_nb) {
+ while (remaining_bytes) {
struct rte_mbuf *m;
m = rte_pktmbuf_alloc(mempool);
@@ -142,22 +149,32 @@ cperf_mbuf_create(struct rte_mempool *mempool,
if (mbuf_data == NULL)
goto error;
- memcpy(mbuf_data, test_data, segment_sz);
- test_data += segment_sz;
- segments_nb--;
+ if (remaining_bytes <= segment_sz) {
+ memcpy(mbuf_data, test_data, remaining_bytes);
+ remaining_bytes = 0;
+ test_data += remaining_bytes;
+ } else {
+ memcpy(mbuf_data, test_data, segment_sz);
+ remaining_bytes -= segment_sz;
+ test_data += segment_sz;
+ }
+ segment_nb--;
}
- if (last_sz) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
- if (mbuf_data == NULL)
- goto error;
+ /*
+ * If there was not enough room for the digest at the end
+ * of the last segment, allocate a new one
+ */
+ if (segment_nb != 0) {
+ struct rte_mbuf *m;
- memcpy(mbuf_data, test_data, last_sz);
- }
+ m = rte_pktmbuf_alloc(mempool);
- if (options->op_type != CPERF_CIPHER_ONLY) {
- mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
- options->digest_sz);
+ if (m == NULL)
+ goto error;
+
+ rte_pktmbuf_chain(mbuf, m);
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
if (mbuf_data == NULL)
goto error;
}
@@ -204,13 +221,14 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
dev_id);
+ uint32_t max_size = options->max_buffer_size + options->digest_sz;
+ uint32_t segment_nb = (max_size % options->segment_sz) ?
+ (max_size / options->segment_sz) + 1 :
+ max_size / options->segment_sz;
+
ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
- options->pool_sz * options->segments_nb, 0, 0,
- RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- (options->max_buffer_size / options->segments_nb) +
- (options->max_buffer_size % options->segments_nb) +
- options->digest_sz),
+ options->pool_sz * segment_nb, 0, 0,
+ RTE_PKTMBUF_HEADROOM + options->segment_sz,
rte_socket_id());
if (ctx->pkt_mbuf_pool_in == NULL)
@@ -222,7 +240,9 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_in, options->segments_nb,
+ ctx->pkt_mbuf_pool_in,
+ options->segment_sz,
+ segment_nb,
options, test_vector);
if (ctx->mbufs_in[mbuf_idx] == NULL)
goto err;
@@ -236,9 +256,7 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
pool_name, options->pool_sz, 0, 0,
RTE_PKTMBUF_HEADROOM +
- RTE_CACHE_LINE_ROUNDUP(
- options->max_buffer_size +
- options->digest_sz),
+ max_size,
rte_socket_id());
if (ctx->pkt_mbuf_pool_out == NULL)
@@ -252,8 +270,8 @@ cperf_verify_test_constructor(struct rte_mempool *sess_mp,
for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
if (options->out_of_place == 1) {
ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
- ctx->pkt_mbuf_pool_out, 1,
- options, test_vector);
+ ctx->pkt_mbuf_pool_out, max_size,
+ 1, options, test_vector);
if (ctx->mbufs_out[mbuf_idx] == NULL)
goto err;
} else {
@@ -405,7 +423,7 @@ cperf_verify_test_runner(void *test_ctx)
int linearize = 0;
/* Check if source mbufs require coalescing */
- if (ctx->options->segments_nb > 1) {
+ if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
rte_cryptodev_info_get(ctx->dev_id, &dev_info);
if ((dev_info.feature_flags &
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
diff --git a/doc/guides/tools/cryptoperf.rst b/doc/guides/tools/cryptoperf.rst
index 457f817..23b2b98 100644
--- a/doc/guides/tools/cryptoperf.rst
+++ b/doc/guides/tools/cryptoperf.rst
@@ -170,9 +170,11 @@ The following are the appication command-line options:
* List of values, up to 32 values, separated in commas (i.e. ``--buffer-sz 32,64,128``)
-* ``--segments-nb <n>``
+* ``--segment-sz <n>``
- Set the number of segments per packet.
+ Set the size of the segment to use, for Scatter Gather List testing.
+ By default, it is set to the size of the maximum buffer size, including the digest size,
+ so a single segment is created.
* ``--devtype <name>``
--
2.9.4
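An example invocation using the new option (a sketch: the binary path and EAL
arguments are illustrative, and the remaining options rely on the existing
cryptoperf defaults):

./build/app/dpdk-test-crypto-perf -l 0-1 -- --ptest throughput \
	--devtype crypto_aesni_mb --optype cipher-then-auth \
	--buffer-sz 1024 --segment-sz 256

With 1024-byte buffers and a 256-byte segment size, each packet is split
across five segments (1024 data bytes plus the digest, rounded up to whole
256-byte segments).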
Thread overview: 49+ messages
2017-08-18 8:05 [dpdk-dev] [PATCH 0/6] Crypto-perf app improvements Pablo de Lara
2017-08-18 8:05 ` [dpdk-dev] [PATCH 1/6] app/crypto-perf: set AAD after the crypto operation Pablo de Lara
2017-08-18 8:05 ` [dpdk-dev] [PATCH 2/6] app/crypto-perf: parse AEAD data from vectors Pablo de Lara
2017-08-18 8:05 ` Pablo de Lara [this message]
2017-08-18 8:05 ` [dpdk-dev] [PATCH 4/6] app/crypto-perf: overwrite mbuf when verifying Pablo de Lara
2017-08-18 8:05 ` [dpdk-dev] [PATCH 5/6] app/crypto-perf: do not populate the mbufs at init Pablo de Lara
2017-08-18 8:05 ` [dpdk-dev] [PATCH 6/6] app/crypto-perf: use single mempool Pablo de Lara
2017-08-30 8:30 ` Akhil Goyal
[not found] ` <9F7182E3F746AB4EA17801C148F3C60433039119@IRSMSX101.ger.corp.intel.com>
2017-09-11 11:08 ` De Lara Guarch, Pablo
2017-09-11 13:10 ` Shreyansh Jain
2017-09-11 13:56 ` De Lara Guarch, Pablo
2017-09-04 13:08 ` [dpdk-dev] [PATCH 0/6] Crypto-perf app improvements Zhang, Roy Fan
2017-09-13 7:20 ` [dpdk-dev] [PATCH v2 0/7] " Pablo de Lara
2017-09-13 7:20 ` [dpdk-dev] [PATCH v2 1/7] app/crypto-perf: set AAD after the crypto operation Pablo de Lara
2017-09-13 7:20 ` [dpdk-dev] [PATCH v2 2/7] app/crypto-perf: parse AEAD data from vectors Pablo de Lara
2017-09-13 7:20 ` [dpdk-dev] [PATCH v2 3/7] app/crypto-perf: parse segment size Pablo de Lara
2017-09-13 7:20 ` [dpdk-dev] [PATCH v2 4/7] app/crypto-perf: overwrite mbuf when verifying Pablo de Lara
2017-09-13 7:20 ` [dpdk-dev] [PATCH v2 5/7] app/crypto-perf: do not populate the mbufs at init Pablo de Lara
2017-09-22 7:55 ` [dpdk-dev] [PATCH v3 0/7] Crypto-perf app improvements Pablo de Lara
2017-09-22 7:55 ` [dpdk-dev] [PATCH v3 1/7] app/crypto-perf: set AAD after the crypto operation Pablo de Lara
2017-09-22 7:55 ` [dpdk-dev] [PATCH v3 2/7] app/crypto-perf: parse AEAD data from vectors Pablo de Lara
2017-09-22 7:55 ` [dpdk-dev] [PATCH v3 3/7] app/crypto-perf: parse segment size Pablo de Lara
2017-09-22 7:55 ` [dpdk-dev] [PATCH v3 4/7] app/crypto-perf: overwrite mbuf when verifying Pablo de Lara
2017-09-22 7:55 ` [dpdk-dev] [PATCH v3 5/7] app/crypto-perf: do not populate the mbufs at init Pablo de Lara
2017-09-22 7:55 ` [dpdk-dev] [PATCH v3 6/7] app/crypto-perf: support multiple queue pairs Pablo de Lara
2017-09-26 8:42 ` Akhil Goyal
2017-10-04 10:25 ` De Lara Guarch, Pablo
2017-09-22 7:55 ` [dpdk-dev] [PATCH v3 7/7] app/crypto-perf: use single mempool Pablo de Lara
2017-09-26 9:21 ` Akhil Goyal
2017-10-04 7:47 ` De Lara Guarch, Pablo
2017-10-04 3:46 ` [dpdk-dev] [PATCH v4 0/8] Crypto-perf app improvements Pablo de Lara
2017-10-04 3:46 ` [dpdk-dev] [PATCH v4 1/8] app/crypto-perf: refactor common test code Pablo de Lara
2017-10-04 3:46 ` [dpdk-dev] [PATCH v4 2/8] app/crypto-perf: set AAD after the crypto operation Pablo de Lara
2017-10-04 3:46 ` [dpdk-dev] [PATCH v4 3/8] app/crypto-perf: parse AEAD data from vectors Pablo de Lara
2017-10-04 3:46 ` [dpdk-dev] [PATCH v4 4/8] app/crypto-perf: parse segment size Pablo de Lara
2017-10-04 3:46 ` [dpdk-dev] [PATCH v4 5/8] app/crypto-perf: overwrite mbuf when verifying Pablo de Lara
2017-10-04 3:46 ` [dpdk-dev] [PATCH v4 6/8] app/crypto-perf: do not populate the mbufs at init Pablo de Lara
2017-10-04 3:46 ` [dpdk-dev] [PATCH v4 7/8] app/crypto-perf: support multiple queue pairs Pablo de Lara
2017-10-04 3:46 ` [dpdk-dev] [PATCH v4 8/8] app/crypto-perf: use single mempool Pablo de Lara
2017-10-06 11:57 ` [dpdk-dev] [PATCH v4 0/8] Crypto-perf app improvements Akhil Goyal
2017-10-06 12:50 ` De Lara Guarch, Pablo
2017-09-13 7:22 ` [dpdk-dev] [PATCH v2 0/7] " Pablo de Lara
2017-09-13 7:22 ` [dpdk-dev] [PATCH v2 1/7] app/crypto-perf: set AAD after the crypto operation Pablo de Lara
2017-09-13 7:22 ` [dpdk-dev] [PATCH v2 2/7] app/crypto-perf: parse AEAD data from vectors Pablo de Lara
2017-09-13 7:22 ` [dpdk-dev] [PATCH v2 3/7] app/crypto-perf: parse segment size Pablo de Lara
2017-09-13 7:22 ` [dpdk-dev] [PATCH v2 4/7] app/crypto-perf: overwrite mbuf when verifying Pablo de Lara
2017-09-13 7:22 ` [dpdk-dev] [PATCH v2 5/7] app/crypto-perf: do not populate the mbufs at init Pablo de Lara
2017-09-13 7:22 ` [dpdk-dev] [PATCH v2 6/7] app/crypto-perf: support multiple queue pairs Pablo de Lara
2017-09-13 7:22 ` [dpdk-dev] [PATCH v2 7/7] app/crypto-perf: use single mempool Pablo de Lara