* [dpdk-dev] [PATCH 1/2] net/pcap: multiple queues fix
From: ido goshen @ 2018-06-16 15:36 UTC
To: Ferruh Yigit; +Cc: dev, ido goshen
Change the open_rx/tx_pcap/iface functions to open only a single pcap/dumper
and not loop num_of_queue times.
The num_of_queue loop is already achieved by the caller, rte_kvargs_process.
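To illustrate the point (not part of the patch): rte_kvargs_process() invokes its
handler once per occurrence of the matching key, so each handler only needs to open
and record a single capture. Below is a minimal, standalone sketch of that callback
pattern; the example_queues struct, the example_open_one() handler and the devargs
string are illustrative stand-ins, only the rte_kvargs calls are the real API
(build against librte_kvargs):

#include <stdio.h>
#include <rte_kvargs.h>

/* Illustrative stand-in for the driver's pmd_devargs. */
struct example_queues {
	const char *name[4];
	unsigned int num_of_queue;
};

/*
 * rte_kvargs_process() calls this once per "rx_pcap=..." occurrence,
 * so the handler opens/records exactly one queue and advances the count.
 */
static int
example_open_one(const char *key, const char *value, void *extra_args)
{
	struct example_queues *q = extra_args;

	if (q->num_of_queue >= 4)
		return -1;
	q->name[q->num_of_queue++] = value;
	printf("%s -> queue %u uses %s\n", key, q->num_of_queue - 1, value);
	return 0;
}

int
main(void)
{
	static const char * const valid[] = { "rx_pcap", NULL };
	struct example_queues q = { .num_of_queue = 0 };
	struct rte_kvargs *kvlist =
		rte_kvargs_parse("rx_pcap=a.pcap,rx_pcap=b.pcap", valid);

	if (kvlist == NULL)
		return 1;
	/* One handler invocation per matching key: no inner loop needed. */
	if (rte_kvargs_process(kvlist, "rx_pcap", example_open_one, &q) < 0)
		fprintf(stderr, "processing failed\n");
	rte_kvargs_free(kvlist);
	return 0;
}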
Fixes:
1. Opens the N requested pcaps/dumpers instead of N^2
2. Leak of pcaps/dumpers, which were being overwritten by the
   sequential calls to the open_rx/tx_pcap/iface functions
3. Uses the filename/iface argument of each queue and not just the last
   one, which overwrote the previous names
Signed-off-by: ido goshen <ido@cgstowernetworks.com>
---
drivers/net/pcap/rte_eth_pcap.c | 85 +++++++++++++++++------------------------
1 file changed, 35 insertions(+), 50 deletions(-)
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 6bd4a7d..444abbb 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -665,19 +665,17 @@ struct pmd_devargs {
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *pcap_filename = value;
struct pmd_devargs *rx = extra_args;
pcap_t *pcap = NULL;
- for (i = 0; i < rx->num_of_queue; i++) {
- if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
- return -1;
+ if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
+ return -1;
- rx->queue[i].pcap = pcap;
- rx->queue[i].name = pcap_filename;
- rx->queue[i].type = key;
- }
+ rx->queue[rx->num_of_queue].pcap = pcap;
+ rx->queue[rx->num_of_queue].name = pcap_filename;
+ rx->queue[rx->num_of_queue].type = key;
+ rx->num_of_queue++;
return 0;
}
@@ -689,19 +687,17 @@ struct pmd_devargs {
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *pcap_filename = value;
struct pmd_devargs *dumpers = extra_args;
pcap_dumper_t *dumper;
- for (i = 0; i < dumpers->num_of_queue; i++) {
- if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
- return -1;
+ if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
+ return -1;
- dumpers->queue[i].dumper = dumper;
- dumpers->queue[i].name = pcap_filename;
- dumpers->queue[i].type = key;
- }
+ dumpers->queue[dumpers->num_of_queue].dumper = dumper;
+ dumpers->queue[dumpers->num_of_queue].name = pcap_filename;
+ dumpers->queue[dumpers->num_of_queue].type = key;
+ dumpers->num_of_queue++;
return 0;
}
@@ -732,18 +728,16 @@ struct pmd_devargs {
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *iface = value;
struct pmd_devargs *rx = extra_args;
pcap_t *pcap = NULL;
- for (i = 0; i < rx->num_of_queue; i++) {
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- rx->queue[i].pcap = pcap;
- rx->queue[i].name = iface;
- rx->queue[i].type = key;
- }
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+ rx->queue[rx->num_of_queue].pcap = pcap;
+ rx->queue[rx->num_of_queue].name = iface;
+ rx->queue[rx->num_of_queue].type = key;
+ rx->num_of_queue++;
return 0;
}
@@ -754,18 +748,16 @@ struct pmd_devargs {
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *iface = value;
struct pmd_devargs *tx = extra_args;
pcap_t *pcap;
- for (i = 0; i < tx->num_of_queue; i++) {
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- tx->queue[i].pcap = pcap;
- tx->queue[i].name = iface;
- tx->queue[i].type = key;
- }
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+ tx->queue[tx->num_of_queue].pcap = pcap;
+ tx->queue[tx->num_of_queue].name = iface;
+ tx->queue[tx->num_of_queue].type = key;
+ tx->num_of_queue++;
return 0;
}
@@ -958,15 +950,8 @@ struct pmd_devargs {
* We check whether we want to open a RX stream from a real NIC or a
* pcap file
*/
- pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
- if (pcaps.num_of_queue)
- is_rx_pcap = 1;
- else
- pcaps.num_of_queue = rte_kvargs_count(kvlist,
- ETH_PCAP_RX_IFACE_ARG);
-
- if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
- pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+ is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
+ pcaps.num_of_queue = 0;
if (is_rx_pcap)
ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
@@ -975,6 +960,10 @@ struct pmd_devargs {
ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
&open_rx_iface, &pcaps);
+ if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
+ pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+
+
if (ret < 0)
goto free_kvlist;
@@ -982,15 +971,8 @@ struct pmd_devargs {
* We check whether we want to open a TX stream to a real NIC or a
* pcap file
*/
- dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
- if (dumpers.num_of_queue)
- is_tx_pcap = 1;
- else
- dumpers.num_of_queue = rte_kvargs_count(kvlist,
- ETH_PCAP_TX_IFACE_ARG);
-
- if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
- dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+ is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
+ dumpers.num_of_queue = 0;
if (is_tx_pcap)
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
@@ -999,6 +981,9 @@ struct pmd_devargs {
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
&open_tx_iface, &dumpers);
+ if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
+ dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+
if (ret < 0)
goto free_kvlist;
--
1.9.1
* [dpdk-dev] [PATCH 2/2] net/pcap: duplicate code consolidation
From: ido goshen @ 2018-06-16 15:36 UTC
To: Ferruh Yigit; +Cc: dev, ido goshen
Signed-off-by: ido goshen <ido@cgstowernetworks.com>
---
drivers/net/pcap/rte_eth_pcap.c | 58 ++++++++++++++++++++---------------------
1 file changed, 29 insertions(+), 29 deletions(-)
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 444abbb..e63998b 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -658,6 +658,20 @@ struct pmd_devargs {
.stats_reset = eth_stats_reset,
};
+static int
+add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
+ pcap_t *pcap, pcap_dumper_t *dumper)
+{
+ if (pcap)
+ pmd->queue[pmd->num_of_queue].pcap = pcap;
+ if (dumper)
+ pmd->queue[pmd->num_of_queue].dumper = dumper;
+ pmd->queue[pmd->num_of_queue].name = name;
+ pmd->queue[pmd->num_of_queue].type = type;
+ pmd->num_of_queue++;
+ return 0;
+}
+
/*
* Function handler that opens the pcap file for reading a stores a
* reference of it for use it later on.
@@ -672,10 +686,7 @@ struct pmd_devargs {
if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
return -1;
- rx->queue[rx->num_of_queue].pcap = pcap;
- rx->queue[rx->num_of_queue].name = pcap_filename;
- rx->queue[rx->num_of_queue].type = key;
- rx->num_of_queue++;
+ add_queue(rx, pcap_filename, key, pcap, NULL);
return 0;
}
@@ -694,10 +705,7 @@ struct pmd_devargs {
if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
return -1;
- dumpers->queue[dumpers->num_of_queue].dumper = dumper;
- dumpers->queue[dumpers->num_of_queue].name = pcap_filename;
- dumpers->queue[dumpers->num_of_queue].type = key;
- dumpers->num_of_queue++;
+ add_queue(dumpers, pcap_filename, key, NULL, dumper);
return 0;
}
@@ -722,44 +730,36 @@ struct pmd_devargs {
return 0;
}
-/*
- * Opens a NIC for reading packets from it
- */
static inline int
-open_rx_iface(const char *key, const char *value, void *extra_args)
+open_iface(const char *key, const char *value, void *extra_args)
{
const char *iface = value;
- struct pmd_devargs *rx = extra_args;
+ struct pmd_devargs *pmd = extra_args;
pcap_t *pcap = NULL;
if (open_single_iface(iface, &pcap) < 0)
return -1;
- rx->queue[rx->num_of_queue].pcap = pcap;
- rx->queue[rx->num_of_queue].name = iface;
- rx->queue[rx->num_of_queue].type = key;
- rx->num_of_queue++;
+ add_queue(pmd, iface, key, pcap, NULL);
return 0;
}
/*
+ * Opens a NIC for reading packets from it
+ */
+static inline int
+open_rx_iface(const char *key, const char *value, void *extra_args)
+{
+ return open_iface(key, value, extra_args);
+}
+
+/*
* Opens a NIC for writing packets to it
*/
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
- const char *iface = value;
- struct pmd_devargs *tx = extra_args;
- pcap_t *pcap;
-
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- tx->queue[tx->num_of_queue].pcap = pcap;
- tx->queue[tx->num_of_queue].name = iface;
- tx->queue[tx->num_of_queue].type = key;
- tx->num_of_queue++;
-
- return 0;
+ return open_iface(key, value, extra_args);
}
static struct rte_vdev_driver pmd_pcap_drv;
--
1.9.1
* Re: [dpdk-dev] [PATCH 1/2] net/pcap: multiple queues fix
From: Ferruh Yigit @ 2018-06-18 8:25 UTC
To: ido goshen; +Cc: dev
On 6/16/2018 4:36 PM, ido goshen wrote:
> Change the open_rx/tx_pcap/iface functions to open only a single pcap/dumper
> and not loop num_of_queue times.
> The num_of_queue loop is already achieved by the caller, rte_kvargs_process.
You are right, thanks for fixing this, a few comments below.
>
> Fixes:
> 1. Opens the N requested pcaps/dumpers instead of N^2
> 2. Leak of pcaps/dumpers, which were being overwritten by the
>    sequential calls to the open_rx/tx_pcap/iface functions
> 3. Uses the filename/iface argument of each queue and not just the last
>    one, which overwrote the previous names
Please add a "Fixes: xx" line, that is to trace initial commit the issue
introduced. More details in contribution guide.
Also please add "Cc: stable@dpdk.org" to be sure patch sent to stable tree too
and to help stable tree maintainers"
>
> Signed-off-by: ido goshen <ido@cgstowernetworks.com>
<...>
> @@ -958,15 +950,8 @@ struct pmd_devargs {
> * We check whether we want to open a RX stream from a real NIC or a
> * pcap file
> */
> - pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
> - if (pcaps.num_of_queue)
> - is_rx_pcap = 1;
> - else
> - pcaps.num_of_queue = rte_kvargs_count(kvlist,
> - ETH_PCAP_RX_IFACE_ARG);
> -
> - if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
> - pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
> + is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
> + pcaps.num_of_queue = 0;
>
> if (is_rx_pcap)
> ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
> @@ -975,6 +960,10 @@ struct pmd_devargs {
> ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
> &open_rx_iface, &pcaps);
>
> + if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
> + pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
Here it is too late for this check. You may already have accessed rx->queue[],
tx->queue[] out of bounds at this point.
You should either check this value before rte_kvargs_process(), via
rte_kvargs_count(), OR add this check into the callback functions.
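For the callback-side option, an untested sketch of how that could look on top of
this patch's open_rx_pcap(), reusing the driver-internal symbols already shown in
the diff above; the added bound check is the only assumed change:

static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
	const char *pcap_filename = value;
	struct pmd_devargs *rx = extra_args;
	pcap_t *pcap = NULL;

	/* Assumed addition: refuse queues beyond the fixed-size array. */
	if (rx->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
		return -1;

	if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
		return -1;

	rx->queue[rx->num_of_queue].pcap = pcap;
	rx->queue[rx->num_of_queue].name = pcap_filename;
	rx->queue[rx->num_of_queue].type = key;
	rx->num_of_queue++;

	return 0;
}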
> if (ret < 0)
> goto free_kvlist;
>
> @@ -982,15 +971,8 @@ struct pmd_devargs {
> * We check whether we want to open a TX stream to a real NIC or a
> * pcap file
> */
> - dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
> - if (dumpers.num_of_queue)
> - is_tx_pcap = 1;
> - else
> - dumpers.num_of_queue = rte_kvargs_count(kvlist,
> - ETH_PCAP_TX_IFACE_ARG);
> -
> - if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
> - dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
> + is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
Is "is_rx_pcap" or "is_tx_pcap" flags really required? Is there anything
preventing have a mixture of interface and pcap in multi queue case? With the
changes you are doing, I guess we can remove these checks and call following
sequentially:
rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG..)
rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG ..)
What do you think?
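A rough, untested sketch of that sequential processing for the RX side, based on
the probe code in this patch; it assumes pcap and iface queues can indeed coexist
on one port, which is exactly what would need verifying:

	/* is_rx_pcap no longer needed: process both keys, in order. */
	pcaps.num_of_queue = 0;

	ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
			&open_rx_pcap, &pcaps);
	if (ret == 0)
		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
				&open_rx_iface, &pcaps);
	if (ret < 0)
		goto free_kvlist;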
But please be sure the fix and refactor patches are separate, so that the fix
patch can be backported to the stable trees; refactor patches won't be
backported.
* Re: [dpdk-dev] [PATCH 1/2] net/pcap: multiple queues fix
From: Ido Goshen @ 2018-06-19 9:45 UTC
To: Ferruh Yigit; +Cc: dev
See inline, prefixed with [ido]
-----Original Message-----
From: Ferruh Yigit <ferruh.yigit@intel.com>
Sent: Monday, June 18, 2018 11:25 AM
To: Ido Goshen <Ido@cgstowernetworks.com>
Cc: dev@dpdk.org
Subject: Re: [PATCH 1/2] net/pcap: multiple queues fix
On 6/16/2018 4:36 PM, ido goshen wrote:
> Change the open_rx/tx_pcap/iface functions to open only a single
> pcap/dumper and not loop num_of_queue times.
> The num_of_queue loop is already achieved by the caller, rte_kvargs_process.
You are right, thanks for fixing this, a few comments below.
>
> Fixes:
> 1. Opens the N requested pcaps/dumpers instead of N^2
> 2. Leak of pcaps/dumpers, which were being overwritten by the
>    sequential calls to the open_rx/tx_pcap/iface functions
> 3. Uses the filename/iface argument of each queue and not just the last
>    one, which overwrote the previous names
Please add a "Fixes: xx" line, that is to trace initial commit the issue introduced. More details in contribution guide.
Also please add "Cc: stable@dpdk.org" to be sure patch sent to stable tree too and to help stable tree maintainers"
[ido] As far as I can trace back, this is from day one (4c17330 pcap: add new driver). Would "Fixes: 4c17330" be ok?
>
> Signed-off-by: ido goshen <ido@cgstowernetworks.com>
<...>
> @@ -958,15 +950,8 @@ struct pmd_devargs {
> * We check whether we want to open a RX stream from a real NIC or a
> * pcap file
> */
> - pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
> - if (pcaps.num_of_queue)
> - is_rx_pcap = 1;
> - else
> - pcaps.num_of_queue = rte_kvargs_count(kvlist,
> - ETH_PCAP_RX_IFACE_ARG);
> -
> - if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
> - pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
> + is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
> + pcaps.num_of_queue = 0;
>
> if (is_rx_pcap)
> ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG, @@ -975,6
> +960,10 @@ struct pmd_devargs {
> ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
> &open_rx_iface, &pcaps);
>
> + if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
> + pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
Here it is too late for this check. You may already have accessed rx->queue[],
tx->queue[] out of bounds at this point.
You should either check this value before rte_kvargs_process(), via rte_kvargs_count(), OR add this check into the callback functions.
[ido] good catch - will fix that
> if (ret < 0)
> goto free_kvlist;
>
> @@ -982,15 +971,8 @@ struct pmd_devargs {
> * We check whether we want to open a TX stream to a real NIC or a
> * pcap file
> */
> - dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
> - if (dumpers.num_of_queue)
> - is_tx_pcap = 1;
> - else
> - dumpers.num_of_queue = rte_kvargs_count(kvlist,
> - ETH_PCAP_TX_IFACE_ARG);
> -
> - if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
> - dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
> + is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
Is "is_rx_pcap" or "is_tx_pcap" flags really required? Is there anything preventing have a mixture of interface and pcap in multi queue case? With the changes you are doing, I guess we can remove these checks and call following
sequentially:
rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG..) rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG ..) What do you think?
[ido] nice idea - will test if they can co-exist
But please be sure the fix and refactor patches are separate, so that the fix patch can be backported to the stable trees; refactor patches won't be backported.
* Re: [dpdk-dev] [PATCH 1/2] net/pcap: multiple queues fix
From: Ferruh Yigit @ 2018-06-19 10:00 UTC
To: Ido Goshen; +Cc: dev
On 6/19/2018 10:45 AM, Ido Goshen wrote:
> See inline, prefixed with [ido]
>
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Monday, June 18, 2018 11:25 AM
> To: Ido Goshen <Ido@cgstowernetworks.com>
> Cc: dev@dpdk.org
> Subject: Re: [PATCH 1/2] net/pcap: multiple queues fix
>
> On 6/16/2018 4:36 PM, ido goshen wrote:
>> Change the open_rx/tx_pcap/iface functions to open only a single
>> pcap/dumper and not loop num_of_queue times.
>> The num_of_queue loop is already achieved by the caller, rte_kvargs_process.
>
> You are right, thanks for fixing this, a few comments below.
>
>>
>> Fixes:
>> 1. Opens the N requested pcaps/dumpers instead of N^2
>> 2. Leak of pcaps/dumpers, which were being overwritten by the
>>    sequential calls to the open_rx/tx_pcap/iface functions
>> 3. Uses the filename/iface argument of each queue and not just the last
>>    one, which overwrote the previous names
>
> Please add a "Fixes: xx" line, that is to trace initial commit the issue introduced. More details in contribution guide.
> Also please add "Cc: stable@dpdk.org" to be sure patch sent to stable tree too and to help stable tree maintainers"
> [ido] As far as I can trace back, this is from day one (4c17330 pcap: add new driver). Would "Fixes: 4c17330" be ok?
As a commit reference it looks correct, thanks. For the syntax we are using a
git alias for the unified syntax [1], which produces output like [2].
[1]
git config alias.fixline "log -1 --abbrev=12 --format='Fixes: %h (\"%s\")%nCc: %ae'"
[2]
Fixes: 4c173302c307 ("pcap: add new driver")
>
>>
>> Signed-off-by: ido goshen <ido@cgstowernetworks.com>
>
> <...>
>
>> @@ -958,15 +950,8 @@ struct pmd_devargs {
>> * We check whether we want to open a RX stream from a real NIC or a
>> * pcap file
>> */
>> - pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
>> - if (pcaps.num_of_queue)
>> - is_rx_pcap = 1;
>> - else
>> - pcaps.num_of_queue = rte_kvargs_count(kvlist,
>> - ETH_PCAP_RX_IFACE_ARG);
>> -
>> - if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
>> - pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
>> + is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
>> + pcaps.num_of_queue = 0;
>>
>> if (is_rx_pcap)
>> ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG, @@ -975,6
>> +960,10 @@ struct pmd_devargs {
>> ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
>> &open_rx_iface, &pcaps);
>>
>> + if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
>> + pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
>
> Here it is too late for this check. You may already have accessed rx->queue[],
> tx->queue[] out of bounds at this point.
>
> You should either check this value before rte_kvargs_process(), via rte_kvargs_count(), OR add this check into the callback functions.
> [ido] good catch - will fix that
>
>> if (ret < 0)
>> goto free_kvlist;
>>
>> @@ -982,15 +971,8 @@ struct pmd_devargs {
>> * We check whether we want to open a TX stream to a real NIC or a
>> * pcap file
>> */
>> - dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
>> - if (dumpers.num_of_queue)
>> - is_tx_pcap = 1;
>> - else
>> - dumpers.num_of_queue = rte_kvargs_count(kvlist,
>> - ETH_PCAP_TX_IFACE_ARG);
>> -
>> - if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
>> - dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
>> + is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
>
> Is "is_rx_pcap" or "is_tx_pcap" flags really required? Is there anything preventing have a mixture of interface and pcap in multi queue case? With the changes you are doing, I guess we can remove these checks and call following
> sequentially:
> rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG..) rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG ..) What do you think?
> [ido] nice idea - will test if they can co-exist
>
> But please be sure the fix and refactor patches are separate, so that the fix patch can be backported to the stable trees; refactor patches won't be backported.
>