* [PATCH] net/idpf: refine devargs parse functions
@ 2023-04-21 7:14 Mingxia Liu
2023-04-23 8:55 ` Wu, Jingjing
2023-04-24 10:39 ` [PATCH v2] " Mingxia Liu
0 siblings, 2 replies; 5+ messages in thread
From: Mingxia Liu @ 2023-04-21 7:14 UTC (permalink / raw)
To: dev; +Cc: jingjing.wu, beilei.xing, Mingxia Liu
This patch refines the devargs parsing functions and replaces the
hard-coded IDPF_MAX_VPORT_NUM limit with the adapter's max_vport_nb,
which is derived from the device capabilities.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
drivers/net/idpf/idpf_ethdev.c | 61 +++++++++++++++++-----------------
1 file changed, 30 insertions(+), 31 deletions(-)
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index e02ec2ec5a..a8dd5a0a80 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -857,12 +857,6 @@ insert_value(struct idpf_devargs *devargs, uint16_t id)
return 0;
}
- if (devargs->req_vport_nb >= RTE_DIM(devargs->req_vports)) {
- PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
- IDPF_MAX_VPORT_NUM);
- return -EINVAL;
- }
-
devargs->req_vports[devargs->req_vport_nb] = id;
devargs->req_vport_nb++;
@@ -879,12 +873,10 @@ parse_range(const char *value, struct idpf_devargs *devargs)
result = sscanf(value, "%hu%n-%hu%n", &lo, &n, &hi, &n);
if (result == 1) {
- if (lo >= IDPF_MAX_VPORT_NUM)
- return NULL;
if (insert_value(devargs, lo) != 0)
return NULL;
} else if (result == 2) {
- if (lo > hi || hi >= IDPF_MAX_VPORT_NUM)
+ if (lo > hi)
return NULL;
for (i = lo; i <= hi; i++) {
if (insert_value(devargs, i) != 0)
@@ -969,40 +961,46 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *adap
return -EINVAL;
}
+ ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
+ idpf_args);
+ if (ret != 0)
+ goto fail;
+
+ ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
+ &adapter->base.is_tx_singleq);
+ if (ret != 0)
+ goto fail;
+
+ ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
+ &adapter->base.is_rx_singleq);
+ if (ret != 0)
+ goto fail;
+
/* check parsed devargs */
if (adapter->cur_vport_nb + idpf_args->req_vport_nb >
- IDPF_MAX_VPORT_NUM) {
+ adapter->max_vport_nb) {
PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
- IDPF_MAX_VPORT_NUM);
+ adapter->max_vport_nb);
ret = -EINVAL;
- goto bail;
+ goto fail;
}
for (i = 0; i < idpf_args->req_vport_nb; i++) {
+ if (idpf_args->req_vports[i] > adapter->max_vport_nb - 1) {
+ PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
+ idpf_args->req_vports[i], adapter->max_vport_nb - 1);
+ ret = -EINVAL;
+ goto fail;
+ }
if (adapter->cur_vports & RTE_BIT32(idpf_args->req_vports[i])) {
- PMD_INIT_LOG(ERR, "Vport %d has been created",
+ PMD_INIT_LOG(ERR, "Vport %d has been requested",
idpf_args->req_vports[i]);
ret = -EINVAL;
- goto bail;
+ goto fail;
}
}
- ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
- idpf_args);
- if (ret != 0)
- goto bail;
-
- ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
- &adapter->base.is_tx_singleq);
- if (ret != 0)
- goto bail;
-
- ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
- &adapter->base.is_rx_singleq);
- if (ret != 0)
- goto bail;
-
-bail:
+fail:
rte_kvargs_free(kvlist);
return ret;
}
@@ -1152,7 +1150,8 @@ idpf_adapter_ext_init(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *a
rte_eal_alarm_set(IDPF_ALARM_INTERVAL, idpf_dev_alarm_handler, adapter);
- adapter->max_vport_nb = adapter->base.caps.max_vports;
+ adapter->max_vport_nb = adapter->base.caps.max_vports > IDPF_MAX_VPORT_NUM ?
+ IDPF_MAX_VPORT_NUM : adapter->base.caps.max_vports;
adapter->vports = rte_zmalloc("vports",
adapter->max_vport_nb *
--
2.34.1
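For readers unfamiliar with the parsing path touched above: the sketch below is a
standalone, simplified illustration (not the driver code itself) of how a devargs
value such as vport=[0,2-4] is expanded into a list of requested vport ids, using
the same sscanf()-based parse_range()/insert_value() pattern visible in the diff.
The struct name, array bound, and main() driver are placeholders invented for the
example, not the real idpf definitions.

/*
 * Sketch only: expands tokens taken from a devargs string like
 * "vport=[0,2-4]" into a de-duplicated list of requested vport ids.
 */
#include <stdio.h>
#include <string.h>

#define MAX_REQ_VPORTS 64 /* placeholder for the real array bound */

struct demo_devargs {
        unsigned short req_vports[MAX_REQ_VPORTS];
        unsigned short req_vport_nb;
};

static int
insert_value(struct demo_devargs *da, unsigned short id)
{
        unsigned short i;

        /* Skip ids that were already requested, e.g. "vport=[2,2-3]". */
        for (i = 0; i < da->req_vport_nb; i++)
                if (da->req_vports[i] == id)
                        return 0;

        if (da->req_vport_nb >= MAX_REQ_VPORTS)
                return -1;

        da->req_vports[da->req_vport_nb++] = id;
        return 0;
}

static int
parse_range(const char *value, struct demo_devargs *da)
{
        unsigned short lo, hi, i;
        int n = 0, result;

        /* "%hu%n-%hu%n" accepts a single id ("3") or a range ("2-4"). */
        result = sscanf(value, "%hu%n-%hu%n", &lo, &n, &hi, &n);
        if (result == 1) {
                if ((size_t)n != strlen(value))
                        return -1;
                return insert_value(da, lo);
        } else if (result == 2) {
                if (lo > hi || (size_t)n != strlen(value))
                        return -1;
                for (i = lo; i <= hi; i++)
                        if (insert_value(da, i) != 0)
                                return -1;
                return 0;
        }
        return -1;
}

int
main(void)
{
        /* Tokens as they would appear inside "vport=[0,2-4]". */
        const char *tokens[] = { "0", "2-4" };
        struct demo_devargs da = { .req_vport_nb = 0 };
        unsigned short i;

        for (i = 0; i < 2; i++)
                if (parse_range(tokens[i], &da) != 0)
                        return 1;

        for (i = 0; i < da.req_vport_nb; i++)
                printf("requested vport %hu\n", da.req_vports[i]);
        return 0;
}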
* RE: [PATCH] net/idpf: refine devargs parse functions
2023-04-21 7:14 [PATCH] net/idpf: refine devargs parse functions Mingxia Liu
@ 2023-04-23 8:55 ` Wu, Jingjing
2023-04-24 2:19 ` Liu, Mingxia
2023-04-24 10:39 ` [PATCH v2] " Mingxia Liu
1 sibling, 1 reply; 5+ messages in thread
From: Wu, Jingjing @ 2023-04-23 8:55 UTC (permalink / raw)
To: Liu, Mingxia, dev; +Cc: Xing, Beilei
> -----Original Message-----
> From: Liu, Mingxia <mingxia.liu@intel.com>
> Sent: Friday, April 21, 2023 3:15 PM
> To: dev@dpdk.org
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Liu, Mingxia
> <mingxia.liu@intel.com>
> Subject: [PATCH] net/idpf: refine devargs parse functions
>
> This patch refines the devargs parsing functions and replaces the
> hard-coded IDPF_MAX_VPORT_NUM limit with the adapter's max_vport_nb,
> which is derived from the device capabilities.
>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> ---
> drivers/net/idpf/idpf_ethdev.c | 61 +++++++++++++++++-----------------
> 1 file changed, 30 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
> index e02ec2ec5a..a8dd5a0a80 100644
> --- a/drivers/net/idpf/idpf_ethdev.c
> +++ b/drivers/net/idpf/idpf_ethdev.c
> @@ -857,12 +857,6 @@ insert_value(struct idpf_devargs *devargs, uint16_t id)
> return 0;
> }
>
> - if (devargs->req_vport_nb >= RTE_DIM(devargs->req_vports)) {
> - PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
> - IDPF_MAX_VPORT_NUM);
> - return -EINVAL;
> - }
> -
> devargs->req_vports[devargs->req_vport_nb] = id;
> devargs->req_vport_nb++;
>
> @@ -879,12 +873,10 @@ parse_range(const char *value, struct idpf_devargs *devargs)
>
> result = sscanf(value, "%hu%n-%hu%n", &lo, &n, &hi, &n);
> if (result == 1) {
> - if (lo >= IDPF_MAX_VPORT_NUM)
> - return NULL;
> if (insert_value(devargs, lo) != 0)
> return NULL;
> } else if (result == 2) {
> - if (lo > hi || hi >= IDPF_MAX_VPORT_NUM)
> + if (lo > hi)
> return NULL;
> for (i = lo; i <= hi; i++) {
> if (insert_value(devargs, i) != 0)
> @@ -969,40 +961,46 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct
> idpf_adapter_ext *adap
> return -EINVAL;
> }
>
> + ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
> + idpf_args);
> + if (ret != 0)
> + goto fail;
> +
> + ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
> + &adapter->base.is_tx_singleq);
> + if (ret != 0)
> + goto fail;
> +
> + ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
> + &adapter->base.is_rx_singleq);
> + if (ret != 0)
> + goto fail;
> +
> /* check parsed devargs */
> if (adapter->cur_vport_nb + idpf_args->req_vport_nb >
> - IDPF_MAX_VPORT_NUM) {
> + adapter->max_vport_nb) {
> PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
> - IDPF_MAX_VPORT_NUM);
> + adapter->max_vport_nb);
> ret = -EINVAL;
> - goto bail;
> + goto fail;
> }
>
> for (i = 0; i < idpf_args->req_vport_nb; i++) {
> + if (idpf_args->req_vports[i] > adapter->max_vport_nb - 1) {
> + PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
> + idpf_args->req_vports[i], adapter->max_vport_nb - 1);
> + ret = -EINVAL;
This check is not necessary, because we don't require the vport id specified in the devargs to be less than the number of vports the device supports.
* RE: [PATCH] net/idpf: refine devargs parse functions
2023-04-23 8:55 ` Wu, Jingjing
@ 2023-04-24 2:19 ` Liu, Mingxia
0 siblings, 0 replies; 5+ messages in thread
From: Liu, Mingxia @ 2023-04-24 2:19 UTC (permalink / raw)
To: Wu, Jingjing, dev; +Cc: Xing, Beilei
> -----Original Message-----
> From: Wu, Jingjing <jingjing.wu@intel.com>
> Sent: Sunday, April 23, 2023 4:56 PM
> To: Liu, Mingxia <mingxia.liu@intel.com>; dev@dpdk.org
> Cc: Xing, Beilei <beilei.xing@intel.com>
> Subject: RE: [PATCH] net/idpf: refine devargs parse functions
>
>
>
> > -----Original Message-----
> > From: Liu, Mingxia <mingxia.liu@intel.com>
> > Sent: Friday, April 21, 2023 3:15 PM
> > To: dev@dpdk.org
> > Cc: Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>; Liu, Mingxia <mingxia.liu@intel.com>
> > Subject: [PATCH] net/idpf: refine devargs parse functions
> >
> > This patch refines the devargs parsing functions and replaces the
> > hard-coded IDPF_MAX_VPORT_NUM limit with the adapter's max_vport_nb,
> > which is derived from the device capabilities.
> >
> > Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> > ---
> > drivers/net/idpf/idpf_ethdev.c | 61
> > +++++++++++++++++-----------------
> > 1 file changed, 30 insertions(+), 31 deletions(-)
> >
> > diff --git a/drivers/net/idpf/idpf_ethdev.c
> > b/drivers/net/idpf/idpf_ethdev.c index e02ec2ec5a..a8dd5a0a80 100644
> > --- a/drivers/net/idpf/idpf_ethdev.c
> > +++ b/drivers/net/idpf/idpf_ethdev.c
> > @@ -857,12 +857,6 @@ insert_value(struct idpf_devargs *devargs, uint16_t
> id)
> > return 0;
> > }
> >
> > - if (devargs->req_vport_nb >= RTE_DIM(devargs->req_vports)) {
> > - PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
> > - IDPF_MAX_VPORT_NUM);
> > - return -EINVAL;
> > - }
> > -
> > devargs->req_vports[devargs->req_vport_nb] = id;
> > devargs->req_vport_nb++;
> >
> > @@ -879,12 +873,10 @@ parse_range(const char *value, struct
> > idpf_devargs *devargs)
> >
> > result = sscanf(value, "%hu%n-%hu%n", &lo, &n, &hi, &n);
> > if (result == 1) {
> > - if (lo >= IDPF_MAX_VPORT_NUM)
> > - return NULL;
> > if (insert_value(devargs, lo) != 0)
> > return NULL;
> > } else if (result == 2) {
> > - if (lo > hi || hi >= IDPF_MAX_VPORT_NUM)
> > + if (lo > hi)
> > return NULL;
> > for (i = lo; i <= hi; i++) {
> > if (insert_value(devargs, i) != 0) @@ -969,40 +961,46
> @@
> > idpf_parse_devargs(struct rte_pci_device *pci_dev, struct
> > idpf_adapter_ext *adap
> > return -EINVAL;
> > }
> >
> > + ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
> > + idpf_args);
> > + if (ret != 0)
> > + goto fail;
> > +
> > + ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
> > + &adapter->base.is_tx_singleq);
> > + if (ret != 0)
> > + goto fail;
> > +
> > + ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
> > + &adapter->base.is_rx_singleq);
> > + if (ret != 0)
> > + goto fail;
> > +
> > /* check parsed devargs */
> > if (adapter->cur_vport_nb + idpf_args->req_vport_nb >
> > - IDPF_MAX_VPORT_NUM) {
> > + adapter->max_vport_nb) {
> > PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
> > - IDPF_MAX_VPORT_NUM);
> > + adapter->max_vport_nb);
> > ret = -EINVAL;
> > - goto bail;
> > + goto fail;
> > }
> >
> > for (i = 0; i < idpf_args->req_vport_nb; i++) {
> > + if (idpf_args->req_vports[i] > adapter->max_vport_nb - 1) {
> > + PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be
> 0 ~ %d",
> > + idpf_args->req_vports[i], adapter-
> >max_vport_nb - 1);
> > + ret = -EINVAL;
> This check is not necessary, because we don't require the vport id specified in the
> devargs to be less than the number of vports the device supports.
[Liu, Mingxia] Yes, I'll delete this check; it tests OK without it.
* [PATCH v2] net/idpf: refine devargs parse functions
2023-04-21 7:14 [PATCH] net/idpf: refine devargs parse functions Mingxia Liu
2023-04-23 8:55 ` Wu, Jingjing
@ 2023-04-24 10:39 ` Mingxia Liu
2023-04-24 10:43 ` [PATCH v3] " Mingxia Liu
1 sibling, 1 reply; 5+ messages in thread
From: Mingxia Liu @ 2023-04-24 10:39 UTC (permalink / raw)
To: dev; +Cc: jingjing.wu, beilei.xing, Mingxia Liu
This patch refines the devargs parsing functions and replaces the
hard-coded IDPF_MAX_VPORT_NUM limit with the adapter's max_vport_nb,
which is derived from the device capabilities.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
drivers/net/idpf/idpf_ethdev.c | 61 +++++++++++++++++-----------------
1 file changed, 30 insertions(+), 31 deletions(-)
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index e02ec2ec5a..a8dd5a0a80 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -857,12 +857,6 @@ insert_value(struct idpf_devargs *devargs, uint16_t id)
return 0;
}
- if (devargs->req_vport_nb >= RTE_DIM(devargs->req_vports)) {
- PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
- IDPF_MAX_VPORT_NUM);
- return -EINVAL;
- }
-
devargs->req_vports[devargs->req_vport_nb] = id;
devargs->req_vport_nb++;
@@ -879,12 +873,10 @@ parse_range(const char *value, struct idpf_devargs *devargs)
result = sscanf(value, "%hu%n-%hu%n", &lo, &n, &hi, &n);
if (result == 1) {
- if (lo >= IDPF_MAX_VPORT_NUM)
- return NULL;
if (insert_value(devargs, lo) != 0)
return NULL;
} else if (result == 2) {
- if (lo > hi || hi >= IDPF_MAX_VPORT_NUM)
+ if (lo > hi)
return NULL;
for (i = lo; i <= hi; i++) {
if (insert_value(devargs, i) != 0)
@@ -969,40 +961,46 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *adap
return -EINVAL;
}
+ ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
+ idpf_args);
+ if (ret != 0)
+ goto fail;
+
+ ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
+ &adapter->base.is_tx_singleq);
+ if (ret != 0)
+ goto fail;
+
+ ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
+ &adapter->base.is_rx_singleq);
+ if (ret != 0)
+ goto fail;
+
/* check parsed devargs */
if (adapter->cur_vport_nb + idpf_args->req_vport_nb >
- IDPF_MAX_VPORT_NUM) {
+ adapter->max_vport_nb) {
PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
- IDPF_MAX_VPORT_NUM);
+ adapter->max_vport_nb);
ret = -EINVAL;
- goto bail;
+ goto fail;
}
for (i = 0; i < idpf_args->req_vport_nb; i++) {
+ if (idpf_args->req_vports[i] > adapter->max_vport_nb - 1) {
+ PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 ~ %d",
+ idpf_args->req_vports[i], adapter->max_vport_nb - 1);
+ ret = -EINVAL;
+ goto fail;
+ }
if (adapter->cur_vports & RTE_BIT32(idpf_args->req_vports[i])) {
- PMD_INIT_LOG(ERR, "Vport %d has been created",
+ PMD_INIT_LOG(ERR, "Vport %d has been requested",
idpf_args->req_vports[i]);
ret = -EINVAL;
- goto bail;
+ goto fail;
}
}
- ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
- idpf_args);
- if (ret != 0)
- goto bail;
-
- ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
- &adapter->base.is_tx_singleq);
- if (ret != 0)
- goto bail;
-
- ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
- &adapter->base.is_rx_singleq);
- if (ret != 0)
- goto bail;
-
-bail:
+fail:
rte_kvargs_free(kvlist);
return ret;
}
@@ -1152,7 +1150,8 @@ idpf_adapter_ext_init(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *a
rte_eal_alarm_set(IDPF_ALARM_INTERVAL, idpf_dev_alarm_handler, adapter);
- adapter->max_vport_nb = adapter->base.caps.max_vports;
+ adapter->max_vport_nb = adapter->base.caps.max_vports > IDPF_MAX_VPORT_NUM ?
+ IDPF_MAX_VPORT_NUM : adapter->base.caps.max_vports;
adapter->vports = rte_zmalloc("vports",
adapter->max_vport_nb *
--
2.34.1
* [PATCH v3] net/idpf: refine devargs parse functions
2023-04-24 10:39 ` [PATCH v2] " Mingxia Liu
@ 2023-04-24 10:43 ` Mingxia Liu
0 siblings, 0 replies; 5+ messages in thread
From: Mingxia Liu @ 2023-04-24 10:43 UTC (permalink / raw)
To: dev; +Cc: jingjing.wu, beilei.xing, Mingxia Liu
This patch refines the devargs parsing functions and replaces the
hard-coded IDPF_MAX_VPORT_NUM limit with the adapter's max_vport_nb,
which is derived from the device capabilities.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
drivers/net/idpf/idpf_ethdev.c | 55 +++++++++++++++-------------------
1 file changed, 24 insertions(+), 31 deletions(-)
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index e02ec2ec5a..7b9f04095b 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -857,12 +857,6 @@ insert_value(struct idpf_devargs *devargs, uint16_t id)
return 0;
}
- if (devargs->req_vport_nb >= RTE_DIM(devargs->req_vports)) {
- PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
- IDPF_MAX_VPORT_NUM);
- return -EINVAL;
- }
-
devargs->req_vports[devargs->req_vport_nb] = id;
devargs->req_vport_nb++;
@@ -879,12 +873,10 @@ parse_range(const char *value, struct idpf_devargs *devargs)
result = sscanf(value, "%hu%n-%hu%n", &lo, &n, &hi, &n);
if (result == 1) {
- if (lo >= IDPF_MAX_VPORT_NUM)
- return NULL;
if (insert_value(devargs, lo) != 0)
return NULL;
} else if (result == 2) {
- if (lo > hi || hi >= IDPF_MAX_VPORT_NUM)
+ if (lo > hi)
return NULL;
for (i = lo; i <= hi; i++) {
if (insert_value(devargs, i) != 0)
@@ -969,40 +961,40 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *adap
return -EINVAL;
}
+ ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
+ idpf_args);
+ if (ret != 0)
+ goto fail;
+
+ ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
+ &adapter->base.is_tx_singleq);
+ if (ret != 0)
+ goto fail;
+
+ ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
+ &adapter->base.is_rx_singleq);
+ if (ret != 0)
+ goto fail;
+
/* check parsed devargs */
if (adapter->cur_vport_nb + idpf_args->req_vport_nb >
- IDPF_MAX_VPORT_NUM) {
+ adapter->max_vport_nb) {
PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
- IDPF_MAX_VPORT_NUM);
+ adapter->max_vport_nb);
ret = -EINVAL;
- goto bail;
+ goto fail;
}
for (i = 0; i < idpf_args->req_vport_nb; i++) {
if (adapter->cur_vports & RTE_BIT32(idpf_args->req_vports[i])) {
- PMD_INIT_LOG(ERR, "Vport %d has been created",
+ PMD_INIT_LOG(ERR, "Vport %d has been requested",
idpf_args->req_vports[i]);
ret = -EINVAL;
- goto bail;
+ goto fail;
}
}
- ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
- idpf_args);
- if (ret != 0)
- goto bail;
-
- ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
- &adapter->base.is_tx_singleq);
- if (ret != 0)
- goto bail;
-
- ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
- &adapter->base.is_rx_singleq);
- if (ret != 0)
- goto bail;
-
-bail:
+fail:
rte_kvargs_free(kvlist);
return ret;
}
@@ -1152,7 +1144,8 @@ idpf_adapter_ext_init(struct rte_pci_device *pci_dev, struct idpf_adapter_ext *a
rte_eal_alarm_set(IDPF_ALARM_INTERVAL, idpf_dev_alarm_handler, adapter);
- adapter->max_vport_nb = adapter->base.caps.max_vports;
+ adapter->max_vport_nb = adapter->base.caps.max_vports > IDPF_MAX_VPORT_NUM ?
+ IDPF_MAX_VPORT_NUM : adapter->base.caps.max_vports;
adapter->vports = rte_zmalloc("vports",
adapter->max_vport_nb *
--
2.34.1
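As context for the hunk in idpf_adapter_ext_init() above: the sketch below is a
standalone illustration (with made-up values, not the real capability data) of the
clamp the v3 patch applies, i.e. the PMD manages at most IDPF_MAX_VPORT_NUM vports
even if the device reports more. In-tree code could also express the same thing
with RTE_MIN() from rte_common.h; the constant value used here is a placeholder.

#include <stdint.h>
#include <stdio.h>

#define IDPF_MAX_VPORT_NUM 8 /* placeholder; the real bound lives in the driver */

int
main(void)
{
        uint16_t caps_max_vports = 12; /* pretend value reported by the device */
        uint16_t max_vport_nb;

        /* Same logic as the ternary in the patch: cap the device-reported
         * maximum at the driver's own compile-time bound. */
        max_vport_nb = caps_max_vports > IDPF_MAX_VPORT_NUM ?
                       IDPF_MAX_VPORT_NUM : caps_max_vports;

        printf("max_vport_nb = %u\n", max_vport_nb);
        return 0;
}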