Fix to assign dummy Rx/Tx handlers in dev_stop. For MTU set, assignment of the appropriate Rx/Tx handlers will be handled by dev_start/dev_stop. Fixes: 81f8804992c9 ("net/qede: enhance Rx CPU utilization") Fixes: 8de0c4201926 ("net/qede: fix odd number of queues usage in 100G mode") Cc: stable@dpdk.org Signed-off-by: Rasesh Mody <rmody@marvell.com> Signed-off-by: Igor Russkikh <irusskikh@marvell.com> --- drivers/net/qede/qede_ethdev.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c index e71fa1e6a..726daa3e3 100644 --- a/drivers/net/qede/qede_ethdev.c +++ b/drivers/net/qede/qede_ethdev.c @@ -320,13 +320,19 @@ qede_interrupt_handler(void *param) } static void -qede_assign_rxtx_handlers(struct rte_eth_dev *dev) +qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy) { uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; struct qede_dev *qdev = dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; bool use_tx_offload = false; + if (is_dummy) { + dev->rx_pkt_burst = qede_rxtx_pkts_dummy; + dev->tx_pkt_burst = qede_rxtx_pkts_dummy; + return; + } + if (ECORE_IS_CMT(edev)) { dev->rx_pkt_burst = qede_recv_pkts_cmt; dev->tx_pkt_burst = qede_xmit_pkts_cmt; @@ -1150,7 +1156,9 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) /* Start/resume traffic */ qede_fastpath_start(edev); - qede_assign_rxtx_handlers(eth_dev); + /* Assign I/O handlers */ + qede_assign_rxtx_handlers(eth_dev, false); + DP_INFO(edev, "Device started\n"); return 0; @@ -1166,6 +1174,11 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(edev); + /* Replace I/O functions with dummy ones. It cannot + * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. 
+ */ + qede_assign_rxtx_handlers(eth_dev, true); + /* Disable vport */ if (qede_activate_vport(eth_dev, false)) return; @@ -2316,11 +2329,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) dev->data->min_rx_buf_size); return -EINVAL; } - /* Temporarily replace I/O functions with dummy ones. It cannot - * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. - */ - dev->rx_pkt_burst = qede_rxtx_pkts_dummy; - dev->tx_pkt_burst = qede_rxtx_pkts_dummy; if (dev->data->dev_started) { dev->data->dev_started = 0; qede_dev_stop(dev); @@ -2359,15 +2367,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) /* update max frame size */ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len; - /* Reassign back */ - qede_assign_rxtx_handlers(dev); - if (ECORE_IS_CMT(edev)) { - dev->rx_pkt_burst = qede_recv_pkts_cmt; - dev->tx_pkt_burst = qede_xmit_pkts_cmt; - } else { - dev->rx_pkt_burst = qede_recv_pkts; - dev->tx_pkt_burst = qede_xmit_pkts; - } return 0; } @@ -2570,7 +2569,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, QEDE_PMD_DRV_VER_STR_SIZE); - qede_assign_rxtx_handlers(eth_dev); + qede_assign_rxtx_handlers(eth_dev, true); eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; /* For CMT mode device do periodic polling for slowpath events. -- 2.18.0
On 5/5/2020 4:09 AM, Rasesh Mody wrote: > Fix to assign dummy Rx/Tx handlers in dev_stop. > For MTU set, assignment of the appropriate Rx/Tx handlers will be > handled by dev_start/dev_stop. > > Fixes: 81f8804992c9 ("net/qede: enhance Rx CPU utilization") > Fixes: 8de0c4201926 ("net/qede: fix odd number of queues usage in 100G mode") > Cc: stable@dpdk.org > > Signed-off-by: Rasesh Mody <rmody@marvell.com> > Signed-off-by: Igor Russkikh <irusskikh@marvell.com> > --- > drivers/net/qede/qede_ethdev.c | 33 ++++++++++++++++----------------- > 1 file changed, 16 insertions(+), 17 deletions(-) > > diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c > index e71fa1e6a..726daa3e3 100644 > --- a/drivers/net/qede/qede_ethdev.c > +++ b/drivers/net/qede/qede_ethdev.c > @@ -320,13 +320,19 @@ qede_interrupt_handler(void *param) > } > > static void > -qede_assign_rxtx_handlers(struct rte_eth_dev *dev) > +qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy) > { > uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; > struct qede_dev *qdev = dev->data->dev_private; > struct ecore_dev *edev = &qdev->edev; > bool use_tx_offload = false; > > + if (is_dummy) { > + dev->rx_pkt_burst = qede_rxtx_pkts_dummy; > + dev->tx_pkt_burst = qede_rxtx_pkts_dummy; > + return; > + } > + > if (ECORE_IS_CMT(edev)) { > dev->rx_pkt_burst = qede_recv_pkts_cmt; > dev->tx_pkt_burst = qede_xmit_pkts_cmt; > @@ -1150,7 +1156,9 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) > /* Start/resume traffic */ > qede_fastpath_start(edev); > > - qede_assign_rxtx_handlers(eth_dev); > + /* Assign I/O handlers */ > + qede_assign_rxtx_handlers(eth_dev, false); > + > DP_INFO(edev, "Device started\n"); > > return 0; > @@ -1166,6 +1174,11 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev) > > PMD_INIT_FUNC_TRACE(edev); > > + /* Replace I/O functions with dummy ones. It cannot > + * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. 
> + */ > + qede_assign_rxtx_handlers(eth_dev, true); > + Why need to assign dummy handlers on stop(), what happens if you keep them as they are as many PMD does? > /* Disable vport */ > if (qede_activate_vport(eth_dev, false)) > return; > @@ -2316,11 +2329,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) > dev->data->min_rx_buf_size); > return -EINVAL; > } > - /* Temporarily replace I/O functions with dummy ones. It cannot > - * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. > - */ > - dev->rx_pkt_burst = qede_rxtx_pkts_dummy; > - dev->tx_pkt_burst = qede_rxtx_pkts_dummy; > if (dev->data->dev_started) { > dev->data->dev_started = 0; > qede_dev_stop(dev); > @@ -2359,15 +2367,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) > /* update max frame size */ > dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len; > > - /* Reassign back */ > - qede_assign_rxtx_handlers(dev); > - if (ECORE_IS_CMT(edev)) { > - dev->rx_pkt_burst = qede_recv_pkts_cmt; > - dev->tx_pkt_burst = qede_xmit_pkts_cmt; > - } else { > - dev->rx_pkt_burst = qede_recv_pkts; > - dev->tx_pkt_burst = qede_xmit_pkts; > - } > return 0; > } > > @@ -2570,7 +2569,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) > strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, > QEDE_PMD_DRV_VER_STR_SIZE); > > - qede_assign_rxtx_handlers(eth_dev); > + qede_assign_rxtx_handlers(eth_dev, true); > eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; > > /* For CMT mode device do periodic polling for slowpath events. >
Hi Ferruh, >From: Ferruh Yigit <ferruh.yigit@intel.com> >Sent: Tuesday, May 05, 2020 2:01 AM > >On 5/5/2020 4:09 AM, Rasesh Mody wrote: >> Fix to assign dummy Rx/Tx handlers in dev_stop. >> For MTU set, assignment of the appropriate Rx/Tx handlers will be >> handled by dev_start/dev_stop. >> >> Fixes: 81f8804992c9 ("net/qede: enhance Rx CPU utilization") >> Fixes: 8de0c4201926 ("net/qede: fix odd number of queues usage in 100G >> mode") >> Cc: stable@dpdk.org >> >> Signed-off-by: Rasesh Mody <rmody@marvell.com> >> Signed-off-by: Igor Russkikh <irusskikh@marvell.com> >> --- >> drivers/net/qede/qede_ethdev.c | 33 ++++++++++++++++----------------- >> 1 file changed, 16 insertions(+), 17 deletions(-) >> >> diff --git a/drivers/net/qede/qede_ethdev.c >> b/drivers/net/qede/qede_ethdev.c index e71fa1e6a..726daa3e3 100644 >> --- a/drivers/net/qede/qede_ethdev.c >> +++ b/drivers/net/qede/qede_ethdev.c >> @@ -320,13 +320,19 @@ qede_interrupt_handler(void *param) } >> >> static void >> -qede_assign_rxtx_handlers(struct rte_eth_dev *dev) >> +qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy) >> { >> uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; >> struct qede_dev *qdev = dev->data->dev_private; >> struct ecore_dev *edev = &qdev->edev; >> bool use_tx_offload = false; >> >> + if (is_dummy) { >> + dev->rx_pkt_burst = qede_rxtx_pkts_dummy; >> + dev->tx_pkt_burst = qede_rxtx_pkts_dummy; >> + return; >> + } >> + >> if (ECORE_IS_CMT(edev)) { >> dev->rx_pkt_burst = qede_recv_pkts_cmt; >> dev->tx_pkt_burst = qede_xmit_pkts_cmt; @@ -1150,7 >+1156,9 @@ >> static int qede_dev_start(struct rte_eth_dev *eth_dev) >> /* Start/resume traffic */ >> qede_fastpath_start(edev); >> >> - qede_assign_rxtx_handlers(eth_dev); >> + /* Assign I/O handlers */ >> + qede_assign_rxtx_handlers(eth_dev, false); >> + >> DP_INFO(edev, "Device started\n"); >> >> return 0; >> @@ -1166,6 +1174,11 @@ static void qede_dev_stop(struct rte_eth_dev >> *eth_dev) >> >> 
PMD_INIT_FUNC_TRACE(edev); >> >> + /* Replace I/O functions with dummy ones. It cannot >> + * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. >> + */ >> + qede_assign_rxtx_handlers(eth_dev, true); >> + > >Why need to assign dummy handlers on stop(), what happens if you keep >them as they are as many PMD does? It helps in preventing crash when queues are still in use. Thanks! -Rasesh > >> /* Disable vport */ >> if (qede_activate_vport(eth_dev, false)) >> return; >> @@ -2316,11 +2329,6 @@ static int qede_set_mtu(struct rte_eth_dev >*dev, uint16_t mtu) >> dev->data->min_rx_buf_size); >> return -EINVAL; >> } >> - /* Temporarily replace I/O functions with dummy ones. It cannot >> - * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. >> - */ >> - dev->rx_pkt_burst = qede_rxtx_pkts_dummy; >> - dev->tx_pkt_burst = qede_rxtx_pkts_dummy; >> if (dev->data->dev_started) { >> dev->data->dev_started = 0; >> qede_dev_stop(dev); >> @@ -2359,15 +2367,6 @@ static int qede_set_mtu(struct rte_eth_dev >*dev, uint16_t mtu) >> /* update max frame size */ >> dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len; >> >> - /* Reassign back */ >> - qede_assign_rxtx_handlers(dev); >> - if (ECORE_IS_CMT(edev)) { >> - dev->rx_pkt_burst = qede_recv_pkts_cmt; >> - dev->tx_pkt_burst = qede_xmit_pkts_cmt; >> - } else { >> - dev->rx_pkt_burst = qede_recv_pkts; >> - dev->tx_pkt_burst = qede_xmit_pkts; >> - } >> return 0; >> } >> >> @@ -2570,7 +2569,7 @@ static int qede_common_dev_init(struct >rte_eth_dev *eth_dev, bool is_vf) >> strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, >> QEDE_PMD_DRV_VER_STR_SIZE); >> >> - qede_assign_rxtx_handlers(eth_dev); >> + qede_assign_rxtx_handlers(eth_dev, true); >> eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; >> >> /* For CMT mode device do periodic polling for slowpath events. >>
Fix to assign dummy Rx/Tx handlers in dev_stop. For MTU set, assignment of the appropriate Rx/Tx handlers will be handled by dev_start/dev_stop. Fixes: 81f8804992c9 ("net/qede: enhance Rx CPU utilization") Fixes: 8de0c4201926 ("net/qede: fix odd number of queues usage in 100G mode") Cc: stable@dpdk.org Signed-off-by: Rasesh Mody <rmody@marvell.com> Signed-off-by: Igor Russkikh <irusskikh@marvell.com> --- drivers/net/qede/qede_ethdev.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c index 0b1fca9ac..d3d916e81 100644 --- a/drivers/net/qede/qede_ethdev.c +++ b/drivers/net/qede/qede_ethdev.c @@ -320,13 +320,19 @@ qede_interrupt_handler(void *param) } static void -qede_assign_rxtx_handlers(struct rte_eth_dev *dev) +qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy) { uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; struct qede_dev *qdev = dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; bool use_tx_offload = false; + if (is_dummy) { + dev->rx_pkt_burst = qede_rxtx_pkts_dummy; + dev->tx_pkt_burst = qede_rxtx_pkts_dummy; + return; + } + if (ECORE_IS_CMT(edev)) { dev->rx_pkt_burst = qede_recv_pkts_cmt; dev->tx_pkt_burst = qede_xmit_pkts_cmt; @@ -1153,7 +1159,9 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) /* Start/resume traffic */ qede_fastpath_start(edev); - qede_assign_rxtx_handlers(eth_dev); + /* Assign I/O handlers */ + qede_assign_rxtx_handlers(eth_dev, false); + DP_INFO(edev, "Device started\n"); return 0; @@ -1175,6 +1183,11 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev) /* Update link status */ qede_link_update(eth_dev, 0); + /* Replace I/O functions with dummy ones. It cannot + * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. 
+ */ + qede_assign_rxtx_handlers(eth_dev, true); + /* Disable vport */ if (qede_activate_vport(eth_dev, false)) return; @@ -2323,11 +2336,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) dev->data->min_rx_buf_size); return -EINVAL; } - /* Temporarily replace I/O functions with dummy ones. It cannot - * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. - */ - dev->rx_pkt_burst = qede_rxtx_pkts_dummy; - dev->tx_pkt_burst = qede_rxtx_pkts_dummy; if (dev->data->dev_started) { dev->data->dev_started = 0; qede_dev_stop(dev); @@ -2366,15 +2374,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) /* update max frame size */ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len; - /* Reassign back */ - qede_assign_rxtx_handlers(dev); - if (ECORE_IS_CMT(edev)) { - dev->rx_pkt_burst = qede_recv_pkts_cmt; - dev->tx_pkt_burst = qede_xmit_pkts_cmt; - } else { - dev->rx_pkt_burst = qede_recv_pkts; - dev->tx_pkt_burst = qede_xmit_pkts; - } return 0; } @@ -2577,7 +2576,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, QEDE_PMD_DRV_VER_STR_SIZE); - qede_assign_rxtx_handlers(eth_dev); + qede_assign_rxtx_handlers(eth_dev, true); eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; /* For CMT mode device do periodic polling for slowpath events. -- 2.18.0
This patch adds a fix to setup Tx queue when changing KNI interface MTU. It ensures device can safely start txq post MTU change operation. Fixes: fc9ee41b7016 ("examples/kni: convert to new ethdev offloads API") Cc: stable@dpdk.org Signed-off-by: Rasesh Mody <rmody@marvell.com> --- examples/kni/main.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/examples/kni/main.c b/examples/kni/main.c index 6b4ab3b5b..7a927a50c 100644 --- a/examples/kni/main.c +++ b/examples/kni/main.c @@ -774,9 +774,11 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu) { int ret; uint16_t nb_rxd = NB_RXD; + uint16_t nb_txd = NB_TXD; struct rte_eth_conf conf; struct rte_eth_dev_info dev_info; struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; if (!rte_eth_dev_is_valid_port(port_id)) { RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id); @@ -804,7 +806,7 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu) return ret; } - ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, NULL); + ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd); if (ret < 0) rte_exit(EXIT_FAILURE, "Could not adjust number of descriptors " "for port%u (%d)\n", (unsigned int)port_id, @@ -829,6 +831,16 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu) return ret; } + txq_conf = dev_info.default_txconf; + txq_conf.offloads = conf.txmode.offloads; + ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, + rte_eth_dev_socket_id(port_id), &txq_conf); + if (ret < 0) { + RTE_LOG(ERR, APP, "Fail to setup Tx queue of port %d\n", + port_id); + return ret; + } + /* Restart specific port */ ret = rte_eth_dev_start(port_id); if (ret < 0) { -- 2.18.0
On 5/14/2020 5:09 AM, Rasesh Mody wrote:
> This patch adds a fix to setup Tx queue when changing KNI interface MTU.
> It ensures device can safely start txq post MTU change operation.
>
> Fixes: fc9ee41b7016 ("examples/kni: convert to new ethdev offloads API")
> Cc: stable@dpdk.org
>
> Signed-off-by: Rasesh Mody <rmody@marvell.com>
> ---
> examples/kni/main.c | 14 +++++++++++++-
> 1 file changed, 13 insertions(+), 1 deletion(-)
>
> diff --git a/examples/kni/main.c b/examples/kni/main.c
> index 6b4ab3b5b..7a927a50c 100644
> --- a/examples/kni/main.c
> +++ b/examples/kni/main.c
> @@ -774,9 +774,11 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
> {
> int ret;
> uint16_t nb_rxd = NB_RXD;
> + uint16_t nb_txd = NB_TXD;
> struct rte_eth_conf conf;
> struct rte_eth_dev_info dev_info;
> struct rte_eth_rxconf rxq_conf;
> + struct rte_eth_txconf txq_conf;
>
> if (!rte_eth_dev_is_valid_port(port_id)) {
> RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
> @@ -804,7 +806,7 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
> return ret;
> }
>
> - ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, NULL);
> + ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
> if (ret < 0)
> rte_exit(EXIT_FAILURE, "Could not adjust number of descriptors "
> "for port%u (%d)\n", (unsigned int)port_id,
> @@ -829,6 +831,16 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
> return ret;
> }
>
> + txq_conf = dev_info.default_txconf;
> + txq_conf.offloads = conf.txmode.offloads;
> + ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
> + rte_eth_dev_socket_id(port_id), &txq_conf);
> + if (ret < 0) {
> + RTE_LOG(ERR, APP, "Fail to setup Tx queue of port %d\n",
> + port_id);
> + return ret;
> + }
> +
> /* Restart specific port */
> ret = rte_eth_dev_start(port_id);
> if (ret < 0) {
>
Hi Rasesh,
The change looks OK but I am trying to understand the need for it, already asked
in the other thread, will follow there.
Fix to assign dummy Rx/Tx handlers in dev_stop. For MTU set, assignment of the appropriate Rx/Tx handlers will be handled by dev_start/dev_stop. Fixes: 81f8804992c9 ("net/qede: enhance Rx CPU utilization") Fixes: 8de0c4201926 ("net/qede: fix odd number of queues usage in 100G mode") Cc: stable@dpdk.org Signed-off-by: Rasesh Mody <rmody@marvell.com> Signed-off-by: Igor Russkikh <irusskikh@marvell.com> --- drivers/net/qede/qede_ethdev.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c index 0b1fca9ac..d3d916e81 100644 --- a/drivers/net/qede/qede_ethdev.c +++ b/drivers/net/qede/qede_ethdev.c @@ -320,13 +320,19 @@ qede_interrupt_handler(void *param) } static void -qede_assign_rxtx_handlers(struct rte_eth_dev *dev) +qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy) { uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; struct qede_dev *qdev = dev->data->dev_private; struct ecore_dev *edev = &qdev->edev; bool use_tx_offload = false; + if (is_dummy) { + dev->rx_pkt_burst = qede_rxtx_pkts_dummy; + dev->tx_pkt_burst = qede_rxtx_pkts_dummy; + return; + } + if (ECORE_IS_CMT(edev)) { dev->rx_pkt_burst = qede_recv_pkts_cmt; dev->tx_pkt_burst = qede_xmit_pkts_cmt; @@ -1153,7 +1159,9 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) /* Start/resume traffic */ qede_fastpath_start(edev); - qede_assign_rxtx_handlers(eth_dev); + /* Assign I/O handlers */ + qede_assign_rxtx_handlers(eth_dev, false); + DP_INFO(edev, "Device started\n"); return 0; @@ -1175,6 +1183,11 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev) /* Update link status */ qede_link_update(eth_dev, 0); + /* Replace I/O functions with dummy ones. It cannot + * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. 
+ */ + qede_assign_rxtx_handlers(eth_dev, true); + /* Disable vport */ if (qede_activate_vport(eth_dev, false)) return; @@ -2323,11 +2336,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) dev->data->min_rx_buf_size); return -EINVAL; } - /* Temporarily replace I/O functions with dummy ones. It cannot - * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. - */ - dev->rx_pkt_burst = qede_rxtx_pkts_dummy; - dev->tx_pkt_burst = qede_rxtx_pkts_dummy; if (dev->data->dev_started) { dev->data->dev_started = 0; qede_dev_stop(dev); @@ -2366,15 +2374,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) /* update max frame size */ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len; - /* Reassign back */ - qede_assign_rxtx_handlers(dev); - if (ECORE_IS_CMT(edev)) { - dev->rx_pkt_burst = qede_recv_pkts_cmt; - dev->tx_pkt_burst = qede_xmit_pkts_cmt; - } else { - dev->rx_pkt_burst = qede_recv_pkts; - dev->tx_pkt_burst = qede_xmit_pkts; - } return 0; } @@ -2577,7 +2576,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, QEDE_PMD_DRV_VER_STR_SIZE); - qede_assign_rxtx_handlers(eth_dev); + qede_assign_rxtx_handlers(eth_dev, true); eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; /* For CMT mode device do periodic polling for slowpath events. -- 2.18.0
This patch fixes deallocation of all fastpath resources unconditionally, when re-configuring the device. When re-allocating resources PMD depends on application to explicitly setup the Rx/Tx queue. Deallocation of all the resources is only required if the Rx/Tx queue configuration changes. For other scenarios like KNI MTU change we'd keep existing configuration. Fixes: 8de0c4201926 ("net/qede: fix odd number of queues usage in 100G mode") Fixes: dd28bc8c6ef4 ("net/qede: fix VF port creation sequence") Cc: stable@dpdk.org Signed-off-by: Rasesh Mody <rmody@marvell.com> Signed-off-by: Igor Russkikh <irusskikh@marvell.com> --- drivers/net/qede/qede_ethdev.c | 19 +++++++++++++------ drivers/net/qede/qede_rxtx.c | 4 +++- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c index d3d916e81..c4f8f1258 100644 --- a/drivers/net/qede/qede_ethdev.c +++ b/drivers/net/qede/qede_ethdev.c @@ -1273,6 +1273,8 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; + uint8_t num_rxqs; + uint8_t num_txqs; int ret; PMD_INIT_FUNC_TRACE(edev); @@ -1305,12 +1307,17 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev) if (qede_check_fdir_support(eth_dev)) return -ENOTSUP; - qede_dealloc_fp_resc(eth_dev); - qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns; - qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns; - - if (qede_alloc_fp_resc(qdev)) - return -ENOMEM; + /* Allocate/reallocate fastpath resources only for new queue config */ + num_txqs = eth_dev->data->nb_tx_queues * edev->num_hwfns; + num_rxqs = eth_dev->data->nb_rx_queues * edev->num_hwfns; + if (qdev->num_tx_queues != num_txqs || + qdev->num_rx_queues != num_rxqs) { + qede_dealloc_fp_resc(eth_dev); + qdev->num_tx_queues = num_txqs; + qdev->num_rx_queues 
= num_rxqs; + if (qede_alloc_fp_resc(qdev)) + return -ENOMEM; + } /* If jumbo enabled adjust MTU */ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c index b81788ca4..9878ba50e 100644 --- a/drivers/net/qede/qede_rxtx.c +++ b/drivers/net/qede/qede_rxtx.c @@ -593,12 +593,14 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info, int qede_alloc_fp_resc(struct qede_dev *qdev) { - struct ecore_dev *edev = &qdev->edev; + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); struct qede_fastpath *fp; uint32_t num_sbs; uint16_t sb_idx; int i; + PMD_INIT_FUNC_TRACE(edev); + if (IS_VF(edev)) ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs); else -- 2.18.0
On 5/14/2020 5:09 AM, Rasesh Mody wrote:
> This patch adds a fix to setup Tx queue when changing KNI interface MTU.
> It ensures device can safely start txq post MTU change operation.
>
> Fixes: fc9ee41b7016 ("examples/kni: convert to new ethdev offloads API")
> Cc: stable@dpdk.org
>
> Signed-off-by: Rasesh Mody <rmody@marvell.com>
I see you are going with a fix in the PMD, but the fix in the KNI sample also
looks reasonable, so I guess we can keep this.
Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
On Fri, May 15, 2020 at 12:05 PM Rasesh Mody <rmody@marvell.com> wrote: > > Fix to assign dummy Rx/Tx handlers in dev_stop. > For MTU set, assignment of the appropriate Rx/Tx handlers will be > handled by dev_start/dev_stop. > > Fixes: 81f8804992c9 ("net/qede: enhance Rx CPU utilization") > Fixes: 8de0c4201926 ("net/qede: fix odd number of queues usage in 100G mode") > Cc: stable@dpdk.org > > Signed-off-by: Rasesh Mody <rmody@marvell.com> > Signed-off-by: Igor Russkikh <irusskikh@marvell.com> Series applied to dpdk-next-net-mrvl/master after fixing the below issue. Wrong tag: Fixes: 8de0c4201926 ("net/qede: fix odd number of queues usage in 100G Wrong 'Fixes' reference: Fixes: 8de0c4201926 ("net/qede: fix odd number of queues usage in 100G > --- > drivers/net/qede/qede_ethdev.c | 33 ++++++++++++++++----------------- > 1 file changed, 16 insertions(+), 17 deletions(-) > > diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c > index 0b1fca9ac..d3d916e81 100644 > --- a/drivers/net/qede/qede_ethdev.c > +++ b/drivers/net/qede/qede_ethdev.c > @@ -320,13 +320,19 @@ qede_interrupt_handler(void *param) > } > > static void > -qede_assign_rxtx_handlers(struct rte_eth_dev *dev) > +qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy) > { > uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; > struct qede_dev *qdev = dev->data->dev_private; > struct ecore_dev *edev = &qdev->edev; > bool use_tx_offload = false; > > + if (is_dummy) { > + dev->rx_pkt_burst = qede_rxtx_pkts_dummy; > + dev->tx_pkt_burst = qede_rxtx_pkts_dummy; > + return; > + } > + > if (ECORE_IS_CMT(edev)) { > dev->rx_pkt_burst = qede_recv_pkts_cmt; > dev->tx_pkt_burst = qede_xmit_pkts_cmt; > @@ -1153,7 +1159,9 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev) > /* Start/resume traffic */ > qede_fastpath_start(edev); > > - qede_assign_rxtx_handlers(eth_dev); > + /* Assign I/O handlers */ > + qede_assign_rxtx_handlers(eth_dev, false); > + > DP_INFO(edev, "Device 
started\n"); > > return 0; > @@ -1175,6 +1183,11 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev) > /* Update link status */ > qede_link_update(eth_dev, 0); > > + /* Replace I/O functions with dummy ones. It cannot > + * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. > + */ > + qede_assign_rxtx_handlers(eth_dev, true); > + > /* Disable vport */ > if (qede_activate_vport(eth_dev, false)) > return; > @@ -2323,11 +2336,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) > dev->data->min_rx_buf_size); > return -EINVAL; > } > - /* Temporarily replace I/O functions with dummy ones. It cannot > - * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. > - */ > - dev->rx_pkt_burst = qede_rxtx_pkts_dummy; > - dev->tx_pkt_burst = qede_rxtx_pkts_dummy; > if (dev->data->dev_started) { > dev->data->dev_started = 0; > qede_dev_stop(dev); > @@ -2366,15 +2374,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) > /* update max frame size */ > dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len; > > - /* Reassign back */ > - qede_assign_rxtx_handlers(dev); > - if (ECORE_IS_CMT(edev)) { > - dev->rx_pkt_burst = qede_recv_pkts_cmt; > - dev->tx_pkt_burst = qede_xmit_pkts_cmt; > - } else { > - dev->rx_pkt_burst = qede_recv_pkts; > - dev->tx_pkt_burst = qede_xmit_pkts; > - } > return 0; > } > > @@ -2577,7 +2576,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) > strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, > QEDE_PMD_DRV_VER_STR_SIZE); > > - qede_assign_rxtx_handlers(eth_dev); > + qede_assign_rxtx_handlers(eth_dev, true); > eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; > > /* For CMT mode device do periodic polling for slowpath events. > -- > 2.18.0 >
15/05/2020 13:29, Ferruh Yigit:
> On 5/14/2020 5:09 AM, Rasesh Mody wrote:
> > This patch adds a fix to setup Tx queue when changing KNI interface MTU.
> > It ensures device can safely start txq post MTU change operation.
> >
> > Fixes: fc9ee41b7016 ("examples/kni: convert to new ethdev offloads API")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Rasesh Mody <rmody@marvell.com>
>
> I see you are going with a fix in the PMD, but the fix in the KNI sample also
> looks reasonable, so I guess we can keep this.
>
> Acked-by: Ferruh Yigit <ferruh.yigit@intel.com>
Applied, thanks