From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
To: Chaoyong He <chaoyong.he@corigine.com>, dev@dpdk.org
Cc: niklas.soderlund@corigine.com
Subject: Re: [PATCH v5 06/12] net/nfp: add flower PF related routines
Date: Fri, 5 Aug 2022 15:55:26 +0300
Message-ID: <2a3bab36-4263-17a6-a009-5125f0d01426@oktetlabs.ru>
In-Reply-To: <1659681155-16525-7-git-send-email-chaoyong.he@corigine.com>
On 8/5/22 09:32, Chaoyong He wrote:
> This commit adds the start/stop/close routine of the
"This commit adds" -> "Add"
Typically, close is paired with configure.
> flower PF vNIC.
>
> Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
> Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
> ---
> drivers/net/nfp/flower/nfp_flower.c | 193 ++++++++++++++++++++++++++++++++++++
> 1 file changed, 193 insertions(+)
>
> diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
> index c05d4ca..2498020 100644
> --- a/drivers/net/nfp/flower/nfp_flower.c
> +++ b/drivers/net/nfp/flower/nfp_flower.c
> @@ -7,6 +7,7 @@
> #include <ethdev_driver.h>
> #include <rte_service_component.h>
> #include <rte_malloc.h>
> +#include <rte_alarm.h>
> #include <ethdev_pci.h>
> #include <ethdev_driver.h>
>
> @@ -37,11 +38,178 @@
> return 0;
> }
>
> +static int
> +nfp_flower_pf_start(struct rte_eth_dev *dev)
> +{
> + int ret;
> + uint32_t new_ctrl;
> + uint32_t update = 0;
> + struct nfp_net_hw *hw;
> +
> + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> + /* Disabling queues just in case... */
> + nfp_net_disable_queues(dev);
> +
> + /* Enabling the required queues in the device */
> + nfp_net_enable_queues(dev);
> +
> + new_ctrl = nfp_check_offloads(dev);
> +
> + /* Writing configuration parameters in the device */
> + nfp_net_params_setup(hw);
> +
> + nfp_net_rss_config_default(dev);
> + update |= NFP_NET_CFG_UPDATE_RSS;
> +
> + if (hw->cap & NFP_NET_CFG_CTRL_RSS2)
> + new_ctrl |= NFP_NET_CFG_CTRL_RSS2;
> + else
> + new_ctrl |= NFP_NET_CFG_CTRL_RSS;
> +
> + /* Enable device */
> + new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
> +
> + update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
> +
> + if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
> + new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
> +
> + nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
> +
> + /* If an error when reconfig we avoid to change hw state */
> + ret = nfp_net_reconfig(hw, new_ctrl, update);
> + if (ret) {
Please compare the return value with 0 explicitly, as the DPDK coding
style requires (see the snippet after the quoted block below).
> + PMD_INIT_LOG(ERR, "Failed to reconfig PF vnic");
> + return -EIO;
> + }
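A minimal rework of the quoted lines, just making the check explicit:

	ret = nfp_net_reconfig(hw, new_ctrl, update);
	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Failed to reconfig PF vnic");
		return -EIO;
	}

The same applies to the other 'if (ret)' checks below.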
> +
> + hw->ctrl = new_ctrl;
> +
> + /* Setup the freelist ring */
> + ret = nfp_net_rx_freelist_setup(dev);
> + if (ret) {
Compare with 0 here as well.
> + PMD_INIT_LOG(ERR, "Error with flower PF vNIC freelist setup");
> + return -EIO;
> + }
> +
> + return 0;
> +}
> +
> +/* Stop device: disable rx and tx functions to allow for reconfiguring. */
> +static int
> +nfp_flower_pf_stop(struct rte_eth_dev *dev)
> +{
> + uint16_t i;
> + struct nfp_net_hw *hw;
> + struct nfp_net_txq *this_tx_q;
> + struct nfp_net_rxq *this_rx_q;
> +
> + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> + nfp_net_disable_queues(dev);
> +
> + /* Clear queues */
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
> + nfp_net_reset_tx_queue(this_tx_q);
> + }
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
> + nfp_net_reset_rx_queue(this_rx_q);
> + }
> +
> + if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> + /* Configure the physical port down */
> + nfp_eth_set_configured(hw->cpp, hw->nfp_idx, 0);
> + else
> + nfp_eth_set_configured(dev->process_private, hw->nfp_idx, 0);
> +
> + return 0;
> +}
> +
> +/* Reset and stop device. The device can not be restarted. */
> +static int
> +nfp_flower_pf_close(struct rte_eth_dev *dev)
> +{
> + uint16_t i;
> + struct nfp_net_hw *hw;
> + struct nfp_pf_dev *pf_dev;
> + struct nfp_net_txq *this_tx_q;
> + struct nfp_net_rxq *this_rx_q;
> + struct rte_pci_device *pci_dev;
> + struct nfp_app_flower *app_flower;
> +
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> + return 0;
> +
> + pf_dev = NFP_NET_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> + hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + pci_dev = RTE_ETH_DEV_TO_PCI(dev);
> + app_flower = NFP_APP_PRIV_TO_APP_FLOWER(pf_dev->app_priv);
> +
> + /*
> + * We assume that the DPDK application is stopping all the
> + * threads/queues before calling the device close function.
> + */
> +
> + nfp_net_disable_queues(dev);
> +
> + /* Clear queues */
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> + this_tx_q = (struct nfp_net_txq *)dev->data->tx_queues[i];
> + nfp_net_reset_tx_queue(this_tx_q);
> + }
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> + this_rx_q = (struct nfp_net_rxq *)dev->data->rx_queues[i];
> + nfp_net_reset_rx_queue(this_rx_q);
> + }
> +
> + /* Cancel possible impending LSC work here before releasing the port*/
> + rte_eal_alarm_cancel(nfp_net_dev_interrupt_delayed_handler, (void *)dev);
> +
> + nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
> +
> + rte_eth_dev_release_port(dev);
> +
> + /* Now it is safe to free all PF resources */
> + PMD_INIT_LOG(INFO, "Freeing PF resources");
> + nfp_cpp_area_free(pf_dev->ctrl_area);
> + nfp_cpp_area_free(pf_dev->hwqueues_area);
> + free(pf_dev->hwinfo);
> + free(pf_dev->sym_tbl);
> + nfp_cpp_free(pf_dev->cpp);
> + rte_free(app_flower);
> + rte_free(pf_dev);
> +
> + rte_intr_disable(pci_dev->intr_handle);
> +
> + /* unregister callback func from eal lib */
> + rte_intr_callback_unregister(pci_dev->intr_handle,
> + nfp_net_dev_interrupt_handler, (void *)dev);
> +
> + return 0;
> +}
> +
> +static int
> +nfp_flower_pf_link_update(__rte_unused struct rte_eth_dev *dev,
> + __rte_unused int wait_to_complete)
> +{
This is a really confusing implementation of the operation.
Could you explain why a dummy implementation is OK here? Why is the
callback required at all if it does nothing? (See the sketch after the
quoted function for what I'd expect as a minimum.)
> + return 0;
> +}
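If the intent is simply to report the port as up until proper link
handling is added, a minimal sketch (my assumption, reusing the generic
rte_eth_linkstatus_set() helper rather than any NFP-specific code)
would at least fill in the link structure:

	static int
	nfp_flower_pf_link_update(struct rte_eth_dev *dev,
			__rte_unused int wait_to_complete)
	{
		struct rte_eth_link link;

		/* Report a fixed link-up state; no HW query yet */
		memset(&link, 0, sizeof(link));
		link.link_status = RTE_ETH_LINK_UP;
		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
		link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;

		return rte_eth_linkstatus_set(dev, &link);
	}

Otherwise, please add a comment explaining why the empty stub is
sufficient.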
> +
> static const struct eth_dev_ops nfp_flower_pf_dev_ops = {
> .dev_configure = nfp_flower_pf_configure,
>
> /* Use the normal dev_infos_get functionality in the NFP PMD */
> .dev_infos_get = nfp_net_infos_get,
> +
> + .dev_start = nfp_flower_pf_start,
> + .dev_stop = nfp_flower_pf_stop,
> + .dev_close = nfp_flower_pf_close,
> + .link_update = nfp_flower_pf_link_update,
> };
>
> static struct rte_service_spec flower_services[NFP_FLOWER_SERVICE_MAX] = {
> @@ -375,6 +543,24 @@
> return ret;
> }
>
> +static int
> +nfp_flower_start_pf_vnic(struct nfp_net_hw *hw)
> +{
> + int ret;
> + uint16_t port_id;
> +
> + port_id = hw->eth_dev->data->port_id;
> +
> + /* Start the device */
> + ret = rte_eth_dev_start(port_id);
> + if (ret) {
Compare with 0 here as well.
> + PMD_INIT_LOG(ERR, "Could not start PF device %d", port_id);
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> int
> nfp_init_app_flower(struct nfp_pf_dev *pf_dev)
> {
> @@ -432,6 +618,13 @@
> goto pf_cpp_area_cleanup;
> }
>
> + /* Start the PF vNIC */
> + ret = nfp_flower_start_pf_vnic(app_flower->pf_hw);
> + if (ret) {
Compare with 0 here as well.
> + PMD_INIT_LOG(ERR, "Could not start flower PF vNIC");
> + goto pf_vnic_cleanup;
> + }
> +
> /* Start up flower services */
> if (nfp_flower_enable_services(app_flower)) {
> ret = -ESRCH;