DPDK patches and discussions
 help / color / mirror / Atom feed
From: "Wang, Haiyue" <haiyue.wang@intel.com>
To: "Zhang, Qi Z" <qi.z.zhang@intel.com>,
	"dev@dpdk.org" <dev@dpdk.org>,
	"Lu,  Wenzhuo" <wenzhuo.lu@intel.com>
Subject: Re: [dpdk-dev] [PATCH v1] net/ice: add link-up and link-down functions
Date: Fri, 3 May 2019 03:30:31 +0000	[thread overview]
Message-ID: <E3B9F2FDCB65864C82CD632F23D8AB8773359294@SHSMSX101.ccr.corp.intel.com> (raw)
Message-ID: <20190503033031._xpU5vF5AdGXZDZBhKacolnqgn0puOxb355KB8Yi8ho@z> (raw)
In-Reply-To: <039ED4275CED7440929022BC67E706115337A0F2@SHSMSX103.ccr.corp.intel.com>

> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Wednesday, May 1, 2019 09:09
> To: Wang, Haiyue <haiyue.wang@intel.com>; dev@dpdk.org; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>
> Subject: RE: [PATCH v1] net/ice: add link-up and link-down functions
> 
> 
> 
> > -----Original Message-----
> > From: Wang, Haiyue
> > Sent: Tuesday, April 30, 2019 3:12 PM
> > To: dev@dpdk.org; Zhang, Qi Z <qi.z.zhang@intel.com>; Lu, Wenzhuo
> > <wenzhuo.lu@intel.com>
> > Cc: Wang, Haiyue <haiyue.wang@intel.com>
> > Subject: [PATCH v1] net/ice: add link-up and link-down functions
> >
> > Support link up and down functions for ice, and when stop the ice,
> > makes the link down also.
> >
> > Signed-off-by: Haiyue Wang <haiyue.wang@intel.com>
> > ---
> >  drivers/net/ice/ice_ethdev.c | 83
> > ++++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 83 insertions(+)
> >
> > diff --git a/drivers/net/ice/ice_ethdev.c
> > b/drivers/net/ice/ice_ethdev.c index
> > 1f06a2c..8f58150 100644
> > --- a/drivers/net/ice/ice_ethdev.c
> > +++ b/drivers/net/ice/ice_ethdev.c
> > @@ -32,6 +32,9 @@ static void ice_dev_info_get(struct rte_eth_dev *dev,
> >  			     struct rte_eth_dev_info *dev_info);  static int
> > ice_link_update(struct rte_eth_dev *dev,
> >  			   int wait_to_complete);
> > +static int ice_dev_set_link_up(struct rte_eth_dev *dev); static int
> > +ice_dev_set_link_down(struct rte_eth_dev *dev);
> > +
> >  static int ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
> > static int ice_vlan_offload_set(struct rte_eth_dev *dev, int mask);
> > static int ice_vlan_tpid_set(struct rte_eth_dev *dev, @@ -94,6 +97,8
> > @@ static const struct eth_dev_ops ice_eth_dev_ops = {
> >  	.dev_stop                     = ice_dev_stop,
> >  	.dev_close                    = ice_dev_close,
> >  	.dev_reset                    = ice_dev_reset,
> > +	.dev_set_link_up              = ice_dev_set_link_up,
> > +	.dev_set_link_down            = ice_dev_set_link_down,
> >  	.rx_queue_start               = ice_rx_queue_start,
> >  	.rx_queue_stop                = ice_rx_queue_stop,
> >  	.tx_queue_start               = ice_tx_queue_start,
> > @@ -1541,6 +1546,9 @@ ice_dev_stop(struct rte_eth_dev *dev)
> >  	/* Clear all queues and release mbufs */
> >  	ice_clear_queues(dev);
> >
> > +	/* Set link down */
> 
> The comment looks redundant
> Same for the comment for ice_pf_disable_irq0 and ice_dev_set_link_up at
> below.
> 

I see — this follows the i40e code style, e.g. i40e_dev_stop. :) Will clean up the code
in the next version.

> > +	ice_dev_set_link_down(dev);
> > +
> >  	/* Clean datapath event and queue/vec mapping */
> >  	rte_intr_efd_disable(intr_handle);
> >  	if (intr_handle->intr_vec) {
> > @@ -1562,10 +1570,14 @@ ice_dev_close(struct rte_eth_dev *dev)
> >  	/* release all queue resource */
> >  	ice_free_queues(dev);
> >
> > +	/* Disable interrupt */
> > +	ice_pf_disable_irq0(hw);
> 
> It's better to add some explanation here. why we need this.

OK, this will be addressed in the v2 patch.

> 
> > +
> >  	ice_res_pool_destroy(&pf->msix_pool);
> >  	ice_release_vsi(pf->main_vsi);
> >  	ice_sched_cleanup_all(hw);
> >  	rte_free(hw->port_info);
> > +	hw->port_info = NULL;
> >  	ice_shutdown_all_ctrlq(hw);
> >  }
> >
> > @@ -1936,6 +1948,9 @@ ice_dev_start(struct rte_eth_dev *dev)
> >  	if (ret != ICE_SUCCESS)
> >  		PMD_DRV_LOG(WARNING, "Fail to set phy mask");
> >
> > +	/* Set link up */
> > +	ice_dev_set_link_up(dev);
> > +
> >  	/* Call get_link_info aq commond to enable/disable LSE */
> >  	ice_link_update(dev, 0);
> >
> > @@ -2218,6 +2233,74 @@ ice_link_update(struct rte_eth_dev *dev,
> > __rte_unused int wait_to_complete)
> >  	return 0;
> >  }
> >
> > +/* Force the physical link state by getting the current PHY
> > +capabilities from
> > + * hardware and setting the PHY config based on the determined
> > +capabilities. If
> > + * link changes, link event will be triggered because both the Enable
> > +Automatic
> > + * Link Update and LESM Enable bits are set when setting the PHY
> > capabilities.
> > + */
> > +static enum ice_status
> > +ice_force_phys_link_state(struct ice_hw *hw, bool link_up) {
> > +	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
> > +	struct ice_aqc_get_phy_caps_data *pcaps;
> > +	struct ice_port_info *pi;
> > +	enum ice_status status;
> > +
> > +	if (!hw || !hw->port_info)
> > +		return ICE_ERR_PARAM;
> > +
> > +	pi = hw->port_info;
> > +
> > +	pcaps = (struct ice_aqc_get_phy_caps_data *)
> > +		ice_malloc(hw, sizeof(*pcaps));
> > +	if (!pcaps)
> > +		return ICE_ERR_NO_MEMORY;
> > +
> > +	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
> pcaps,
> > +				     NULL);
> > +	if (status)
> > +		goto out;
> > +
> > +	/* No change in link */
> > +	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
> > +	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
> > +		goto out;
> > +
> > +	cfg.phy_type_low = pcaps->phy_type_low;
> > +	cfg.phy_type_high = pcaps->phy_type_high;
> > +	cfg.caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
> > +	cfg.low_power_ctrl = pcaps->low_power_ctrl;
> > +	cfg.eee_cap = pcaps->eee_cap;
> > +	cfg.eeer_value = pcaps->eeer_value;
> > +	cfg.link_fec_opt = pcaps->link_fec_options;
> > +	if (link_up)
> > +		cfg.caps |= ICE_AQ_PHY_ENA_LINK;
> > +	else
> > +		cfg.caps &= ~ICE_AQ_PHY_ENA_LINK;
> > +
> > +	status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
> > +
> > +out:
> > +	ice_free(hw, pcaps);
> > +	return status;
> > +}
> > +
> > +static int
> > +ice_dev_set_link_up(struct rte_eth_dev *dev) {
> > +	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data-
> >dev_private);
> > +
> > +	return ice_force_phys_link_state(hw, true); }
> > +
> > +static int
> > +ice_dev_set_link_down(struct rte_eth_dev *dev) {
> > +	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data-
> >dev_private);
> > +
> > +	return ice_force_phys_link_state(hw, false); }
> > +
> >  static int
> >  ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)  {
> > --
> > 2.7.4


  parent reply	other threads:[~2019-05-03  3:30 UTC|newest]

Thread overview: 12+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-04-30  7:11 Haiyue Wang
2019-04-30  7:11 ` Haiyue Wang
2019-04-30 16:23 ` Stillwell Jr, Paul M
2019-04-30 16:23   ` Stillwell Jr, Paul M
2019-04-30 17:19   ` Wang, Haiyue
2019-04-30 17:19     ` Wang, Haiyue
2019-04-30 18:43     ` Stillwell Jr, Paul M
2019-04-30 18:43       ` Stillwell Jr, Paul M
2019-05-01  1:08 ` Zhang, Qi Z
2019-05-01  1:08   ` Zhang, Qi Z
2019-05-03  3:30   ` Wang, Haiyue [this message]
2019-05-03  3:30     ` Wang, Haiyue

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=E3B9F2FDCB65864C82CD632F23D8AB8773359294@SHSMSX101.ccr.corp.intel.com \
    --to=haiyue.wang@intel.com \
    --cc=dev@dpdk.org \
    --cc=qi.z.zhang@intel.com \
    --cc=wenzhuo.lu@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).