From mboxrd@z Thu Jan  1 00:00:00 1970
From: Wenzhuo Lu <wenzhuo.lu@intel.com>
To: dev@dpdk.org
Cc: cristian.dumitrescu@intel.com, jasvinder.singh@intel.com,
	Wenzhuo Lu <wenzhuo.lu@intel.com>
Date: Thu, 29 Jun 2017 12:23:57 +0800
Message-Id: <1498710237-80285-21-git-send-email-wenzhuo.lu@intel.com>
X-Mailer: git-send-email 1.9.3
In-Reply-To: <1498710237-80285-1-git-send-email-wenzhuo.lu@intel.com>
References: <1495873075-49542-1-git-send-email-wenzhuo.lu@intel.com>
 <1498710237-80285-1-git-send-email-wenzhuo.lu@intel.com>
Subject: [dpdk-dev] [PATCH v3 20/20] net/ixgbe: support committing TM hierarchy
List-Id: DPDK patches and discussions

Add support for the Traffic Management API rte_tm_hierarchy_commit.
When this API is called, the driver tries to enable the TM
configuration on the HW.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 15 ++++++---
 drivers/net/ixgbe/ixgbe_ethdev.h |  2 ++
 drivers/net/ixgbe/ixgbe_tm.c     | 69 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 81 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 377f8e6..b8b4d43 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -302,9 +302,6 @@ static void ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
 			       uint8_t queue, uint8_t msix_vector);
 static void ixgbe_configure_msix(struct rte_eth_dev *dev);
 
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-		uint16_t queue_idx, uint16_t tx_rate);
-
 static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
 				struct ether_addr *mac_addr,
 				uint32_t index, uint32_t pool);
@@ -2512,6 +2509,8 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
 	int status;
 	uint16_t vf, idx;
 	uint32_t *link_speeds;
+	struct ixgbe_tm_conf *tm_conf =
+		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -2702,6 +2701,11 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
 	ixgbe_l2_tunnel_conf(dev);
 	ixgbe_filter_restore(dev);
 
+	if (!tm_conf->committed)
+		PMD_DRV_LOG(WARNING,
+			    "please call hierarchy_commit() "
+			    "before starting the port");
+
 	return 0;
 
 error:
@@ -5610,8 +5614,9 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 	IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
 }
 
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
-		uint16_t queue_idx, uint16_t tx_rate)
+int
+ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+			   uint16_t queue_idx, uint16_t tx_rate)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t rf_dec, rf_int;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 67d2bdc..284dca8 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -741,6 +741,8 @@ int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 int ixgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
 void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
+int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
+			       uint16_t tx_rate);
 
 static inline int
 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
diff --git a/drivers/net/ixgbe/ixgbe_tm.c b/drivers/net/ixgbe/ixgbe_tm.c
index e9dce46..c790b59 100644
--- a/drivers/net/ixgbe/ixgbe_tm.c
+++ b/drivers/net/ixgbe/ixgbe_tm.c
@@ -62,6 +62,9 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
 				       uint32_t node_id,
 				       struct rte_tm_node_capabilities *cap,
 				       struct rte_tm_error *error);
+static int ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+				  int clear_on_fail,
+				  struct rte_tm_error *error);
 
 const struct rte_tm_ops ixgbe_tm_ops = {
 	.capabilities_get = ixgbe_tm_capabilities_get,
@@ -72,6 +75,7 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
 	.node_type_get = ixgbe_node_type_get,
 	.level_capabilities_get = ixgbe_level_capabilities_get,
 	.node_capabilities_get = ixgbe_node_capabilities_get,
+	.hierarchy_commit = ixgbe_hierarchy_commit,
 };
 
 int
@@ -970,3 +974,68 @@ static int ixgbe_node_capabilities_get(struct rte_eth_dev *dev,
 
 	return 0;
 }
+
+static int
+ixgbe_hierarchy_commit(struct rte_eth_dev *dev,
+		       int clear_on_fail,
+		       struct rte_tm_error *error)
+{
+	struct ixgbe_tm_conf *tm_conf =
+		IXGBE_DEV_PRIVATE_TO_TM_CONF(dev->data->dev_private);
+	struct ixgbe_tm_node *tm_node;
+	uint64_t bw;
+	int ret;
+
+	if (!error)
+		return -EINVAL;
+
+	/* check the setting */
+	if (!tm_conf->root)
+		goto done;
+
+	/* port max bandwidth is not supported yet */
+	if (tm_conf->root->shaper_profile->profile.peak.rate) {
+		error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+		error->message = "no port max bandwidth";
+		goto fail_clear;
+	}
+
+	/* HW does not support TC max bandwidth */
+	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+		if (tm_node->shaper_profile->profile.peak.rate) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+			error->message = "no TC max bandwidth";
+			goto fail_clear;
+		}
+	}
+
+	/* queue max bandwidth */
+	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+		bw = tm_node->shaper_profile->profile.peak.rate;
+		if (bw) {
+			/* convert bytes per second to Mbps */
+			bw = bw * 8 / 1000 / 1000;
+			ret = ixgbe_set_queue_rate_limit(dev, tm_node->no, bw);
+			if (ret) {
+				error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+				error->message =
+					"failed to set queue max bandwidth";
+				goto fail_clear;
+			}
+		}
+	}
+
+	goto done;
+
+done:
+	tm_conf->committed = true;
+	return 0;
+
+fail_clear:
+	/* clear all the traffic manager configuration */
+	if (clear_on_fail) {
+		ixgbe_tm_conf_uninit(dev);
+		ixgbe_tm_conf_init(dev);
+	}
+	return -EINVAL;
+}
-- 
1.9.3
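
For reference, the intended application-side flow is to build the TM
hierarchy through the generic rte_tm calls, commit it, and only then start
the port, which is exactly what the new warning in the driver's start path
checks for. The sketch below is a minimal, hypothetical example of that
call sequence; the start_port_with_tm() helper, the clear_on_fail = 1
choice and the error handling are illustrative assumptions, not part of
this patch, and the exact port_id width (uint8_t vs. uint16_t) depends on
the DPDK release.

/*
 * Minimal sketch (not part of this patch, assumptions noted above):
 * commit the TM hierarchy previously built with the rte_tm shaper/node
 * calls, then start the port, matching the driver's hierarchy_commit
 * warning.
 */
#include <stdio.h>
#include <string.h>

#include <rte_ethdev.h>
#include <rte_tm.h>

static int
start_port_with_tm(uint16_t port_id)
{
	struct rte_tm_error tm_err;
	int ret;

	memset(&tm_err, 0, sizeof(tm_err));

	/* clear_on_fail = 1: drop the staged hierarchy if the commit fails */
	ret = rte_tm_hierarchy_commit(port_id, 1, &tm_err);
	if (ret) {
		printf("TM hierarchy commit failed: %s\n",
		       tm_err.message ? tm_err.message : "unknown");
		return ret;
	}

	/* start the port only after the hierarchy is committed */
	return rte_eth_dev_start(port_id);
}

On the driver side, a committed queue-level shaper peak rate is converted
from bytes per second to Mbps (bw * 8 / 1000 / 1000) before being
programmed via ixgbe_set_queue_rate_limit(), so a peak rate of 125,000,000
Bps is applied as a 1000 Mbps queue rate limit, while port- and TC-level
peak rates are rejected as unsupported.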