From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v2 28/37] net/txgbe: add TM capabilities get operation
Date: Wed, 11 Nov 2020 14:49:27 +0800
Message-ID: <20201111064936.768604-29-jiawenwu@trustnetic.com>
In-Reply-To: <20201111064936.768604-1-jiawenwu@trustnetic.com>

Add support to get traffic manager capabilities.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 drivers/net/txgbe/txgbe_ethdev.c |   1 +
 drivers/net/txgbe/txgbe_ethdev.h |   9 +
 drivers/net/txgbe/txgbe_tm.c     | 278 +++++++++++++++++++++++++++++++
 3 files changed, 288 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 6a4bff0e5..22bc2c004 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -5216,6 +5216,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
 	.timesync_write_time        = txgbe_timesync_write_time,
 	.udp_tunnel_port_add        = txgbe_dev_udp_tunnel_port_add,
 	.udp_tunnel_port_del        = txgbe_dev_udp_tunnel_port_del,
+	.tm_ops_get                 = txgbe_tm_ops_get,
 	.tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
 };
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 828cdadc5..533d85617 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -284,6 +284,14 @@ struct txgbe_tm_shaper_profile {
 
 TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile);
 
+/* node type of Traffic Manager */
+enum txgbe_tm_node_type {
+	TXGBE_TM_NODE_TYPE_PORT,
+	TXGBE_TM_NODE_TYPE_TC,
+	TXGBE_TM_NODE_TYPE_QUEUE,
+	TXGBE_TM_NODE_TYPE_MAX,
+};
+
 /* Struct to store Traffic Manager node configuration. */
 struct txgbe_tm_node {
 	TAILQ_ENTRY(txgbe_tm_node) node;
@@ -557,6 +565,7 @@ int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
 
 int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 			uint16_t tx_rate, uint64_t q_msk);
+int txgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void txgbe_tm_conf_init(struct rte_eth_dev *dev);
 void txgbe_tm_conf_uninit(struct rte_eth_dev *dev);
 int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 78f426964..545590ba2 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -6,6 +6,36 @@
 
 #include "txgbe_ethdev.h"
 
+static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+				     struct rte_tm_capabilities *cap,
+				     struct rte_tm_error *error);
+static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
+					uint32_t level_id,
+					struct rte_tm_level_capabilities *cap,
+					struct rte_tm_error *error);
+static int txgbe_node_capabilities_get(struct rte_eth_dev *dev,
+				       uint32_t node_id,
+				       struct rte_tm_node_capabilities *cap,
+				       struct rte_tm_error *error);
+
+const struct rte_tm_ops txgbe_tm_ops = {
+	.capabilities_get = txgbe_tm_capabilities_get,
+	.level_capabilities_get = txgbe_level_capabilities_get,
+	.node_capabilities_get = txgbe_node_capabilities_get,
+};
+
+int
+txgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+		 void *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	*(const void **)arg = &txgbe_tm_ops;
+
+	return 0;
+}
+
 void
 txgbe_tm_conf_init(struct rte_eth_dev *dev)
 {
@@ -55,3 +85,251 @@ txgbe_tm_conf_uninit(struct rte_eth_dev *dev)
 	}
 }
 
+static inline uint8_t
+txgbe_tc_nb_get(struct rte_eth_dev *dev)
+{
+	struct rte_eth_conf *eth_conf;
+	uint8_t nb_tcs = 0;
+
+	eth_conf = &dev->data->dev_conf;
+	if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+		nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+	} else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+		    ETH_32_POOLS)
+			nb_tcs = ETH_4_TCS;
+		else
+			nb_tcs = ETH_8_TCS;
+	} else {
+		nb_tcs = 1;
+	}
+
+	return nb_tcs;
+}
+
+static int
+txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+			  struct rte_tm_capabilities *cap,
+			  struct rte_tm_error *error)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint8_t tc_nb = txgbe_tc_nb_get(dev);
+
+	if (!cap || !error)
+		return -EINVAL;
+
+	if (tc_nb > hw->mac.max_tx_queues)
+		return -EINVAL;
+
+	error->type = RTE_TM_ERROR_TYPE_NONE;
+
+	/* set all the parameters to 0 first. */
+	memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+	/**
+	 * here is the max capability not the current configuration.
+	 */
+	/* port + TCs + queues */
+	cap->n_nodes_max = 1 + TXGBE_DCB_TC_MAX +
+			   hw->mac.max_tx_queues;
+	cap->n_levels_max = 3;
+	cap->non_leaf_nodes_identical = 1;
+	cap->leaf_nodes_identical = 1;
+	cap->shaper_n_max = cap->n_nodes_max;
+	cap->shaper_private_n_max = cap->n_nodes_max;
+	cap->shaper_private_dual_rate_n_max = 0;
+	cap->shaper_private_rate_min = 0;
+	/* 10Gbps -> 1.25GBps */
+	cap->shaper_private_rate_max = 1250000000ull;
+	cap->shaper_shared_n_max = 0;
+	cap->shaper_shared_n_nodes_per_shaper_max = 0;
+	cap->shaper_shared_n_shapers_per_node_max = 0;
+	cap->shaper_shared_dual_rate_n_max = 0;
+	cap->shaper_shared_rate_min = 0;
+	cap->shaper_shared_rate_max = 0;
+	cap->sched_n_children_max = hw->mac.max_tx_queues;
+	/**
+	 * HW supports SP. But no plan to support it now.
+	 * So, all the nodes should have the same priority.
+	 */
+	cap->sched_sp_n_priorities_max = 1;
+	cap->sched_wfq_n_children_per_group_max = 0;
+	cap->sched_wfq_n_groups_max = 0;
+	/**
+	 * SW only supports fair round robin now.
+	 * So, all the nodes should have the same weight.
+	 */
+	cap->sched_wfq_weight_max = 1;
+	cap->cman_head_drop_supported = 0;
+	cap->dynamic_update_mask = 0;
+	cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+	cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+	cap->cman_wred_context_n_max = 0;
+	cap->cman_wred_context_private_n_max = 0;
+	cap->cman_wred_context_shared_n_max = 0;
+	cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+	cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+	cap->stats_mask = 0;
+
+	return 0;
+}
+
+static inline struct txgbe_tm_node *
+txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
+		     enum txgbe_tm_node_type *node_type)
+{
+	struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+	struct txgbe_tm_node *tm_node;
+
+	if (tm_conf->root && tm_conf->root->id == node_id) {
+		*node_type = TXGBE_TM_NODE_TYPE_PORT;
+		return tm_conf->root;
+	}
+
+	TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+		if (tm_node->id == node_id) {
+			*node_type = TXGBE_TM_NODE_TYPE_TC;
+			return tm_node;
+		}
+	}
+
+	TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+		if (tm_node->id == node_id) {
+			*node_type = TXGBE_TM_NODE_TYPE_QUEUE;
+			return tm_node;
+		}
+	}
+
+	return NULL;
+}
+
+static int
+txgbe_level_capabilities_get(struct rte_eth_dev *dev,
+			     uint32_t level_id,
+			     struct rte_tm_level_capabilities *cap,
+			     struct rte_tm_error *error)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+	if (!cap || !error)
+		return -EINVAL;
+
+	if (level_id >= TXGBE_TM_NODE_TYPE_MAX) {
+		error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+		error->message = "too deep level";
+		return -EINVAL;
+	}
+
+	/* root node */
+	if (level_id == TXGBE_TM_NODE_TYPE_PORT) {
+		cap->n_nodes_max = 1;
+		cap->n_nodes_nonleaf_max = 1;
+		cap->n_nodes_leaf_max = 0;
+	} else if (level_id == TXGBE_TM_NODE_TYPE_TC) {
+		/* TC */
+		cap->n_nodes_max = TXGBE_DCB_TC_MAX;
+		cap->n_nodes_nonleaf_max = TXGBE_DCB_TC_MAX;
+		cap->n_nodes_leaf_max = 0;
+	} else {
+		/* queue */
+		cap->n_nodes_max = hw->mac.max_tx_queues;
+		cap->n_nodes_nonleaf_max = 0;
+		cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
+	}
+
+	cap->non_leaf_nodes_identical = true;
+	cap->leaf_nodes_identical = true;
+
+	if (level_id != TXGBE_TM_NODE_TYPE_QUEUE) {
+		cap->nonleaf.shaper_private_supported = true;
+		cap->nonleaf.shaper_private_dual_rate_supported = false;
+		cap->nonleaf.shaper_private_rate_min = 0;
+		/* 10Gbps -> 1.25GBps */
+		cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+		cap->nonleaf.shaper_shared_n_max = 0;
+		if (level_id == TXGBE_TM_NODE_TYPE_PORT)
+			cap->nonleaf.sched_n_children_max =
+				TXGBE_DCB_TC_MAX;
+		else
+			cap->nonleaf.sched_n_children_max =
+				hw->mac.max_tx_queues;
+		cap->nonleaf.sched_sp_n_priorities_max = 1;
+		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+		cap->nonleaf.sched_wfq_n_groups_max = 0;
+		cap->nonleaf.sched_wfq_weight_max = 1;
+		cap->nonleaf.stats_mask = 0;
+
+		return 0;
+	}
+
+	/* queue node */
+	cap->leaf.shaper_private_supported = true;
+	cap->leaf.shaper_private_dual_rate_supported = false;
+	cap->leaf.shaper_private_rate_min = 0;
+	/* 10Gbps -> 1.25GBps */
+	cap->leaf.shaper_private_rate_max = 1250000000ull;
+	cap->leaf.shaper_shared_n_max = 0;
+	cap->leaf.cman_head_drop_supported = false;
+	cap->leaf.cman_wred_context_private_supported = true;
+	cap->leaf.cman_wred_context_shared_n_max = 0;
+	cap->leaf.stats_mask = 0;
+
+	return 0;
+}
+
+static int
+txgbe_node_capabilities_get(struct rte_eth_dev *dev,
+			    uint32_t node_id,
+			    struct rte_tm_node_capabilities *cap,
+			    struct rte_tm_error *error)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
+	struct txgbe_tm_node *tm_node;
+
+	if (!cap || !error)
+		return -EINVAL;
+
+	if (node_id == RTE_TM_NODE_ID_NULL) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node id";
+		return -EINVAL;
+	}
+
+	/* check if the node id exists */
+	tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	cap->shaper_private_supported = true;
+	cap->shaper_private_dual_rate_supported = false;
+	cap->shaper_private_rate_min = 0;
+	/* 10Gbps -> 1.25GBps */
+	cap->shaper_private_rate_max = 1250000000ull;
+	cap->shaper_shared_n_max = 0;
+
+	if (node_type == TXGBE_TM_NODE_TYPE_QUEUE) {
+		cap->leaf.cman_head_drop_supported = false;
+		cap->leaf.cman_wred_context_private_supported = true;
+		cap->leaf.cman_wred_context_shared_n_max = 0;
+	} else {
+		if (node_type == TXGBE_TM_NODE_TYPE_PORT)
+			cap->nonleaf.sched_n_children_max =
+				TXGBE_DCB_TC_MAX;
+		else
+			cap->nonleaf.sched_n_children_max =
+				hw->mac.max_tx_queues;
+		cap->nonleaf.sched_sp_n_priorities_max = 1;
+		cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+		cap->nonleaf.sched_wfq_n_groups_max = 0;
+		cap->nonleaf.sched_wfq_weight_max = 1;
+	}
+
+	cap->stats_mask = 0;
+
+	return 0;
+}
+
-- 
2.18.4
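
For readers following the series: applications do not call the callbacks above directly; they reach them through the generic rte_tm API, which fetches the driver's rte_tm_ops table via the tm_ops_get ethdev callback wired up in this patch. Below is a minimal application-side sketch. The helper name show_tm_caps, the error handling, and the printed fields are illustrative assumptions, not part of the patch:

#include <stdio.h>
#include <inttypes.h>
#include <rte_tm.h>

/* Hypothetical helper: query the capabilities this patch exposes.
 * rte_tm_capabilities_get() dispatches to txgbe_tm_capabilities_get()
 * through the rte_tm_ops table returned by txgbe_tm_ops_get().
 */
static int
show_tm_caps(uint16_t port_id)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error err;
	int ret;

	ret = rte_tm_capabilities_get(port_id, &cap, &err);
	if (ret != 0)
		return ret; /* err.type and err.message describe the failure */

	/* For txgbe this reports 3 levels (port -> TC -> queue) and a
	 * private shaper ceiling of 1250000000 bytes/s, i.e. 10 Gbit/s.
	 */
	printf("levels=%u nodes=%u rate_max=%" PRIu64 " Bps\n",
	       cap.n_levels_max, cap.n_nodes_max,
	       cap.shaper_private_rate_max);
	return 0;
}

The same pattern applies to rte_tm_level_capabilities_get() and rte_tm_node_capabilities_get(), which dispatch to the level and node callbacks added in this patch.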