From: Cristian Dumitrescu
To: dev@dpdk.org
Cc: Yogesh Jangra
Subject: [PATCH V2 01/21] net/softnic: remove the traffic manager support
Date: Fri, 26 Aug 2022 13:17:17 +0000
Message-Id: <20220826131737.1741743-2-cristian.dumitrescu@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20220826131737.1741743-1-cristian.dumitrescu@intel.com>
References: <20220804165839.1074817-1-cristian.dumitrescu@intel.com>
 <20220826131737.1741743-1-cristian.dumitrescu@intel.com>

Remove the Ethernet device traffic manager (rte_tm) API support from the
Soft NIC PMD: drop the tm_n_queues and tm_qsize0..tm_qsize12 device
arguments, the tmgr CLI commands, the TMGR pipeline input/output port
types, the sched library dependency and the rte_eth_softnic_tm.c
implementation.
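
For illustration only (the vdev instance name, firmware file name and
argument values below are examples, not taken from this patch), the device
arguments that remain valid after this change are firmware, conn_port,
cpu_id and sc, so a Soft NIC port would now be created along the lines of:

    dpdk-testpmd ... --vdev 'net_softnic0,firmware=firmware.cli,conn_port=8086,cpu_id=0,sc=1'

while the former tm_n_queues and tm_qsize0..tm_qsize12 arguments are no
longer accepted.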
Signed-off-by: Cristian Dumitrescu Signed-off-by: Yogesh Jangra --- drivers/net/softnic/meson.build | 3 +- drivers/net/softnic/rte_eth_softnic.c | 170 - drivers/net/softnic/rte_eth_softnic.h | 10 - drivers/net/softnic/rte_eth_softnic_cli.c | 1328 ------ .../net/softnic/rte_eth_softnic_internals.h | 176 - .../net/softnic/rte_eth_softnic_pipeline.c | 31 - drivers/net/softnic/rte_eth_softnic_tm.c | 3657 ----------------- 7 files changed, 1 insertion(+), 5374 deletions(-) delete mode 100644 drivers/net/softnic/rte_eth_softnic_tm.c diff --git a/drivers/net/softnic/meson.build b/drivers/net/softnic/meson.build index aff5fb3bf2..e2dbd6166e 100644 --- a/drivers/net/softnic/meson.build +++ b/drivers/net/softnic/meson.build @@ -21,6 +21,5 @@ sources = files( 'rte_eth_softnic_swq.c', 'rte_eth_softnic_tap.c', 'rte_eth_softnic_thread.c', - 'rte_eth_softnic_tm.c', ) -deps += ['pipeline', 'port', 'table', 'sched', 'cryptodev'] +deps += ['pipeline', 'port', 'table', 'cryptodev'] diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c index 8c098cad5b..ae3e8b3bcd 100644 --- a/drivers/net/softnic/rte_eth_softnic.c +++ b/drivers/net/softnic/rte_eth_softnic.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include "rte_eth_softnic.h" @@ -23,20 +22,6 @@ #define PMD_PARAM_CONN_PORT "conn_port" #define PMD_PARAM_CPU_ID "cpu_id" #define PMD_PARAM_SC "sc" -#define PMD_PARAM_TM_N_QUEUES "tm_n_queues" -#define PMD_PARAM_TM_QSIZE0 "tm_qsize0" -#define PMD_PARAM_TM_QSIZE1 "tm_qsize1" -#define PMD_PARAM_TM_QSIZE2 "tm_qsize2" -#define PMD_PARAM_TM_QSIZE3 "tm_qsize3" -#define PMD_PARAM_TM_QSIZE4 "tm_qsize4" -#define PMD_PARAM_TM_QSIZE5 "tm_qsize5" -#define PMD_PARAM_TM_QSIZE6 "tm_qsize6" -#define PMD_PARAM_TM_QSIZE7 "tm_qsize7" -#define PMD_PARAM_TM_QSIZE8 "tm_qsize8" -#define PMD_PARAM_TM_QSIZE9 "tm_qsize9" -#define PMD_PARAM_TM_QSIZE10 "tm_qsize10" -#define PMD_PARAM_TM_QSIZE11 "tm_qsize11" -#define PMD_PARAM_TM_QSIZE12 "tm_qsize12" static const char * const pmd_valid_args[] = { @@ -44,20 +29,6 @@ static const char * const pmd_valid_args[] = { PMD_PARAM_CONN_PORT, PMD_PARAM_CPU_ID, PMD_PARAM_SC, - PMD_PARAM_TM_N_QUEUES, - PMD_PARAM_TM_QSIZE0, - PMD_PARAM_TM_QSIZE1, - PMD_PARAM_TM_QSIZE2, - PMD_PARAM_TM_QSIZE3, - PMD_PARAM_TM_QSIZE4, - PMD_PARAM_TM_QSIZE5, - PMD_PARAM_TM_QSIZE6, - PMD_PARAM_TM_QSIZE7, - PMD_PARAM_TM_QSIZE8, - PMD_PARAM_TM_QSIZE9, - PMD_PARAM_TM_QSIZE10, - PMD_PARAM_TM_QSIZE11, - PMD_PARAM_TM_QSIZE12, NULL }; @@ -193,12 +164,10 @@ pmd_dev_stop(struct rte_eth_dev *dev) softnic_table_action_profile_free(p); softnic_port_in_action_profile_free(p); softnic_tap_free(p); - softnic_tmgr_free(p); softnic_link_free(p); softnic_softnic_swq_free_keep_rxq_txq(p); softnic_mempool_free(p); - tm_hierarchy_free(p); softnic_mtr_free(p); return 0; @@ -218,12 +187,10 @@ pmd_free(struct pmd_internals *p) softnic_table_action_profile_free(p); softnic_port_in_action_profile_free(p); softnic_tap_free(p); - softnic_tmgr_free(p); softnic_link_free(p); softnic_swq_free(p); softnic_mempool_free(p); - tm_hierarchy_free(p); softnic_mtr_free(p); rte_free(p); @@ -256,14 +223,6 @@ pmd_flow_ops_get(struct rte_eth_dev *dev __rte_unused, return 0; } -static int -pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) -{ - *(const struct rte_tm_ops **)arg = &pmd_tm_ops; - - return 0; -} - static int pmd_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) { @@ -282,7 +241,6 @@ static const struct eth_dev_ops pmd_ops = { .rx_queue_setup = pmd_rx_queue_setup, .tx_queue_setup 
= pmd_tx_queue_setup, .flow_ops_get = pmd_flow_ops_get, - .tm_ops_get = pmd_tm_ops_get, .mtr_ops_get = pmd_mtr_ops_get, }; @@ -325,13 +283,11 @@ pmd_init(struct pmd_params *params) memcpy(&p->params, params, sizeof(p->params)); /* Resources */ - tm_hierarchy_init(p); softnic_mtr_init(p); softnic_mempool_init(p); softnic_swq_init(p); softnic_link_init(p); - softnic_tmgr_init(p); softnic_tap_init(p); softnic_cryptodev_init(p); softnic_port_in_action_profile_init(p); @@ -459,20 +415,6 @@ pmd_parse_args(struct pmd_params *p, const char *params) } p->cpu_id = SOFTNIC_CPU_ID; p->sc = SOFTNIC_SC; - p->tm.n_queues = SOFTNIC_TM_N_QUEUES; - p->tm.qsize[0] = SOFTNIC_TM_QUEUE_SIZE; - p->tm.qsize[1] = SOFTNIC_TM_QUEUE_SIZE; - p->tm.qsize[2] = SOFTNIC_TM_QUEUE_SIZE; - p->tm.qsize[3] = SOFTNIC_TM_QUEUE_SIZE; - p->tm.qsize[4] = SOFTNIC_TM_QUEUE_SIZE; - p->tm.qsize[5] = SOFTNIC_TM_QUEUE_SIZE; - p->tm.qsize[6] = SOFTNIC_TM_QUEUE_SIZE; - p->tm.qsize[7] = SOFTNIC_TM_QUEUE_SIZE; - p->tm.qsize[8] = SOFTNIC_TM_QUEUE_SIZE; - p->tm.qsize[9] = SOFTNIC_TM_QUEUE_SIZE; - p->tm.qsize[10] = SOFTNIC_TM_QUEUE_SIZE; - p->tm.qsize[11] = SOFTNIC_TM_QUEUE_SIZE; - p->tm.qsize[12] = SOFTNIC_TM_QUEUE_SIZE; /* Firmware script (optional) */ if (rte_kvargs_count(kvlist, PMD_PARAM_FIRMWARE) == 1) { @@ -517,104 +459,6 @@ pmd_parse_args(struct pmd_params *p, const char *params) goto out_free; } - /* TM number of queues (optional) */ - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_N_QUEUES) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_N_QUEUES, - &get_uint32, &p->tm.n_queues); - if (ret < 0) - goto out_free; - } - - /* TM queue size 0 .. 3 (optional) */ - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE0) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE0, - &get_uint32, &p->tm.qsize[0]); - if (ret < 0) - goto out_free; - } - - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE1) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE1, - &get_uint32, &p->tm.qsize[1]); - if (ret < 0) - goto out_free; - } - - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE2) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE2, - &get_uint32, &p->tm.qsize[2]); - if (ret < 0) - goto out_free; - } - - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE3) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE3, - &get_uint32, &p->tm.qsize[3]); - if (ret < 0) - goto out_free; - } - - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE4) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE4, - &get_uint32, &p->tm.qsize[4]); - if (ret < 0) - goto out_free; - } - - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE5) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE5, - &get_uint32, &p->tm.qsize[5]); - if (ret < 0) - goto out_free; - } - - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE6) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE6, - &get_uint32, &p->tm.qsize[6]); - if (ret < 0) - goto out_free; - } - - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE7) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE7, - &get_uint32, &p->tm.qsize[7]); - if (ret < 0) - goto out_free; - } - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE8) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE8, - &get_uint32, &p->tm.qsize[8]); - if (ret < 0) - goto out_free; - } - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE9) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE9, - &get_uint32, &p->tm.qsize[9]); - if (ret < 0) - goto out_free; - } - - if (rte_kvargs_count(kvlist, 
PMD_PARAM_TM_QSIZE10) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE10, - &get_uint32, &p->tm.qsize[10]); - if (ret < 0) - goto out_free; - } - - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE11) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE11, - &get_uint32, &p->tm.qsize[11]); - if (ret < 0) - goto out_free; - } - - if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE12) == 1) { - ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE12, - &get_uint32, &p->tm.qsize[12]); - if (ret < 0) - goto out_free; - } - out_free: rte_kvargs_free(kvlist); return ret; @@ -696,20 +540,6 @@ RTE_PMD_REGISTER_PARAM_STRING(net_softnic, PMD_PARAM_FIRMWARE "= " PMD_PARAM_CONN_PORT "= " PMD_PARAM_CPU_ID "= " - PMD_PARAM_TM_N_QUEUES "= " - PMD_PARAM_TM_QSIZE0 "= " - PMD_PARAM_TM_QSIZE1 "= " - PMD_PARAM_TM_QSIZE2 "= " - PMD_PARAM_TM_QSIZE3 "=" - PMD_PARAM_TM_QSIZE4 "= " - PMD_PARAM_TM_QSIZE5 "= " - PMD_PARAM_TM_QSIZE6 "= " - PMD_PARAM_TM_QSIZE7 "= " - PMD_PARAM_TM_QSIZE8 "= " - PMD_PARAM_TM_QSIZE9 "= " - PMD_PARAM_TM_QSIZE10 "= " - PMD_PARAM_TM_QSIZE11 "=" - PMD_PARAM_TM_QSIZE12 "=" ); int diff --git a/drivers/net/softnic/rte_eth_softnic.h b/drivers/net/softnic/rte_eth_softnic.h index 6c11d89ab6..3fd80fa571 100644 --- a/drivers/net/softnic/rte_eth_softnic.h +++ b/drivers/net/softnic/rte_eth_softnic.h @@ -46,16 +46,6 @@ extern "C" { #define SOFTNIC_SC 1 #endif -/** Traffic Manager: Number of scheduler queues. */ -#ifndef SOFTNIC_TM_N_QUEUES -#define SOFTNIC_TM_N_QUEUES (64 * 1024) -#endif - -/** Traffic Manager: Scheduler queue size (per traffic class). */ -#ifndef SOFTNIC_TM_QUEUE_SIZE -#define SOFTNIC_TM_QUEUE_SIZE 64 -#endif - /** * Soft NIC run. * diff --git a/drivers/net/softnic/rte_eth_softnic_cli.c b/drivers/net/softnic/rte_eth_softnic_cli.c index 7acbeecae7..7556e50831 100644 --- a/drivers/net/softnic/rte_eth_softnic_cli.c +++ b/drivers/net/softnic/rte_eth_softnic_cli.c @@ -186,1270 +186,6 @@ cmd_swq(struct pmd_internals *softnic, } } -/** - * tmgr shaper profile - * id - * rate size - * adj - */ -static void -cmd_tmgr_shaper_profile(struct pmd_internals *softnic, - char **tokens, - uint32_t n_tokens, - char *out, - size_t out_size) -{ - struct rte_tm_shaper_params sp; - struct rte_tm_error error; - uint32_t shaper_profile_id; - uint16_t port_id; - int status; - - memset(&sp, 0, sizeof(struct rte_tm_shaper_params)); - - if (n_tokens != 11) { - snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); - return; - } - - if (strcmp(tokens[1], "shaper") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); - return; - } - - if (strcmp(tokens[2], "profile") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); - return; - } - - if (strcmp(tokens[3], "id") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id"); - return; - } - - if (softnic_parser_read_uint32(&shaper_profile_id, tokens[4]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "profile_id"); - return; - } - - if (strcmp(tokens[5], "rate") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rate"); - return; - } - - if (softnic_parser_read_uint64(&sp.peak.rate, tokens[6]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tb_rate"); - return; - } - - if (strcmp(tokens[7], "size") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); - return; - } - - if (softnic_parser_read_uint64(&sp.peak.size, tokens[8]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tb_size"); - return; - } - - if (strcmp(tokens[9], "adj") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "adj"); - return; - } - - if 
(softnic_parser_read_int32(&sp.pkt_length_adjust, tokens[10]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "packet_length_adjust"); - return; - } - - status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); - if (status) - return; - - status = rte_tm_shaper_profile_add(port_id, shaper_profile_id, &sp, &error); - if (status != 0) { - snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); - return; - } -} - -/** - * tmgr shared shaper - * id - * profile - */ -static void -cmd_tmgr_shared_shaper(struct pmd_internals *softnic, - char **tokens, - uint32_t n_tokens, - char *out, - size_t out_size) -{ - struct rte_tm_error error; - uint32_t shared_shaper_id, shaper_profile_id; - uint16_t port_id; - int status; - - if (n_tokens != 7) { - snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); - return; - } - - if (strcmp(tokens[1], "shared") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared"); - return; - } - - if (strcmp(tokens[2], "shaper") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); - return; - } - - if (strcmp(tokens[3], "id") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id"); - return; - } - - if (softnic_parser_read_uint32(&shared_shaper_id, tokens[4]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id"); - return; - } - - if (strcmp(tokens[5], "profile") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); - return; - } - - if (softnic_parser_read_uint32(&shaper_profile_id, tokens[6]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id"); - return; - } - - status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); - if (status) - return; - - status = rte_tm_shared_shaper_add_update(port_id, - shared_shaper_id, - shaper_profile_id, - &error); - if (status != 0) { - snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); - return; - } -} - -/** - * tmgr node - * id - * parent - * priority - * weight - * [shaper profile ] - * [shared shaper ] - * [nonleaf sp ] - */ -static void -cmd_tmgr_node(struct pmd_internals *softnic, - char **tokens, - uint32_t n_tokens, - char *out, - size_t out_size) -{ - struct rte_tm_error error; - struct rte_tm_node_params np; - uint32_t node_id, parent_node_id, priority, weight, shared_shaper_id; - uint16_t port_id; - int status; - - memset(&np, 0, sizeof(struct rte_tm_node_params)); - np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; - np.nonleaf.n_sp_priorities = 1; - - if (n_tokens < 10) { - snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); - return; - } - - if (strcmp(tokens[1], "node") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "node"); - return; - } - - if (strcmp(tokens[2], "id") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id"); - return; - } - - if (softnic_parser_read_uint32(&node_id, tokens[3]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "node_id"); - return; - } - - if (strcmp(tokens[4], "parent") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "parent"); - return; - } - - if (strcmp(tokens[5], "none") == 0) - parent_node_id = RTE_TM_NODE_ID_NULL; - else { - if (softnic_parser_read_uint32(&parent_node_id, tokens[5]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "parent_node_id"); - return; - } - } - - if (strcmp(tokens[6], "priority") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority"); - return; - } - - if (softnic_parser_read_uint32(&priority, tokens[7]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "priority"); - return; - } - - if (strcmp(tokens[8], "weight") != 0) { - snprintf(out, 
out_size, MSG_ARG_NOT_FOUND, "weight"); - return; - } - - if (softnic_parser_read_uint32(&weight, tokens[9]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "weight"); - return; - } - - tokens += 10; - n_tokens -= 10; - - if (n_tokens >= 2 && - (strcmp(tokens[0], "shaper") == 0) && - (strcmp(tokens[1], "profile") == 0)) { - if (n_tokens < 3) { - snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node"); - return; - } - - if (strcmp(tokens[2], "none") == 0) { - np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; - } else { - if (softnic_parser_read_uint32(&np.shaper_profile_id, tokens[2]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id"); - return; - } - } - - tokens += 3; - n_tokens -= 3; - } /* shaper profile */ - - if (n_tokens >= 2 && - (strcmp(tokens[0], "shared") == 0) && - (strcmp(tokens[1], "shaper") == 0)) { - if (n_tokens < 3) { - snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node"); - return; - } - - if (softnic_parser_read_uint32(&shared_shaper_id, tokens[2]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id"); - return; - } - - np.shared_shaper_id = &shared_shaper_id; - np.n_shared_shapers = 1; - - tokens += 3; - n_tokens -= 3; - } /* shared shaper */ - - if (n_tokens >= 2 && - (strcmp(tokens[0], "nonleaf") == 0) && - (strcmp(tokens[1], "sp") == 0)) { - if (n_tokens < 3) { - snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node"); - return; - } - - if (softnic_parser_read_uint32(&np.nonleaf.n_sp_priorities, tokens[2]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "n_sp_priorities"); - return; - } - - tokens += 3; - n_tokens -= 3; - } /* nonleaf sp */ - - if (n_tokens) { - snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); - return; - } - - status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); - if (status != 0) - return; - - status = rte_tm_node_add(port_id, - node_id, - parent_node_id, - priority, - weight, - RTE_TM_NODE_LEVEL_ID_ANY, - &np, - &error); - if (status != 0) { - snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); - return; - } -} - -static uint32_t -root_node_id(uint32_t n_spp, - uint32_t n_pps) -{ - uint32_t n_queues = n_spp * n_pps * RTE_SCHED_QUEUES_PER_PIPE; - uint32_t n_tc = n_spp * n_pps * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; - uint32_t n_pipes = n_spp * n_pps; - - return n_queues + n_tc + n_pipes + n_spp; -} - -static uint32_t -subport_node_id(uint32_t n_spp, - uint32_t n_pps, - uint32_t subport_id) -{ - uint32_t n_pipes = n_spp * n_pps; - uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; - uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE; - - return n_queues + n_tc + n_pipes + subport_id; -} - -static uint32_t -pipe_node_id(uint32_t n_spp, - uint32_t n_pps, - uint32_t subport_id, - uint32_t pipe_id) -{ - uint32_t n_pipes = n_spp * n_pps; - uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; - uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE; - - return n_queues + - n_tc + - pipe_id + - subport_id * n_pps; -} - -static uint32_t -tc_node_id(uint32_t n_spp, - uint32_t n_pps, - uint32_t subport_id, - uint32_t pipe_id, - uint32_t tc_id) -{ - uint32_t n_pipes = n_spp * n_pps; - uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE; - - return n_queues + - tc_id + - (pipe_id + subport_id * n_pps) * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; -} - -static uint32_t -queue_node_id(uint32_t n_spp __rte_unused, - uint32_t n_pps, - uint32_t subport_id, - uint32_t pipe_id, - uint32_t tc_id, - uint32_t queue_id) -{ - return queue_id + tc_id + - (pipe_id + subport_id * n_pps) * 
RTE_SCHED_QUEUES_PER_PIPE; -} - -struct tmgr_hierarchy_default_params { - uint32_t n_spp; /**< Number of subports per port. */ - uint32_t n_pps; /**< Number of pipes per subport. */ - - struct { - uint32_t port; - uint32_t subport; - uint32_t pipe; - uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; - } shaper_profile_id; - - struct { - uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; - uint32_t tc_valid[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; - } shared_shaper_id; - - struct { - uint32_t queue[RTE_SCHED_QUEUES_PER_PIPE]; - } weight; -}; - -static int -tmgr_hierarchy_default(struct pmd_internals *softnic, - struct tmgr_hierarchy_default_params *params) -{ - struct rte_tm_node_params root_node_params = { - .shaper_profile_id = params->shaper_profile_id.port, - .nonleaf = { - .n_sp_priorities = 1, - }, - }; - - struct rte_tm_node_params subport_node_params = { - .shaper_profile_id = params->shaper_profile_id.subport, - .nonleaf = { - .n_sp_priorities = 1, - }, - }; - - struct rte_tm_node_params pipe_node_params = { - .shaper_profile_id = params->shaper_profile_id.pipe, - .nonleaf = { - .n_sp_priorities = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, - }, - }; - - uint32_t *shared_shaper_id = - (uint32_t *)calloc(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, - sizeof(uint32_t)); - - if (shared_shaper_id == NULL) - return -1; - - memcpy(shared_shaper_id, params->shared_shaper_id.tc, - sizeof(params->shared_shaper_id.tc)); - - struct rte_tm_node_params tc_node_params[] = { - [0] = { - .shaper_profile_id = params->shaper_profile_id.tc[0], - .shared_shaper_id = &shared_shaper_id[0], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[0]) ? 1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - - [1] = { - .shaper_profile_id = params->shaper_profile_id.tc[1], - .shared_shaper_id = &shared_shaper_id[1], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[1]) ? 1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - - [2] = { - .shaper_profile_id = params->shaper_profile_id.tc[2], - .shared_shaper_id = &shared_shaper_id[2], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[2]) ? 1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - - [3] = { - .shaper_profile_id = params->shaper_profile_id.tc[3], - .shared_shaper_id = &shared_shaper_id[3], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[3]) ? 1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - - [4] = { - .shaper_profile_id = params->shaper_profile_id.tc[4], - .shared_shaper_id = &shared_shaper_id[4], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[4]) ? 1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - - [5] = { - .shaper_profile_id = params->shaper_profile_id.tc[5], - .shared_shaper_id = &shared_shaper_id[5], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[5]) ? 1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - - [6] = { - .shaper_profile_id = params->shaper_profile_id.tc[6], - .shared_shaper_id = &shared_shaper_id[6], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[6]) ? 1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - - [7] = { - .shaper_profile_id = params->shaper_profile_id.tc[7], - .shared_shaper_id = &shared_shaper_id[7], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[7]) ? 1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - - [8] = { - .shaper_profile_id = params->shaper_profile_id.tc[8], - .shared_shaper_id = &shared_shaper_id[8], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[8]) ? 
1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - - [9] = { - .shaper_profile_id = params->shaper_profile_id.tc[9], - .shared_shaper_id = &shared_shaper_id[9], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[9]) ? 1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - - [10] = { - .shaper_profile_id = params->shaper_profile_id.tc[10], - .shared_shaper_id = &shared_shaper_id[10], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[10]) ? 1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - - [11] = { - .shaper_profile_id = params->shaper_profile_id.tc[11], - .shared_shaper_id = &shared_shaper_id[11], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[11]) ? 1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - - [12] = { - .shaper_profile_id = params->shaper_profile_id.tc[12], - .shared_shaper_id = &shared_shaper_id[12], - .n_shared_shapers = - (params->shared_shaper_id.tc_valid[12]) ? 1 : 0, - .nonleaf = { - .n_sp_priorities = 1, - }, - }, - }; - - struct rte_tm_node_params queue_node_params = { - .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE, - }; - - struct rte_tm_error error; - uint32_t n_spp = params->n_spp, n_pps = params->n_pps, s; - int status; - uint16_t port_id; - - status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); - if (status) - return -1; - - /* Hierarchy level 0: Root node */ - status = rte_tm_node_add(port_id, - root_node_id(n_spp, n_pps), - RTE_TM_NODE_ID_NULL, - 0, - 1, - RTE_TM_NODE_LEVEL_ID_ANY, - &root_node_params, - &error); - if (status) - return -1; - - /* Hierarchy level 1: Subport nodes */ - for (s = 0; s < params->n_spp; s++) { - uint32_t p; - - status = rte_tm_node_add(port_id, - subport_node_id(n_spp, n_pps, s), - root_node_id(n_spp, n_pps), - 0, - 1, - RTE_TM_NODE_LEVEL_ID_ANY, - &subport_node_params, - &error); - if (status) - return -1; - - /* Hierarchy level 2: Pipe nodes */ - for (p = 0; p < params->n_pps; p++) { - uint32_t t; - - status = rte_tm_node_add(port_id, - pipe_node_id(n_spp, n_pps, s, p), - subport_node_id(n_spp, n_pps, s), - 0, - 1, - RTE_TM_NODE_LEVEL_ID_ANY, - &pipe_node_params, - &error); - if (status) - return -1; - - /* Hierarchy level 3: Traffic class nodes */ - for (t = 0; t < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; t++) { - uint32_t q; - - status = rte_tm_node_add(port_id, - tc_node_id(n_spp, n_pps, s, p, t), - pipe_node_id(n_spp, n_pps, s, p), - t, - 1, - RTE_TM_NODE_LEVEL_ID_ANY, - &tc_node_params[t], - &error); - if (status) - return -1; - - /* Hierarchy level 4: Queue nodes */ - if (t < RTE_SCHED_TRAFFIC_CLASS_BE) { - /* Strict-priority traffic class queues */ - q = 0; - status = rte_tm_node_add(port_id, - queue_node_id(n_spp, n_pps, s, p, t, q), - tc_node_id(n_spp, n_pps, s, p, t), - 0, - params->weight.queue[q], - RTE_TM_NODE_LEVEL_ID_ANY, - &queue_node_params, - &error); - if (status) - return -1; - - continue; - } - /* Best-effort traffic class queues */ - for (q = 0; q < RTE_SCHED_BE_QUEUES_PER_PIPE; q++) { - status = rte_tm_node_add(port_id, - queue_node_id(n_spp, n_pps, s, p, t, q), - tc_node_id(n_spp, n_pps, s, p, t), - 0, - params->weight.queue[q], - RTE_TM_NODE_LEVEL_ID_ANY, - &queue_node_params, - &error); - if (status) - return -1; - } - } /* TC */ - } /* Pipe */ - } /* Subport */ - - return 0; -} - - -/** - * tmgr hierarchy-default - * spp - * pps - * shaper profile - * port - * subport - * pipe - * tc0 - * tc1 - * tc2 - * tc3 - * tc4 - * tc5 - * tc6 - * tc7 - * tc8 - * tc9 - * tc10 - * tc11 - * tc12 - * shared shaper - * tc0 - * tc1 - * tc2 - * tc3 - * 
tc4 - * tc5 - * tc6 - * tc7 - * tc8 - * tc9 - * tc10 - * tc11 - * tc12 - * weight - * queue ... - */ -static void -cmd_tmgr_hierarchy_default(struct pmd_internals *softnic, - char **tokens, - uint32_t n_tokens, - char *out, - size_t out_size) -{ - struct tmgr_hierarchy_default_params p; - int i, j, status; - - memset(&p, 0, sizeof(p)); - - if (n_tokens != 74) { - snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); - return; - } - - if (strcmp(tokens[1], "hierarchy-default") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy-default"); - return; - } - - if (strcmp(tokens[2], "spp") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp"); - return; - } - - if (softnic_parser_read_uint32(&p.n_spp, tokens[3]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "n_subports_per_port"); - return; - } - - if (strcmp(tokens[4], "pps") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps"); - return; - } - - if (softnic_parser_read_uint32(&p.n_pps, tokens[5]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "n_pipes_per_subport"); - return; - } - - /* Shaper profile */ - - if (strcmp(tokens[6], "shaper") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); - return; - } - - if (strcmp(tokens[7], "profile") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); - return; - } - - if (strcmp(tokens[8], "port") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.port, tokens[9]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "port profile id"); - return; - } - - if (strcmp(tokens[10], "subport") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "subport"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.subport, tokens[11]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "subport profile id"); - return; - } - - if (strcmp(tokens[12], "pipe") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipe"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.pipe, tokens[13]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "pipe_profile_id"); - return; - } - - if (strcmp(tokens[14], "tc0") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[0], tokens[15]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc0 profile id"); - return; - } - - if (strcmp(tokens[16], "tc1") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[1], tokens[17]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc1 profile id"); - return; - } - - if (strcmp(tokens[18], "tc2") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[2], tokens[19]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc2 profile id"); - return; - } - - if (strcmp(tokens[20], "tc3") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[3], tokens[21]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc3 profile id"); - return; - } - - if (strcmp(tokens[22], "tc4") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc4"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[4], tokens[23]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc4 profile id"); - return; - } - - if (strcmp(tokens[24], "tc5") != 0) { - snprintf(out, 
out_size, MSG_ARG_NOT_FOUND, "tc5"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[5], tokens[25]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc5 profile id"); - return; - } - - if (strcmp(tokens[26], "tc6") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc6"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[6], tokens[27]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc6 profile id"); - return; - } - - if (strcmp(tokens[28], "tc7") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc7"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[7], tokens[29]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc7 profile id"); - return; - } - - if (strcmp(tokens[30], "tc8") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc8"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[8], tokens[31]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc8 profile id"); - return; - } - - if (strcmp(tokens[32], "tc9") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc9"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[9], tokens[33]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc9 profile id"); - return; - } - - if (strcmp(tokens[34], "tc10") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc10"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[10], tokens[35]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc10 profile id"); - return; - } - - if (strcmp(tokens[36], "tc11") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc11"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[11], tokens[37]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc11 profile id"); - return; - } - - if (strcmp(tokens[38], "tc12") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc12"); - return; - } - - if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[12], tokens[39]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "tc12 profile id"); - return; - } - - /* Shared shaper */ - - if (strcmp(tokens[40], "shared") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared"); - return; - } - - if (strcmp(tokens[41], "shaper") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); - return; - } - - if (strcmp(tokens[42], "tc0") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0"); - return; - } - - if (strcmp(tokens[43], "none") == 0) - p.shared_shaper_id.tc_valid[0] = 0; - else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[0], - tokens[43]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc0"); - return; - } - - p.shared_shaper_id.tc_valid[0] = 1; - } - - if (strcmp(tokens[44], "tc1") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1"); - return; - } - - if (strcmp(tokens[45], "none") == 0) - p.shared_shaper_id.tc_valid[1] = 0; - else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[1], - tokens[45]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc1"); - return; - } - - p.shared_shaper_id.tc_valid[1] = 1; - } - - if (strcmp(tokens[46], "tc2") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2"); - return; - } - - if (strcmp(tokens[47], "none") == 0) - p.shared_shaper_id.tc_valid[2] = 0; - else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[2], - tokens[47]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc2"); - return; - } - - p.shared_shaper_id.tc_valid[2] = 1; 
- } - - if (strcmp(tokens[48], "tc3") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3"); - return; - } - - if (strcmp(tokens[49], "none") == 0) - p.shared_shaper_id.tc_valid[3] = 0; - else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[3], - tokens[49]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc3"); - return; - } - - p.shared_shaper_id.tc_valid[3] = 1; - } - - if (strcmp(tokens[50], "tc4") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc4"); - return; - } - - if (strcmp(tokens[51], "none") == 0) { - p.shared_shaper_id.tc_valid[4] = 0; - } else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[4], - tokens[51]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc4"); - return; - } - - p.shared_shaper_id.tc_valid[4] = 1; - } - - if (strcmp(tokens[52], "tc5") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc5"); - return; - } - - if (strcmp(tokens[53], "none") == 0) { - p.shared_shaper_id.tc_valid[5] = 0; - } else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[5], - tokens[53]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc5"); - return; - } - - p.shared_shaper_id.tc_valid[5] = 1; - } - - if (strcmp(tokens[54], "tc6") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc6"); - return; - } - - if (strcmp(tokens[55], "none") == 0) { - p.shared_shaper_id.tc_valid[6] = 0; - } else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[6], - tokens[55]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc6"); - return; - } - - p.shared_shaper_id.tc_valid[6] = 1; - } - - if (strcmp(tokens[56], "tc7") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc7"); - return; - } - - if (strcmp(tokens[57], "none") == 0) { - p.shared_shaper_id.tc_valid[7] = 0; - } else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[7], - tokens[57]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc7"); - return; - } - - p.shared_shaper_id.tc_valid[7] = 1; - } - - if (strcmp(tokens[58], "tc8") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc8"); - return; - } - - if (strcmp(tokens[59], "none") == 0) { - p.shared_shaper_id.tc_valid[8] = 0; - } else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[8], - tokens[59]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc8"); - return; - } - - p.shared_shaper_id.tc_valid[8] = 1; - } - - if (strcmp(tokens[60], "tc9") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc9"); - return; - } - - if (strcmp(tokens[61], "none") == 0) { - p.shared_shaper_id.tc_valid[9] = 0; - } else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[9], - tokens[61]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc9"); - return; - } - - p.shared_shaper_id.tc_valid[9] = 1; - } - - if (strcmp(tokens[62], "tc10") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc10"); - return; - } - - if (strcmp(tokens[63], "none") == 0) { - p.shared_shaper_id.tc_valid[10] = 0; - } else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[10], - tokens[63]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc10"); - return; - } - - p.shared_shaper_id.tc_valid[10] = 1; - } - - if (strcmp(tokens[64], "tc11") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc11"); - return; - } - - if (strcmp(tokens[65], "none") == 0) { - p.shared_shaper_id.tc_valid[11] = 0; - } else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[11], - tokens[65]) != 0) { - 
snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc11"); - return; - } - - p.shared_shaper_id.tc_valid[11] = 1; - } - - if (strcmp(tokens[66], "tc12") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc12"); - return; - } - - if (strcmp(tokens[67], "none") == 0) { - p.shared_shaper_id.tc_valid[12] = 0; - } else { - if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[12], - tokens[67]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc12"); - return; - } - - p.shared_shaper_id.tc_valid[12] = 1; - } - - /* Weight */ - - if (strcmp(tokens[68], "weight") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight"); - return; - } - - if (strcmp(tokens[69], "queue") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "queue"); - return; - } - - for (i = 0, j = 0; i < 16; i++) { - if (i < RTE_SCHED_TRAFFIC_CLASS_BE) { - p.weight.queue[i] = 1; - } else { - if (softnic_parser_read_uint32(&p.weight.queue[i], - tokens[70 + j]) != 0) { - snprintf(out, out_size, MSG_ARG_INVALID, "weight queue"); - return; - } - j++; - } - } - - status = tmgr_hierarchy_default(softnic, &p); - if (status != 0) { - snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); - return; - } -} - -/** - * tmgr hierarchy commit - */ -static void -cmd_tmgr_hierarchy_commit(struct pmd_internals *softnic, - char **tokens, - uint32_t n_tokens, - char *out, - size_t out_size) -{ - struct rte_tm_error error; - uint16_t port_id; - int status; - - if (n_tokens != 3) { - snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); - return; - } - - if (strcmp(tokens[1], "hierarchy") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy"); - return; - } - - if (strcmp(tokens[2], "commit") != 0) { - snprintf(out, out_size, MSG_ARG_NOT_FOUND, "commit"); - return; - } - - status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); - if (status != 0) - return; - - status = rte_tm_hierarchy_commit(port_id, 1, &error); - if (status) { - snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); - return; - } -} - -/** - * tmgr - */ -static void -cmd_tmgr(struct pmd_internals *softnic, - char **tokens, - uint32_t n_tokens, - char *out, - size_t out_size) -{ - char *name; - struct softnic_tmgr_port *tmgr_port; - - if (n_tokens != 2) { - snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); - return; - } - - name = tokens[1]; - - tmgr_port = softnic_tmgr_port_create(softnic, name); - if (tmgr_port == NULL) { - snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); - return; - } -} - /** * tap */ @@ -2247,7 +983,6 @@ cmd_pipeline(struct pmd_internals *softnic, * bsz * link rxq * | swq - * | tmgr * | tap mempool mtu * | source mempool file bpp * | cryptodev rxq @@ -2331,18 +1066,6 @@ cmd_pipeline_port_in(struct pmd_internals *softnic, strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name)); - t0 += 2; - } else if (strcmp(tokens[t0], "tmgr") == 0) { - if (n_tokens < t0 + 2) { - snprintf(out, out_size, MSG_ARG_MISMATCH, - "pipeline port in tmgr"); - return; - } - - p.type = PORT_IN_TMGR; - - strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name)); - t0 += 2; } else if (strcmp(tokens[t0], "tap") == 0) { if (n_tokens < t0 + 6) { @@ -2482,7 +1205,6 @@ cmd_pipeline_port_in(struct pmd_internals *softnic, * bsz * link txq * | swq - * | tmgr * | tap * | sink [file pkts ] * | cryptodev txq offset @@ -2557,16 +1279,6 @@ cmd_pipeline_port_out(struct pmd_internals *softnic, p.type = PORT_OUT_SWQ; - strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name)); - } else if (strcmp(tokens[6], "tmgr") == 0) { - if (n_tokens != 8) { - snprintf(out, 
out_size, MSG_ARG_MISMATCH, - "pipeline port out tmgr"); - return; - } - - p.type = PORT_OUT_TMGR; - strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name)); } else if (strcmp(tokens[6], "tap") == 0) { if (n_tokens != 8) { @@ -6129,46 +4841,6 @@ softnic_cli_process(char *in, char *out, size_t out_size, void *arg) return; } - if (strcmp(tokens[0], "tmgr") == 0) { - if (n_tokens == 2) { - cmd_tmgr(softnic, tokens, n_tokens, out, out_size); - return; - } - - if (n_tokens >= 3 && - (strcmp(tokens[1], "shaper") == 0) && - (strcmp(tokens[2], "profile") == 0)) { - cmd_tmgr_shaper_profile(softnic, tokens, n_tokens, out, out_size); - return; - } - - if (n_tokens >= 3 && - (strcmp(tokens[1], "shared") == 0) && - (strcmp(tokens[2], "shaper") == 0)) { - cmd_tmgr_shared_shaper(softnic, tokens, n_tokens, out, out_size); - return; - } - - if (n_tokens >= 2 && - (strcmp(tokens[1], "node") == 0)) { - cmd_tmgr_node(softnic, tokens, n_tokens, out, out_size); - return; - } - - if (n_tokens >= 2 && - (strcmp(tokens[1], "hierarchy-default") == 0)) { - cmd_tmgr_hierarchy_default(softnic, tokens, n_tokens, out, out_size); - return; - } - - if (n_tokens >= 3 && - (strcmp(tokens[1], "hierarchy") == 0) && - (strcmp(tokens[2], "commit") == 0)) { - cmd_tmgr_hierarchy_commit(softnic, tokens, n_tokens, out, out_size); - return; - } - } - if (strcmp(tokens[0], "tap") == 0) { cmd_tap(softnic, tokens, n_tokens, out, out_size); return; diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h index 07285ca315..4cc98b7aad 100644 --- a/drivers/net/softnic/rte_eth_softnic_internals.h +++ b/drivers/net/softnic/rte_eth_softnic_internals.h @@ -13,14 +13,12 @@ #include #include #include -#include #include #include #include #include #include -#include #include #include @@ -40,12 +38,6 @@ struct pmd_params { uint16_t conn_port; uint32_t cpu_id; int sc; /**< Service cores. 
*/ - - /** Traffic Management (TM) */ - struct { - uint32_t n_queues; /**< Number of queues */ - uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; - } tm; }; /** @@ -161,134 +153,6 @@ struct softnic_link { TAILQ_HEAD(softnic_link_list, softnic_link); -/** - * TMGR - */ - -#ifndef TM_MAX_SUBPORTS -#define TM_MAX_SUBPORTS 8 -#endif - -#ifndef TM_MAX_PIPES_PER_SUBPORT -#define TM_MAX_PIPES_PER_SUBPORT 4096 -#endif - -#ifndef TM_MAX_PIPE_PROFILE -#define TM_MAX_PIPE_PROFILE 256 -#endif - -#ifndef TM_MAX_SUBPORT_PROFILE -#define TM_MAX_SUBPORT_PROFILE 256 -#endif - -struct tm_params { - struct rte_sched_port_params port_params; - struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS]; - struct rte_sched_subport_profile_params - subport_profile[TM_MAX_SUBPORT_PROFILE]; - uint32_t n_subport_profiles; - uint32_t subport_to_profile[TM_MAX_SUBPORT_PROFILE]; - struct rte_sched_pipe_params pipe_profiles[TM_MAX_PIPE_PROFILE]; - uint32_t n_pipe_profiles; - uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT]; -}; - -/* TM Levels */ -enum tm_node_level { - TM_NODE_LEVEL_PORT = 0, - TM_NODE_LEVEL_SUBPORT, - TM_NODE_LEVEL_PIPE, - TM_NODE_LEVEL_TC, - TM_NODE_LEVEL_QUEUE, - TM_NODE_LEVEL_MAX, -}; - -/* TM Shaper Profile */ -struct tm_shaper_profile { - TAILQ_ENTRY(tm_shaper_profile) node; - uint32_t shaper_profile_id; - uint32_t n_users; - struct rte_tm_shaper_params params; -}; - -TAILQ_HEAD(tm_shaper_profile_list, tm_shaper_profile); - -/* TM Shared Shaper */ -struct tm_shared_shaper { - TAILQ_ENTRY(tm_shared_shaper) node; - uint32_t shared_shaper_id; - uint32_t n_users; - uint32_t shaper_profile_id; -}; - -TAILQ_HEAD(tm_shared_shaper_list, tm_shared_shaper); - -/* TM WRED Profile */ -struct tm_wred_profile { - TAILQ_ENTRY(tm_wred_profile) node; - uint32_t wred_profile_id; - uint32_t n_users; - struct rte_tm_wred_params params; -}; - -TAILQ_HEAD(tm_wred_profile_list, tm_wred_profile); - -/* TM Node */ -struct tm_node { - TAILQ_ENTRY(tm_node) node; - uint32_t node_id; - uint32_t parent_node_id; - uint32_t priority; - uint32_t weight; - uint32_t level; - struct tm_node *parent_node; - struct tm_shaper_profile *shaper_profile; - struct tm_wred_profile *wred_profile; - struct rte_tm_node_params params; - struct rte_tm_node_stats stats; - uint32_t n_children; -}; - -TAILQ_HEAD(tm_node_list, tm_node); - -/* TM Hierarchy Specification */ -struct tm_hierarchy { - struct tm_shaper_profile_list shaper_profiles; - struct tm_shared_shaper_list shared_shapers; - struct tm_wred_profile_list wred_profiles; - struct tm_node_list nodes; - - uint32_t n_shaper_profiles; - uint32_t n_shared_shapers; - uint32_t n_wred_profiles; - uint32_t n_nodes; - - uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX]; -}; - -struct tm_internals { - /** Hierarchy specification - * - * -Hierarchy is unfrozen at init and when port is stopped. - * -Hierarchy is frozen on successful hierarchy commit. - * -Run-time hierarchy changes are not allowed, therefore it makes - * sense to keep the hierarchy frozen after the port is started. 
- */ - struct tm_hierarchy h; - int hierarchy_frozen; - - /** Blueprints */ - struct tm_params params; -}; - -struct softnic_tmgr_port { - TAILQ_ENTRY(softnic_tmgr_port) node; - char name[NAME_SIZE]; - struct rte_sched_port *s; -}; - -TAILQ_HEAD(softnic_tmgr_port_list, softnic_tmgr_port); - /** * TAP */ @@ -385,7 +249,6 @@ struct pipeline_params { enum softnic_port_in_type { PORT_IN_RXQ, PORT_IN_SWQ, - PORT_IN_TMGR, PORT_IN_TAP, PORT_IN_SOURCE, PORT_IN_CRYPTODEV, @@ -426,7 +289,6 @@ struct softnic_port_in_params { enum softnic_port_out_type { PORT_OUT_TXQ, PORT_OUT_SWQ, - PORT_OUT_TMGR, PORT_OUT_TAP, PORT_OUT_SINK, PORT_OUT_CRYPTODEV, @@ -619,10 +481,6 @@ struct pmd_internals { /** Params */ struct pmd_params params; - struct { - struct tm_internals tm; /**< Traffic Management */ - } soft; - struct flow_internals flow; struct mtr_internals mtr; @@ -630,7 +488,6 @@ struct pmd_internals { struct softnic_mempool_list mempool_list; struct softnic_swq_list swq_list; struct softnic_link_list link_list; - struct softnic_tmgr_port_list tmgr_port_list; struct softnic_tap_list tap_list; struct softnic_cryptodev_list cryptodev_list; struct softnic_port_in_action_profile_list port_in_action_profile_list; @@ -753,39 +610,6 @@ softnic_link_create(struct pmd_internals *p, const char *name, struct softnic_link_params *params); -/** - * TMGR - */ -int -softnic_tmgr_init(struct pmd_internals *p); - -void -softnic_tmgr_free(struct pmd_internals *p); - -struct softnic_tmgr_port * -softnic_tmgr_port_find(struct pmd_internals *p, - const char *name); - -struct softnic_tmgr_port * -softnic_tmgr_port_create(struct pmd_internals *p, - const char *name); - -void -tm_hierarchy_init(struct pmd_internals *p); - -void -tm_hierarchy_free(struct pmd_internals *p); - -static inline int -tm_used(struct rte_eth_dev *dev) -{ - struct pmd_internals *p = dev->data->dev_private; - - return p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT]; -} - -extern const struct rte_tm_ops pmd_tm_ops; - /** * TAP */ diff --git a/drivers/net/softnic/rte_eth_softnic_pipeline.c b/drivers/net/softnic/rte_eth_softnic_pipeline.c index 337aa32e57..c7d2a7de19 100644 --- a/drivers/net/softnic/rte_eth_softnic_pipeline.c +++ b/drivers/net/softnic/rte_eth_softnic_pipeline.c @@ -305,21 +305,6 @@ softnic_pipeline_port_in_create(struct pmd_internals *softnic, break; } - case PORT_IN_TMGR: - { - struct softnic_tmgr_port *tmgr_port; - - tmgr_port = softnic_tmgr_port_find(softnic, params->dev_name); - if (tmgr_port == NULL) - return -1; - - pp.sched.sched = tmgr_port->s; - - p.ops = &rte_port_sched_reader_ops; - p.arg_create = &pp.sched; - break; - } - case PORT_IN_TAP: { struct softnic_tap *tap; @@ -545,22 +530,6 @@ softnic_pipeline_port_out_create(struct pmd_internals *softnic, break; } - case PORT_OUT_TMGR: - { - struct softnic_tmgr_port *tmgr_port; - - tmgr_port = softnic_tmgr_port_find(softnic, params->dev_name); - if (tmgr_port == NULL) - return -1; - - pp.sched.sched = tmgr_port->s; - pp.sched.tx_burst_sz = params->burst_size; - - p.ops = &rte_port_sched_writer_ops; - p.arg_create = &pp.sched; - break; - } - case PORT_OUT_TAP: { struct softnic_tap *tap; diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c deleted file mode 100644 index 6a7766ba1c..0000000000 --- a/drivers/net/softnic/rte_eth_softnic_tm.c +++ /dev/null @@ -1,3657 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Intel Corporation - */ - -#include -#include -#include - -#include -#include - -#include 
"rte_eth_softnic_internals.h" -#include "rte_eth_softnic.h" - -#define SUBPORT_TC_PERIOD 10 -#define PIPE_TC_PERIOD 40 - -int -softnic_tmgr_init(struct pmd_internals *p) -{ - TAILQ_INIT(&p->tmgr_port_list); - - return 0; -} - -void -softnic_tmgr_free(struct pmd_internals *p) -{ - for ( ; ; ) { - struct softnic_tmgr_port *tmgr_port; - - tmgr_port = TAILQ_FIRST(&p->tmgr_port_list); - if (tmgr_port == NULL) - break; - - TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node); - rte_sched_port_free(tmgr_port->s); - free(tmgr_port); - } -} - -struct softnic_tmgr_port * -softnic_tmgr_port_find(struct pmd_internals *p, - const char *name) -{ - struct softnic_tmgr_port *tmgr_port; - - if (name == NULL) - return NULL; - - TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node) - if (strcmp(tmgr_port->name, name) == 0) - return tmgr_port; - - return NULL; -} - -struct softnic_tmgr_port * -softnic_tmgr_port_create(struct pmd_internals *p, - const char *name) -{ - struct softnic_tmgr_port *tmgr_port; - struct tm_params *t = &p->soft.tm.params; - struct rte_sched_port *sched; - uint32_t n_subports, subport_id; - - /* Check input params */ - if (name == NULL || - softnic_tmgr_port_find(p, name)) - return NULL; - - /* - * Resource - */ - - /* Is hierarchy frozen? */ - if (p->soft.tm.hierarchy_frozen == 0) - return NULL; - - /* Port */ - sched = rte_sched_port_config(&t->port_params); - if (sched == NULL) - return NULL; - - /* Subport */ - n_subports = t->port_params.n_subports_per_port; - for (subport_id = 0; subport_id < n_subports; subport_id++) { - uint32_t n_pipes_per_subport = - t->subport_params[subport_id].n_pipes_per_subport_enabled; - uint32_t pipe_id; - int status; - - status = rte_sched_subport_config(sched, - subport_id, - &t->subport_params[subport_id], - t->subport_to_profile[subport_id]); - if (status) { - rte_sched_port_free(sched); - return NULL; - } - - /* Pipe */ - for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) { - int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id; - int profile_id = t->pipe_to_profile[pos]; - - if (profile_id < 0) - continue; - - status = rte_sched_pipe_config(sched, - subport_id, - pipe_id, - profile_id); - if (status) { - rte_sched_port_free(sched); - return NULL; - } - } - } - - /* Node allocation */ - tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port)); - if (tmgr_port == NULL) { - rte_sched_port_free(sched); - return NULL; - } - - /* Node fill in */ - strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name)); - tmgr_port->s = sched; - - /* Node add to list */ - TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node); - - return tmgr_port; -} - -static struct rte_sched_port * -SCHED(struct pmd_internals *p) -{ - struct softnic_tmgr_port *tmgr_port; - - tmgr_port = softnic_tmgr_port_find(p, "TMGR"); - if (tmgr_port == NULL) - return NULL; - - return tmgr_port->s; -} - -void -tm_hierarchy_init(struct pmd_internals *p) -{ - memset(&p->soft.tm, 0, sizeof(p->soft.tm)); - - /* Initialize shaper profile list */ - TAILQ_INIT(&p->soft.tm.h.shaper_profiles); - - /* Initialize shared shaper list */ - TAILQ_INIT(&p->soft.tm.h.shared_shapers); - - /* Initialize wred profile list */ - TAILQ_INIT(&p->soft.tm.h.wred_profiles); - - /* Initialize TM node list */ - TAILQ_INIT(&p->soft.tm.h.nodes); -} - -void -tm_hierarchy_free(struct pmd_internals *p) -{ - /* Remove all nodes*/ - for ( ; ; ) { - struct tm_node *tm_node; - - tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes); - if (tm_node == NULL) - break; - - TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node); - free(tm_node); - } 
- - /* Remove all WRED profiles */ - for ( ; ; ) { - struct tm_wred_profile *wred_profile; - - wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles); - if (wred_profile == NULL) - break; - - TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node); - free(wred_profile); - } - - /* Remove all shared shapers */ - for ( ; ; ) { - struct tm_shared_shaper *shared_shaper; - - shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers); - if (shared_shaper == NULL) - break; - - TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node); - free(shared_shaper); - } - - /* Remove all shaper profiles */ - for ( ; ; ) { - struct tm_shaper_profile *shaper_profile; - - shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles); - if (shaper_profile == NULL) - break; - - TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, - shaper_profile, node); - free(shaper_profile); - } - - tm_hierarchy_init(p); -} - -static struct tm_shaper_profile * -tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles; - struct tm_shaper_profile *sp; - - TAILQ_FOREACH(sp, spl, node) - if (shaper_profile_id == sp->shaper_profile_id) - return sp; - - return NULL; -} - -static struct tm_shared_shaper * -tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers; - struct tm_shared_shaper *ss; - - TAILQ_FOREACH(ss, ssl, node) - if (shared_shaper_id == ss->shared_shaper_id) - return ss; - - return NULL; -} - -static struct tm_wred_profile * -tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles; - struct tm_wred_profile *wp; - - TAILQ_FOREACH(wp, wpl, node) - if (wred_profile_id == wp->wred_profile_id) - return wp; - - return NULL; -} - -static struct tm_node * -tm_node_search(struct rte_eth_dev *dev, uint32_t node_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_node_list *nl = &p->soft.tm.h.nodes; - struct tm_node *n; - - TAILQ_FOREACH(n, nl, node) - if (n->node_id == node_id) - return n; - - return NULL; -} - -static struct tm_node * -tm_root_node_present(struct rte_eth_dev *dev) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_node_list *nl = &p->soft.tm.h.nodes; - struct tm_node *n; - - TAILQ_FOREACH(n, nl, node) - if (n->parent_node_id == RTE_TM_NODE_ID_NULL) - return n; - - return NULL; -} - -static uint32_t -tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_node_list *nl = &p->soft.tm.h.nodes; - struct tm_node *ns; - uint32_t subport_id; - - subport_id = 0; - TAILQ_FOREACH(ns, nl, node) { - if (ns->level != TM_NODE_LEVEL_SUBPORT) - continue; - - if (ns->node_id == subport_node->node_id) - return subport_id; - - subport_id++; - } - - return UINT32_MAX; -} - -static uint32_t -tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_node_list *nl = &p->soft.tm.h.nodes; - struct tm_node *np; - uint32_t pipe_id; - - pipe_id = 0; - TAILQ_FOREACH(np, nl, node) { - if (np->level != TM_NODE_LEVEL_PIPE || - np->parent_node_id != pipe_node->parent_node_id) - continue; - - if (np->node_id == pipe_node->node_id) 
- return pipe_id; - - pipe_id++; - } - - return UINT32_MAX; -} - -static uint32_t -tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node) -{ - return tc_node->priority; -} - -static uint32_t -tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_node_list *nl = &p->soft.tm.h.nodes; - struct tm_node *nq; - uint32_t queue_id; - - queue_id = 0; - TAILQ_FOREACH(nq, nl, node) { - if (nq->level != TM_NODE_LEVEL_QUEUE || - nq->parent_node_id != queue_node->parent_node_id) - continue; - - if (nq->node_id == queue_node->node_id) - return queue_id; - - queue_id++; - } - - return UINT32_MAX; -} - -static uint32_t -tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level) -{ - struct pmd_internals *p = dev->data->dev_private; - uint32_t n_queues_max = p->params.tm.n_queues; - uint32_t n_tc_max = - (n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) - / RTE_SCHED_QUEUES_PER_PIPE; - uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; - uint32_t n_subports_max = n_pipes_max; - uint32_t n_root_max = 1; - - switch (level) { - case TM_NODE_LEVEL_PORT: - return n_root_max; - case TM_NODE_LEVEL_SUBPORT: - return n_subports_max; - case TM_NODE_LEVEL_PIPE: - return n_pipes_max; - case TM_NODE_LEVEL_TC: - return n_tc_max; - case TM_NODE_LEVEL_QUEUE: - default: - return n_queues_max; - } -} - -/* Traffic manager node type get */ -static int -pmd_tm_node_type_get(struct rte_eth_dev *dev, - uint32_t node_id, - int *is_leaf, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - - if (is_leaf == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - - if (node_id == RTE_TM_NODE_ID_NULL || - (tm_node_search(dev, node_id) == NULL)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - *is_leaf = node_id < p->params.tm.n_queues; - - return 0; -} - -#ifdef RTE_SCHED_CMAN -#define WRED_SUPPORTED 1 -#else -#define WRED_SUPPORTED 0 -#endif - -#define STATS_MASK_DEFAULT \ - (RTE_TM_STATS_N_PKTS | \ - RTE_TM_STATS_N_BYTES | \ - RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \ - RTE_TM_STATS_N_BYTES_GREEN_DROPPED) - -#define STATS_MASK_QUEUE \ - (STATS_MASK_DEFAULT | \ - RTE_TM_STATS_N_PKTS_QUEUED) - -static const struct rte_tm_capabilities tm_cap = { - .n_nodes_max = UINT32_MAX, - .n_levels_max = TM_NODE_LEVEL_MAX, - - .non_leaf_nodes_identical = 0, - .leaf_nodes_identical = 1, - - .shaper_n_max = UINT32_MAX, - .shaper_private_n_max = UINT32_MAX, - .shaper_private_dual_rate_n_max = 0, - .shaper_private_rate_min = 1, - .shaper_private_rate_max = UINT32_MAX, - .shaper_private_packet_mode_supported = 0, - .shaper_private_byte_mode_supported = 1, - - .shaper_shared_n_max = UINT32_MAX, - .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX, - .shaper_shared_n_shapers_per_node_max = 1, - .shaper_shared_dual_rate_n_max = 0, - .shaper_shared_rate_min = 1, - .shaper_shared_rate_max = UINT32_MAX, - .shaper_shared_packet_mode_supported = 0, - .shaper_shared_byte_mode_supported = 1, - - .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS, - .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS, - - .sched_n_children_max = UINT32_MAX, - .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, - .sched_wfq_n_children_per_group_max = UINT32_MAX, - .sched_wfq_n_groups_max = 1, - .sched_wfq_weight_max = UINT32_MAX, - 
.sched_wfq_packet_mode_supported = 0, - .sched_wfq_byte_mode_supported = 1, - - .cman_wred_packet_mode_supported = WRED_SUPPORTED, - .cman_wred_byte_mode_supported = 0, - .cman_head_drop_supported = 0, - .cman_wred_context_n_max = 0, - .cman_wred_context_private_n_max = 0, - .cman_wred_context_shared_n_max = 0, - .cman_wred_context_shared_n_nodes_per_context_max = 0, - .cman_wred_context_shared_n_contexts_per_node_max = 0, - - .mark_vlan_dei_supported = {0, 0, 0}, - .mark_ip_ecn_tcp_supported = {0, 0, 0}, - .mark_ip_ecn_sctp_supported = {0, 0, 0}, - .mark_ip_dscp_supported = {0, 0, 0}, - - .dynamic_update_mask = 0, - - .stats_mask = STATS_MASK_QUEUE, -}; - -/* Traffic manager capabilities get */ -static int -pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused, - struct rte_tm_capabilities *cap, - struct rte_tm_error *error) -{ - if (cap == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_CAPABILITIES, - NULL, - rte_strerror(EINVAL)); - - memcpy(cap, &tm_cap, sizeof(*cap)); - - cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) + - tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) + - tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) + - tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) + - tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE); - - cap->shaper_private_n_max = - tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) + - tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) + - tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) + - tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC); - - cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * - tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT); - - cap->shaper_n_max = cap->shaper_private_n_max + - cap->shaper_shared_n_max; - - cap->shaper_shared_n_nodes_per_shaper_max = - tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE); - - cap->sched_n_children_max = RTE_MAX( - tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE), - (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE); - - cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max; - - if (WRED_SUPPORTED) - cap->cman_wred_context_private_n_max = - tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE); - - cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max + - cap->cman_wred_context_shared_n_max; - - return 0; -} - -static const struct rte_tm_level_capabilities tm_level_cap[] = { - [TM_NODE_LEVEL_PORT] = { - .n_nodes_max = 1, - .n_nodes_nonleaf_max = 1, - .n_nodes_leaf_max = 0, - .non_leaf_nodes_identical = 1, - .leaf_nodes_identical = 0, - - {.nonleaf = { - .shaper_private_supported = 1, - .shaper_private_dual_rate_supported = 0, - .shaper_private_rate_min = 1, - .shaper_private_rate_max = UINT32_MAX, - .shaper_private_packet_mode_supported = 0, - .shaper_private_byte_mode_supported = 1, - .shaper_shared_n_max = 0, - .shaper_shared_packet_mode_supported = 0, - .shaper_shared_byte_mode_supported = 0, - - .sched_n_children_max = UINT32_MAX, - .sched_sp_n_priorities_max = 1, - .sched_wfq_n_children_per_group_max = UINT32_MAX, - .sched_wfq_n_groups_max = 1, - .sched_wfq_weight_max = 1, - .sched_wfq_packet_mode_supported = 0, - .sched_wfq_byte_mode_supported = 1, - - .stats_mask = STATS_MASK_DEFAULT, - } }, - }, - - [TM_NODE_LEVEL_SUBPORT] = { - .n_nodes_max = UINT32_MAX, - .n_nodes_nonleaf_max = UINT32_MAX, - .n_nodes_leaf_max = 0, - .non_leaf_nodes_identical = 1, - .leaf_nodes_identical = 0, - - {.nonleaf = { - .shaper_private_supported = 1, - .shaper_private_dual_rate_supported = 0, - .shaper_private_rate_min = 1, - .shaper_private_rate_max = 
UINT32_MAX, - .shaper_private_packet_mode_supported = 0, - .shaper_private_byte_mode_supported = 1, - .shaper_shared_n_max = 0, - .shaper_shared_packet_mode_supported = 0, - .shaper_shared_byte_mode_supported = 0, - - .sched_n_children_max = UINT32_MAX, - .sched_sp_n_priorities_max = 1, - .sched_wfq_n_children_per_group_max = UINT32_MAX, - .sched_wfq_n_groups_max = 1, - .sched_wfq_weight_max = UINT32_MAX, - .sched_wfq_packet_mode_supported = 0, - .sched_wfq_byte_mode_supported = 1, - - .stats_mask = STATS_MASK_DEFAULT, - } }, - }, - - [TM_NODE_LEVEL_PIPE] = { - .n_nodes_max = UINT32_MAX, - .n_nodes_nonleaf_max = UINT32_MAX, - .n_nodes_leaf_max = 0, - .non_leaf_nodes_identical = 1, - .leaf_nodes_identical = 0, - - {.nonleaf = { - .shaper_private_supported = 1, - .shaper_private_dual_rate_supported = 0, - .shaper_private_rate_min = 1, - .shaper_private_rate_max = UINT32_MAX, - .shaper_private_packet_mode_supported = 0, - .shaper_private_byte_mode_supported = 1, - .shaper_shared_n_max = 0, - .shaper_shared_packet_mode_supported = 0, - .shaper_shared_byte_mode_supported = 0, - - .sched_n_children_max = - RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, - .sched_sp_n_priorities_max = - RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, - .sched_wfq_n_children_per_group_max = 1, - .sched_wfq_n_groups_max = 0, - .sched_wfq_weight_max = 1, - .sched_wfq_packet_mode_supported = 0, - .sched_wfq_byte_mode_supported = 0, - - .stats_mask = STATS_MASK_DEFAULT, - } }, - }, - - [TM_NODE_LEVEL_TC] = { - .n_nodes_max = UINT32_MAX, - .n_nodes_nonleaf_max = UINT32_MAX, - .n_nodes_leaf_max = 0, - .non_leaf_nodes_identical = 1, - .leaf_nodes_identical = 0, - - {.nonleaf = { - .shaper_private_supported = 1, - .shaper_private_dual_rate_supported = 0, - .shaper_private_rate_min = 1, - .shaper_private_rate_max = UINT32_MAX, - .shaper_private_packet_mode_supported = 0, - .shaper_private_byte_mode_supported = 1, - .shaper_shared_n_max = 1, - .shaper_shared_packet_mode_supported = 0, - .shaper_shared_byte_mode_supported = 1, - - .sched_n_children_max = - RTE_SCHED_BE_QUEUES_PER_PIPE, - .sched_sp_n_priorities_max = 1, - .sched_wfq_n_children_per_group_max = - RTE_SCHED_BE_QUEUES_PER_PIPE, - .sched_wfq_n_groups_max = 1, - .sched_wfq_weight_max = UINT32_MAX, - .sched_wfq_packet_mode_supported = 0, - .sched_wfq_byte_mode_supported = 1, - - .stats_mask = STATS_MASK_DEFAULT, - } }, - }, - - [TM_NODE_LEVEL_QUEUE] = { - .n_nodes_max = UINT32_MAX, - .n_nodes_nonleaf_max = 0, - .n_nodes_leaf_max = UINT32_MAX, - .non_leaf_nodes_identical = 0, - .leaf_nodes_identical = 1, - - {.leaf = { - .shaper_private_supported = 0, - .shaper_private_dual_rate_supported = 0, - .shaper_private_rate_min = 0, - .shaper_private_rate_max = 0, - .shaper_private_packet_mode_supported = 0, - .shaper_private_byte_mode_supported = 0, - .shaper_shared_n_max = 0, - .shaper_shared_packet_mode_supported = 0, - .shaper_shared_byte_mode_supported = 0, - - .cman_head_drop_supported = 0, - .cman_wred_packet_mode_supported = WRED_SUPPORTED, - .cman_wred_byte_mode_supported = 0, - .cman_wred_context_private_supported = WRED_SUPPORTED, - .cman_wred_context_shared_n_max = 0, - - .stats_mask = STATS_MASK_QUEUE, - } }, - }, -}; - -/* Traffic manager level capabilities get */ -static int -pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused, - uint32_t level_id, - struct rte_tm_level_capabilities *cap, - struct rte_tm_error *error) -{ - if (cap == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_CAPABILITIES, - NULL, - rte_strerror(EINVAL)); - - if (level_id >= 
TM_NODE_LEVEL_MAX) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_LEVEL_ID, - NULL, - rte_strerror(EINVAL)); - - memcpy(cap, &tm_level_cap[level_id], sizeof(*cap)); - - switch (level_id) { - case TM_NODE_LEVEL_PORT: - cap->nonleaf.sched_n_children_max = - tm_level_get_max_nodes(dev, - TM_NODE_LEVEL_SUBPORT); - cap->nonleaf.sched_wfq_n_children_per_group_max = - cap->nonleaf.sched_n_children_max; - break; - - case TM_NODE_LEVEL_SUBPORT: - cap->n_nodes_max = tm_level_get_max_nodes(dev, - TM_NODE_LEVEL_SUBPORT); - cap->n_nodes_nonleaf_max = cap->n_nodes_max; - cap->nonleaf.sched_n_children_max = - tm_level_get_max_nodes(dev, - TM_NODE_LEVEL_PIPE); - cap->nonleaf.sched_wfq_n_children_per_group_max = - cap->nonleaf.sched_n_children_max; - break; - - case TM_NODE_LEVEL_PIPE: - cap->n_nodes_max = tm_level_get_max_nodes(dev, - TM_NODE_LEVEL_PIPE); - cap->n_nodes_nonleaf_max = cap->n_nodes_max; - break; - - case TM_NODE_LEVEL_TC: - cap->n_nodes_max = tm_level_get_max_nodes(dev, - TM_NODE_LEVEL_TC); - cap->n_nodes_nonleaf_max = cap->n_nodes_max; - break; - - case TM_NODE_LEVEL_QUEUE: - default: - cap->n_nodes_max = tm_level_get_max_nodes(dev, - TM_NODE_LEVEL_QUEUE); - cap->n_nodes_leaf_max = cap->n_nodes_max; - break; - } - - return 0; -} - -static const struct rte_tm_node_capabilities tm_node_cap[] = { - [TM_NODE_LEVEL_PORT] = { - .shaper_private_supported = 1, - .shaper_private_dual_rate_supported = 0, - .shaper_private_rate_min = 1, - .shaper_private_rate_max = UINT32_MAX, - .shaper_private_packet_mode_supported = 0, - .shaper_private_byte_mode_supported = 1, - .shaper_shared_n_max = 0, - .shaper_shared_packet_mode_supported = 0, - .shaper_shared_byte_mode_supported = 0, - - {.nonleaf = { - .sched_n_children_max = UINT32_MAX, - .sched_sp_n_priorities_max = 1, - .sched_wfq_n_children_per_group_max = UINT32_MAX, - .sched_wfq_n_groups_max = 1, - .sched_wfq_weight_max = 1, - .sched_wfq_packet_mode_supported = 0, - .sched_wfq_byte_mode_supported = 1, - } }, - - .stats_mask = STATS_MASK_DEFAULT, - }, - - [TM_NODE_LEVEL_SUBPORT] = { - .shaper_private_supported = 1, - .shaper_private_dual_rate_supported = 0, - .shaper_private_rate_min = 1, - .shaper_private_rate_max = UINT32_MAX, - .shaper_private_packet_mode_supported = 0, - .shaper_private_byte_mode_supported = 1, - .shaper_shared_n_max = 0, - .shaper_shared_packet_mode_supported = 0, - .shaper_shared_byte_mode_supported = 0, - - {.nonleaf = { - .sched_n_children_max = UINT32_MAX, - .sched_sp_n_priorities_max = 1, - .sched_wfq_n_children_per_group_max = UINT32_MAX, - .sched_wfq_n_groups_max = 1, - .sched_wfq_weight_max = UINT32_MAX, - .sched_wfq_packet_mode_supported = 0, - .sched_wfq_byte_mode_supported = 1, - } }, - - .stats_mask = STATS_MASK_DEFAULT, - }, - - [TM_NODE_LEVEL_PIPE] = { - .shaper_private_supported = 1, - .shaper_private_dual_rate_supported = 0, - .shaper_private_rate_min = 1, - .shaper_private_rate_max = UINT32_MAX, - .shaper_private_packet_mode_supported = 0, - .shaper_private_byte_mode_supported = 1, - .shaper_shared_n_max = 0, - .shaper_shared_packet_mode_supported = 0, - .shaper_shared_byte_mode_supported = 0, - - {.nonleaf = { - .sched_n_children_max = - RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, - .sched_sp_n_priorities_max = - RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, - .sched_wfq_n_children_per_group_max = 1, - .sched_wfq_n_groups_max = 0, - .sched_wfq_weight_max = 1, - .sched_wfq_packet_mode_supported = 0, - .sched_wfq_byte_mode_supported = 0, - } }, - - .stats_mask = STATS_MASK_DEFAULT, - }, - - [TM_NODE_LEVEL_TC] = { - 
.shaper_private_supported = 1, - .shaper_private_dual_rate_supported = 0, - .shaper_private_rate_min = 1, - .shaper_private_rate_max = UINT32_MAX, - .shaper_private_packet_mode_supported = 0, - .shaper_private_byte_mode_supported = 1, - .shaper_shared_n_max = 1, - .shaper_shared_packet_mode_supported = 0, - .shaper_shared_byte_mode_supported = 1, - - {.nonleaf = { - .sched_n_children_max = - RTE_SCHED_BE_QUEUES_PER_PIPE, - .sched_sp_n_priorities_max = 1, - .sched_wfq_n_children_per_group_max = - RTE_SCHED_BE_QUEUES_PER_PIPE, - .sched_wfq_n_groups_max = 1, - .sched_wfq_weight_max = UINT32_MAX, - .sched_wfq_packet_mode_supported = 0, - .sched_wfq_byte_mode_supported = 1, - } }, - - .stats_mask = STATS_MASK_DEFAULT, - }, - - [TM_NODE_LEVEL_QUEUE] = { - .shaper_private_supported = 0, - .shaper_private_dual_rate_supported = 0, - .shaper_private_rate_min = 0, - .shaper_private_rate_max = 0, - .shaper_private_packet_mode_supported = 0, - .shaper_private_byte_mode_supported = 0, - .shaper_shared_n_max = 0, - .shaper_shared_packet_mode_supported = 0, - .shaper_shared_byte_mode_supported = 0, - - - {.leaf = { - .cman_head_drop_supported = 0, - .cman_wred_packet_mode_supported = WRED_SUPPORTED, - .cman_wred_byte_mode_supported = 0, - .cman_wred_context_private_supported = WRED_SUPPORTED, - .cman_wred_context_shared_n_max = 0, - } }, - - .stats_mask = STATS_MASK_QUEUE, - }, -}; - -/* Traffic manager node capabilities get */ -static int -pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused, - uint32_t node_id, - struct rte_tm_node_capabilities *cap, - struct rte_tm_error *error) -{ - struct tm_node *tm_node; - - if (cap == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_CAPABILITIES, - NULL, - rte_strerror(EINVAL)); - - tm_node = tm_node_search(dev, node_id); - if (tm_node == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap)); - - switch (tm_node->level) { - case TM_NODE_LEVEL_PORT: - cap->nonleaf.sched_n_children_max = - tm_level_get_max_nodes(dev, - TM_NODE_LEVEL_SUBPORT); - cap->nonleaf.sched_wfq_n_children_per_group_max = - cap->nonleaf.sched_n_children_max; - break; - - case TM_NODE_LEVEL_SUBPORT: - cap->nonleaf.sched_n_children_max = - tm_level_get_max_nodes(dev, - TM_NODE_LEVEL_PIPE); - cap->nonleaf.sched_wfq_n_children_per_group_max = - cap->nonleaf.sched_n_children_max; - break; - - case TM_NODE_LEVEL_PIPE: - case TM_NODE_LEVEL_TC: - case TM_NODE_LEVEL_QUEUE: - default: - break; - } - - return 0; -} - -static int -shaper_profile_check(struct rte_eth_dev *dev, - uint32_t shaper_profile_id, - struct rte_tm_shaper_params *profile, - struct rte_tm_error *error) -{ - struct tm_shaper_profile *sp; - - /* Shaper profile ID must not be NONE. */ - if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Shaper profile must not exist. */ - sp = tm_shaper_profile_search(dev, shaper_profile_id); - if (sp) - return -rte_tm_error_set(error, - EEXIST, - RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, - NULL, - rte_strerror(EEXIST)); - - /* Profile must not be NULL. 
*/ - if (profile == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_SHAPER_PROFILE, - NULL, - rte_strerror(EINVAL)); - - /* Peak rate: non-zero, 32-bit */ - if (profile->peak.rate == 0 || - profile->peak.rate >= UINT32_MAX) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE, - NULL, - rte_strerror(EINVAL)); - - /* Peak size: non-zero, 32-bit */ - if (profile->peak.size == 0 || - profile->peak.size >= UINT32_MAX) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE, - NULL, - rte_strerror(EINVAL)); - - /* Dual-rate profiles are not supported. */ - if (profile->committed.rate != 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE, - NULL, - rte_strerror(EINVAL)); - - /* Packet length adjust: 24 bytes */ - if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN, - NULL, - rte_strerror(EINVAL)); - - /* Packet mode is not supported. */ - if (profile->packet_mode != 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PACKET_MODE, - NULL, - rte_strerror(EINVAL)); - return 0; -} - -/* Traffic manager shaper profile add */ -static int -pmd_tm_shaper_profile_add(struct rte_eth_dev *dev, - uint32_t shaper_profile_id, - struct rte_tm_shaper_params *profile, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles; - struct tm_shaper_profile *sp; - int status; - - /* Check input params */ - status = shaper_profile_check(dev, shaper_profile_id, profile, error); - if (status) - return status; - - /* Memory allocation */ - sp = calloc(1, sizeof(struct tm_shaper_profile)); - if (sp == NULL) - return -rte_tm_error_set(error, - ENOMEM, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(ENOMEM)); - - /* Fill in */ - sp->shaper_profile_id = shaper_profile_id; - memcpy(&sp->params, profile, sizeof(sp->params)); - - /* Add to list */ - TAILQ_INSERT_TAIL(spl, sp, node); - p->soft.tm.h.n_shaper_profiles++; - - return 0; -} - -/* Traffic manager shaper profile delete */ -static int -pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev, - uint32_t shaper_profile_id, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_shaper_profile *sp; - - /* Check existing */ - sp = tm_shaper_profile_search(dev, shaper_profile_id); - if (sp == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Check unused */ - if (sp->n_users) - return -rte_tm_error_set(error, - EBUSY, - RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, - NULL, - rte_strerror(EBUSY)); - - /* Remove from list */ - TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node); - p->soft.tm.h.n_shaper_profiles--; - free(sp); - - return 0; -} - -static struct tm_node * -tm_shared_shaper_get_tc(struct rte_eth_dev *dev, - struct tm_shared_shaper *ss) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_node_list *nl = &p->soft.tm.h.nodes; - struct tm_node *n; - - /* Subport: each TC uses shared shaper */ - TAILQ_FOREACH(n, nl, node) { - if (n->level != TM_NODE_LEVEL_TC || - n->params.n_shared_shapers == 0 || - n->params.shared_shaper_id[0] != ss->shared_shaper_id) - continue; - - return n; - } - - return NULL; -} - -static int -subport_profile_exists(struct rte_eth_dev *dev, - struct 
rte_sched_subport_profile_params *sp, - uint32_t *subport_profile_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_params *t = &p->soft.tm.params; - uint32_t i; - - for (i = 0; i < t->n_subport_profiles; i++) - if (memcmp(&t->subport_profile[i], sp, sizeof(*sp)) == 0) { - if (subport_profile_id) - *subport_profile_id = i; - return 1; - } - - return 0; -} - -static int -update_subport_tc_rate(struct rte_eth_dev *dev, - struct tm_node *nt, - struct tm_shared_shaper *ss, - struct tm_shaper_profile *sp_new) -{ - struct rte_sched_subport_profile_params subport_profile; - struct pmd_internals *p = dev->data->dev_private; - uint32_t tc_id = tm_node_tc_id(dev, nt); - struct tm_node *np = nt->parent_node; - struct tm_node *ns = np->parent_node; - uint32_t subport_id = tm_node_subport_id(dev, ns); - struct tm_params *t = &p->soft.tm.params; - uint32_t subport_profile_id; - struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev, - ss->shaper_profile_id); - - if (subport_id >= TM_MAX_SUBPORT_PROFILE) - return -1; - - subport_profile_id = t->subport_to_profile[subport_id]; - - /* Derive new subport configuration. */ - memcpy(&subport_profile, - &p->soft.tm.params.subport_profile[subport_profile_id], - sizeof(subport_profile)); - subport_profile.tc_rate[tc_id] = sp_new->params.peak.rate; - - /* Update the subport configuration. */ - if (rte_sched_subport_config(SCHED(p), - subport_id, NULL, subport_profile_id)) - return -1; - - /* Commit changes. */ - sp_old->n_users--; - - ss->shaper_profile_id = sp_new->shaper_profile_id; - sp_new->n_users++; - - memcpy(&p->soft.tm.params.subport_profile[subport_profile_id], - &subport_profile, - sizeof(subport_profile)); - - return 0; -} - -/* Traffic manager shared shaper add/update */ -static int -pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev, - uint32_t shared_shaper_id, - uint32_t shaper_profile_id, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_shared_shaper *ss; - struct tm_shaper_profile *sp; - struct tm_node *nt; - - /* Shaper profile must be valid. */ - sp = tm_shaper_profile_search(dev, shaper_profile_id); - if (sp == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, - NULL, - rte_strerror(EINVAL)); - - /** - * Add new shared shaper - */ - ss = tm_shared_shaper_search(dev, shared_shaper_id); - if (ss == NULL) { - struct tm_shared_shaper_list *ssl = - &p->soft.tm.h.shared_shapers; - - /* Hierarchy must not be frozen */ - if (p->soft.tm.hierarchy_frozen) - return -rte_tm_error_set(error, - EBUSY, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EBUSY)); - - /* Memory allocation */ - ss = calloc(1, sizeof(struct tm_shared_shaper)); - if (ss == NULL) - return -rte_tm_error_set(error, - ENOMEM, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(ENOMEM)); - - /* Fill in */ - ss->shared_shaper_id = shared_shaper_id; - ss->shaper_profile_id = shaper_profile_id; - - /* Add to list */ - TAILQ_INSERT_TAIL(ssl, ss, node); - p->soft.tm.h.n_shared_shapers++; - - return 0; - } - - /** - * Update existing shared shaper - */ - /* Hierarchy must be frozen (run-time update) */ - if (p->soft.tm.hierarchy_frozen == 0) - return -rte_tm_error_set(error, - EBUSY, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EBUSY)); - - - /* Propagate change. 
*/ - nt = tm_shared_shaper_get_tc(dev, ss); - if (update_subport_tc_rate(dev, nt, ss, sp)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - - return 0; -} - -/* Traffic manager shared shaper delete */ -static int -pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev, - uint32_t shared_shaper_id, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_shared_shaper *ss; - - /* Check existing */ - ss = tm_shared_shaper_search(dev, shared_shaper_id); - if (ss == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID, - NULL, - rte_strerror(EINVAL)); - - /* Check unused */ - if (ss->n_users) - return -rte_tm_error_set(error, - EBUSY, - RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID, - NULL, - rte_strerror(EBUSY)); - - /* Remove from list */ - TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node); - p->soft.tm.h.n_shared_shapers--; - free(ss); - - return 0; -} - -static int -wred_profile_check(struct rte_eth_dev *dev, - uint32_t wred_profile_id, - struct rte_tm_wred_params *profile, - struct rte_tm_error *error) -{ - struct tm_wred_profile *wp; - enum rte_color color; - - /* WRED profile ID must not be NONE. */ - if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, - NULL, - rte_strerror(EINVAL)); - - /* WRED profile must not exist. */ - wp = tm_wred_profile_search(dev, wred_profile_id); - if (wp) - return -rte_tm_error_set(error, - EEXIST, - RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, - NULL, - rte_strerror(EEXIST)); - - /* Profile must not be NULL. */ - if (profile == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_WRED_PROFILE, - NULL, - rte_strerror(EINVAL)); - - /* WRED profile should be in packet mode */ - if (profile->packet_mode == 0) - return -rte_tm_error_set(error, - ENOTSUP, - RTE_TM_ERROR_TYPE_WRED_PROFILE, - NULL, - rte_strerror(ENOTSUP)); - - /* min_th <= max_th, max_th > 0 */ - for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) { - uint32_t min_th = profile->red_params[color].min_th; - uint32_t max_th = profile->red_params[color].max_th; - - if (min_th > max_th || - max_th == 0 || - min_th > UINT16_MAX || - max_th > UINT16_MAX) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_WRED_PROFILE, - NULL, - rte_strerror(EINVAL)); - } - - return 0; -} - -/* Traffic manager WRED profile add */ -static int -pmd_tm_wred_profile_add(struct rte_eth_dev *dev, - uint32_t wred_profile_id, - struct rte_tm_wred_params *profile, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles; - struct tm_wred_profile *wp; - int status; - - /* Check input params */ - status = wred_profile_check(dev, wred_profile_id, profile, error); - if (status) - return status; - - /* Memory allocation */ - wp = calloc(1, sizeof(struct tm_wred_profile)); - if (wp == NULL) - return -rte_tm_error_set(error, - ENOMEM, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(ENOMEM)); - - /* Fill in */ - wp->wred_profile_id = wred_profile_id; - memcpy(&wp->params, profile, sizeof(wp->params)); - - /* Add to list */ - TAILQ_INSERT_TAIL(wpl, wp, node); - p->soft.tm.h.n_wred_profiles++; - - return 0; -} - -/* Traffic manager WRED profile delete */ -static int -pmd_tm_wred_profile_delete(struct rte_eth_dev *dev, - uint32_t wred_profile_id, - struct rte_tm_error *error) -{ - struct pmd_internals *p = 
dev->data->dev_private; - struct tm_wred_profile *wp; - - /* Check existing */ - wp = tm_wred_profile_search(dev, wred_profile_id); - if (wp == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Check unused */ - if (wp->n_users) - return -rte_tm_error_set(error, - EBUSY, - RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, - NULL, - rte_strerror(EBUSY)); - - /* Remove from list */ - TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node); - p->soft.tm.h.n_wred_profiles--; - free(wp); - - return 0; -} - -static int -node_add_check_port(struct rte_eth_dev *dev, - uint32_t node_id, - uint32_t parent_node_id __rte_unused, - uint32_t priority, - uint32_t weight, - uint32_t level_id __rte_unused, - struct rte_tm_node_params *params, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_shaper_profile *sp = tm_shaper_profile_search(dev, - params->shaper_profile_id); - - /* node type: non-leaf */ - if (node_id < p->params.tm.n_queues) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Priority must be 0 */ - if (priority != 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PRIORITY, - NULL, - rte_strerror(EINVAL)); - - /* Weight must be 1 */ - if (weight != 1) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_WEIGHT, - NULL, - rte_strerror(EINVAL)); - - /* Shaper must be valid */ - if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE || - sp == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, - NULL, - rte_strerror(EINVAL)); - - /* No shared shapers */ - if (params->n_shared_shapers != 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, - NULL, - rte_strerror(EINVAL)); - - /* Number of SP priorities must be 1 */ - if (params->nonleaf.n_sp_priorities != 1) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES, - NULL, - rte_strerror(EINVAL)); - - /* Stats */ - if (params->stats_mask & ~STATS_MASK_DEFAULT) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, - NULL, - rte_strerror(EINVAL)); - - return 0; -} - -static int -node_add_check_subport(struct rte_eth_dev *dev, - uint32_t node_id, - uint32_t parent_node_id __rte_unused, - uint32_t priority, - uint32_t weight, - uint32_t level_id __rte_unused, - struct rte_tm_node_params *params, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - - /* node type: non-leaf */ - if (node_id < p->params.tm.n_queues) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Priority must be 0 */ - if (priority != 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PRIORITY, - NULL, - rte_strerror(EINVAL)); - - /* Weight must be 1 */ - if (weight != 1) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_WEIGHT, - NULL, - rte_strerror(EINVAL)); - - /* Shaper must be valid */ - if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE || - (!tm_shaper_profile_search(dev, params->shaper_profile_id))) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, - NULL, - rte_strerror(EINVAL)); - - /* No shared shapers */ - if (params->n_shared_shapers != 0) - return -rte_tm_error_set(error, - EINVAL, - 
RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, - NULL, - rte_strerror(EINVAL)); - - /* Number of SP priorities must be 1 */ - if (params->nonleaf.n_sp_priorities != 1) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES, - NULL, - rte_strerror(EINVAL)); - - /* Stats */ - if (params->stats_mask & ~STATS_MASK_DEFAULT) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, - NULL, - rte_strerror(EINVAL)); - - return 0; -} - -static int -node_add_check_pipe(struct rte_eth_dev *dev, - uint32_t node_id, - uint32_t parent_node_id __rte_unused, - uint32_t priority, - uint32_t weight __rte_unused, - uint32_t level_id __rte_unused, - struct rte_tm_node_params *params, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - - /* node type: non-leaf */ - if (node_id < p->params.tm.n_queues) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Priority must be 0 */ - if (priority != 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PRIORITY, - NULL, - rte_strerror(EINVAL)); - - /* Shaper must be valid */ - if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE || - (!tm_shaper_profile_search(dev, params->shaper_profile_id))) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, - NULL, - rte_strerror(EINVAL)); - - /* No shared shapers */ - if (params->n_shared_shapers != 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, - NULL, - rte_strerror(EINVAL)); - - /* Number of SP priorities must be 4 */ - if (params->nonleaf.n_sp_priorities != - RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES, - NULL, - rte_strerror(EINVAL)); - - /* WFQ mode must be byte mode */ - if (params->nonleaf.wfq_weight_mode != NULL && - params->nonleaf.wfq_weight_mode[0] != 0 && - params->nonleaf.wfq_weight_mode[1] != 0 && - params->nonleaf.wfq_weight_mode[2] != 0 && - params->nonleaf.wfq_weight_mode[3] != 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE, - NULL, - rte_strerror(EINVAL)); - - /* Stats */ - if (params->stats_mask & ~STATS_MASK_DEFAULT) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, - NULL, - rte_strerror(EINVAL)); - - return 0; -} - -static int -node_add_check_tc(struct rte_eth_dev *dev, - uint32_t node_id, - uint32_t parent_node_id __rte_unused, - uint32_t priority __rte_unused, - uint32_t weight, - uint32_t level_id __rte_unused, - struct rte_tm_node_params *params, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - - /* node type: non-leaf */ - if (node_id < p->params.tm.n_queues) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Weight must be 1 */ - if (weight != 1) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_WEIGHT, - NULL, - rte_strerror(EINVAL)); - - /* Shaper must be valid */ - if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE || - (!tm_shaper_profile_search(dev, params->shaper_profile_id))) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Single valid shared shaper */ - if (params->n_shared_shapers > 1) - return -rte_tm_error_set(error, - EINVAL, - 
RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, - NULL, - rte_strerror(EINVAL)); - - if (params->n_shared_shapers == 1 && - (params->shared_shaper_id == NULL || - (!tm_shared_shaper_search(dev, params->shared_shaper_id[0])))) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID, - NULL, - rte_strerror(EINVAL)); - - /* Number of priorities must be 1 */ - if (params->nonleaf.n_sp_priorities != 1) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES, - NULL, - rte_strerror(EINVAL)); - - /* Stats */ - if (params->stats_mask & ~STATS_MASK_DEFAULT) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, - NULL, - rte_strerror(EINVAL)); - - return 0; -} - -static int -node_add_check_queue(struct rte_eth_dev *dev, - uint32_t node_id, - uint32_t parent_node_id __rte_unused, - uint32_t priority, - uint32_t weight __rte_unused, - uint32_t level_id __rte_unused, - struct rte_tm_node_params *params, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - - /* node type: leaf */ - if (node_id >= p->params.tm.n_queues) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Priority must be 0 */ - if (priority != 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PRIORITY, - NULL, - rte_strerror(EINVAL)); - - /* No shaper */ - if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, - NULL, - rte_strerror(EINVAL)); - - /* No shared shapers */ - if (params->n_shared_shapers != 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, - NULL, - rte_strerror(EINVAL)); - - /* Congestion management must not be head drop */ - if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN, - NULL, - rte_strerror(EINVAL)); - - /* Congestion management set to WRED */ - if (params->leaf.cman == RTE_TM_CMAN_WRED) { - uint32_t wred_profile_id = params->leaf.wred.wred_profile_id; - struct tm_wred_profile *wp = tm_wred_profile_search(dev, - wred_profile_id); - - /* WRED profile (for private WRED context) must be valid */ - if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE || - wp == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID, - NULL, - rte_strerror(EINVAL)); - - /* No shared WRED contexts */ - if (params->leaf.wred.n_shared_wred_contexts != 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS, - NULL, - rte_strerror(EINVAL)); - } - - /* Stats */ - if (params->stats_mask & ~STATS_MASK_QUEUE) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, - NULL, - rte_strerror(EINVAL)); - - return 0; -} - -static int -node_add_check(struct rte_eth_dev *dev, - uint32_t node_id, - uint32_t parent_node_id, - uint32_t priority, - uint32_t weight, - uint32_t level_id, - struct rte_tm_node_params *params, - struct rte_tm_error *error) -{ - struct tm_node *pn; - uint32_t level; - int status; - - /* node_id, parent_node_id: - * -node_id must not be RTE_TM_NODE_ID_NULL - * -node_id must not be in use - * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL): - * -root node must not exist - * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL): - * -parent_node_id must be valid - 
*/ - if (node_id == RTE_TM_NODE_ID_NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - if (tm_node_search(dev, node_id)) - return -rte_tm_error_set(error, - EEXIST, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EEXIST)); - - if (parent_node_id == RTE_TM_NODE_ID_NULL) { - pn = NULL; - if (tm_root_node_present(dev)) - return -rte_tm_error_set(error, - EEXIST, - RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, - NULL, - rte_strerror(EEXIST)); - } else { - pn = tm_node_search(dev, parent_node_id); - if (pn == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, - NULL, - rte_strerror(EINVAL)); - } - - /* priority: must be 0 .. 3 */ - if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PRIORITY, - NULL, - rte_strerror(EINVAL)); - - /* weight: must be 1 .. 255 */ - if (weight == 0 || weight >= UINT8_MAX) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_WEIGHT, - NULL, - rte_strerror(EINVAL)); - - /* level_id: if valid, then - * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL): - * -level_id must be zero - * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL): - * -level_id must be parent level ID plus one - */ - level = (pn == NULL) ? 0 : pn->level + 1; - if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_LEVEL_ID, - NULL, - rte_strerror(EINVAL)); - - /* params: must not be NULL */ - if (params == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARAMS, - NULL, - rte_strerror(EINVAL)); - - /* params: per level checks */ - switch (level) { - case TM_NODE_LEVEL_PORT: - status = node_add_check_port(dev, node_id, - parent_node_id, priority, weight, level_id, - params, error); - if (status) - return status; - break; - - case TM_NODE_LEVEL_SUBPORT: - status = node_add_check_subport(dev, node_id, - parent_node_id, priority, weight, level_id, - params, error); - if (status) - return status; - break; - - case TM_NODE_LEVEL_PIPE: - status = node_add_check_pipe(dev, node_id, - parent_node_id, priority, weight, level_id, - params, error); - if (status) - return status; - break; - - case TM_NODE_LEVEL_TC: - status = node_add_check_tc(dev, node_id, - parent_node_id, priority, weight, level_id, - params, error); - if (status) - return status; - break; - - case TM_NODE_LEVEL_QUEUE: - status = node_add_check_queue(dev, node_id, - parent_node_id, priority, weight, level_id, - params, error); - if (status) - return status; - break; - - default: - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_LEVEL_ID, - NULL, - rte_strerror(EINVAL)); - } - - return 0; -} - -/* Traffic manager node add */ -static int -pmd_tm_node_add(struct rte_eth_dev *dev, - uint32_t node_id, - uint32_t parent_node_id, - uint32_t priority, - uint32_t weight, - uint32_t level_id, - struct rte_tm_node_params *params, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_node_list *nl = &p->soft.tm.h.nodes; - struct tm_node *n; - uint32_t i; - int status; - - /* Checks */ - if (p->soft.tm.hierarchy_frozen) - return -rte_tm_error_set(error, - EBUSY, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EBUSY)); - - status = node_add_check(dev, node_id, parent_node_id, priority, weight, - level_id, params, error); - if (status) - return status; - - /* Memory allocation */ - n = calloc(1, 
sizeof(struct tm_node)); - if (n == NULL) - return -rte_tm_error_set(error, - ENOMEM, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(ENOMEM)); - - /* Fill in */ - n->node_id = node_id; - n->parent_node_id = parent_node_id; - n->priority = priority; - n->weight = weight; - - if (parent_node_id != RTE_TM_NODE_ID_NULL) { - n->parent_node = tm_node_search(dev, parent_node_id); - n->level = n->parent_node->level + 1; - } - - if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) - n->shaper_profile = tm_shaper_profile_search(dev, - params->shaper_profile_id); - - if (n->level == TM_NODE_LEVEL_QUEUE && - params->leaf.cman == RTE_TM_CMAN_WRED) - n->wred_profile = tm_wred_profile_search(dev, - params->leaf.wred.wred_profile_id); - - memcpy(&n->params, params, sizeof(n->params)); - - /* Add to list */ - TAILQ_INSERT_TAIL(nl, n, node); - p->soft.tm.h.n_nodes++; - - /* Update dependencies */ - if (n->parent_node) - n->parent_node->n_children++; - - if (n->shaper_profile) - n->shaper_profile->n_users++; - - for (i = 0; i < params->n_shared_shapers; i++) { - struct tm_shared_shaper *ss; - - ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]); - ss->n_users++; - } - - if (n->wred_profile) - n->wred_profile->n_users++; - - p->soft.tm.h.n_tm_nodes[n->level]++; - - return 0; -} - -/* Traffic manager node delete */ -static int -pmd_tm_node_delete(struct rte_eth_dev *dev, - uint32_t node_id, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_node *n; - uint32_t i; - - /* Check hierarchy changes are currently allowed */ - if (p->soft.tm.hierarchy_frozen) - return -rte_tm_error_set(error, - EBUSY, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EBUSY)); - - /* Check existing */ - n = tm_node_search(dev, node_id); - if (n == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Check unused */ - if (n->n_children) - return -rte_tm_error_set(error, - EBUSY, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EBUSY)); - - /* Update dependencies */ - p->soft.tm.h.n_tm_nodes[n->level]--; - - if (n->wred_profile) - n->wred_profile->n_users--; - - for (i = 0; i < n->params.n_shared_shapers; i++) { - struct tm_shared_shaper *ss; - - ss = tm_shared_shaper_search(dev, - n->params.shared_shaper_id[i]); - ss->n_users--; - } - - if (n->shaper_profile) - n->shaper_profile->n_users--; - - if (n->parent_node) - n->parent_node->n_children--; - - /* Remove from list */ - TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node); - p->soft.tm.h.n_nodes--; - free(n); - - return 0; -} - - -static void -pipe_profile_build(struct rte_eth_dev *dev, - struct tm_node *np, - struct rte_sched_pipe_params *pp) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_hierarchy *h = &p->soft.tm.h; - struct tm_node_list *nl = &h->nodes; - struct tm_node *nt, *nq; - - memset(pp, 0, sizeof(*pp)); - - /* Pipe */ - pp->tb_rate = np->shaper_profile->params.peak.rate; - pp->tb_size = np->shaper_profile->params.peak.size; - - /* Traffic Class (TC) */ - pp->tc_period = PIPE_TC_PERIOD; - - pp->tc_ov_weight = np->weight; - - TAILQ_FOREACH(nt, nl, node) { - uint32_t queue_id = 0; - - if (nt->level != TM_NODE_LEVEL_TC || - nt->parent_node_id != np->node_id) - continue; - - pp->tc_rate[nt->priority] = - nt->shaper_profile->params.peak.rate; - - /* Queue */ - TAILQ_FOREACH(nq, nl, node) { - - if (nq->level != TM_NODE_LEVEL_QUEUE || - nq->parent_node_id != nt->node_id) - continue; - - if (nt->priority == 
RTE_SCHED_TRAFFIC_CLASS_BE) - pp->wrr_weights[queue_id] = nq->weight; - - queue_id++; - } - } -} - -static int -pipe_profile_free_exists(struct rte_eth_dev *dev, - uint32_t *pipe_profile_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_params *t = &p->soft.tm.params; - - if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) { - *pipe_profile_id = t->n_pipe_profiles; - return 1; - } - - return 0; -} - -static int -pipe_profile_exists(struct rte_eth_dev *dev, - struct rte_sched_pipe_params *pp, - uint32_t *pipe_profile_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_params *t = &p->soft.tm.params; - uint32_t i; - - for (i = 0; i < t->n_pipe_profiles; i++) - if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) { - if (pipe_profile_id) - *pipe_profile_id = i; - return 1; - } - - return 0; -} - -static void -pipe_profile_install(struct rte_eth_dev *dev, - struct rte_sched_pipe_params *pp, - uint32_t pipe_profile_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_params *t = &p->soft.tm.params; - - memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp)); - t->n_pipe_profiles++; -} - -static void -pipe_profile_mark(struct rte_eth_dev *dev, - uint32_t subport_id, - uint32_t pipe_id, - uint32_t pipe_profile_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_hierarchy *h = &p->soft.tm.h; - struct tm_params *t = &p->soft.tm.params; - uint32_t n_pipes_per_subport, pos; - - n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] / - h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT]; - pos = subport_id * n_pipes_per_subport + pipe_id; - - t->pipe_to_profile[pos] = pipe_profile_id; -} - -static struct rte_sched_pipe_params * -pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_hierarchy *h = &p->soft.tm.h; - struct tm_params *t = &p->soft.tm.params; - uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] / - h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT]; - - uint32_t subport_id = tm_node_subport_id(dev, np->parent_node); - uint32_t pipe_id = tm_node_pipe_id(dev, np); - - uint32_t pos = subport_id * n_pipes_per_subport + pipe_id; - uint32_t pipe_profile_id = t->pipe_to_profile[pos]; - - return &t->pipe_profiles[pipe_profile_id]; -} - -static int -pipe_profiles_generate(struct rte_eth_dev *dev) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_hierarchy *h = &p->soft.tm.h; - struct tm_node_list *nl = &h->nodes; - struct tm_node *ns, *np; - uint32_t subport_id; - - /* Objective: Fill in the following fields in struct tm_params: - * - pipe_profiles - * - n_pipe_profiles - * - pipe_to_profile - */ - - subport_id = 0; - TAILQ_FOREACH(ns, nl, node) { - uint32_t pipe_id; - - if (ns->level != TM_NODE_LEVEL_SUBPORT) - continue; - - pipe_id = 0; - TAILQ_FOREACH(np, nl, node) { - struct rte_sched_pipe_params pp; - uint32_t pos; - - memset(&pp, 0, sizeof(pp)); - - if (np->level != TM_NODE_LEVEL_PIPE || - np->parent_node_id != ns->node_id) - continue; - - pipe_profile_build(dev, np, &pp); - - if (!pipe_profile_exists(dev, &pp, &pos)) { - if (!pipe_profile_free_exists(dev, &pos)) - return -1; - - pipe_profile_install(dev, &pp, pos); - } - - pipe_profile_mark(dev, subport_id, pipe_id, pos); - - pipe_id++; - } - - subport_id++; - } - - return 0; -} - -static struct tm_wred_profile * -tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_hierarchy *h = &p->soft.tm.h; - struct 
tm_node_list *nl = &h->nodes; - struct tm_node *nq; - - TAILQ_FOREACH(nq, nl, node) { - if (nq->level != TM_NODE_LEVEL_QUEUE || - nq->parent_node->priority != tc_id) - continue; - - return nq->wred_profile; - } - - return NULL; -} - -#ifdef RTE_SCHED_CMAN - -static void -wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct rte_sched_subport_params *pp = - &p->soft.tm.params.subport_params[subport_id]; - - uint32_t tc_id; - enum rte_color color; - - for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) - for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) { - struct rte_red_params *dst = - &pp->cman_params->red_params[tc_id][color]; - struct tm_wred_profile *src_wp = - tm_tc_wred_profile_get(dev, tc_id); - struct rte_tm_red_params *src = - &src_wp->params.red_params[color]; - - memcpy(dst, src, sizeof(*dst)); - } -} - -#else - -#define wred_profiles_set(dev, subport_id) - -#endif - -static struct tm_shared_shaper * -tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node) -{ - return (tc_node->params.n_shared_shapers) ? - tm_shared_shaper_search(dev, - tc_node->params.shared_shaper_id[0]) : - NULL; -} - -static struct tm_shared_shaper * -tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev, - struct tm_node *subport_node, - uint32_t tc_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_node_list *nl = &p->soft.tm.h.nodes; - struct tm_node *n; - - TAILQ_FOREACH(n, nl, node) { - if (n->level != TM_NODE_LEVEL_TC || - n->parent_node->parent_node_id != - subport_node->node_id || - n->priority != tc_id) - continue; - - return tm_tc_shared_shaper_get(dev, n); - } - - return NULL; -} - -static struct rte_sched_subport_profile_params * -subport_profile_get(struct rte_eth_dev *dev, struct tm_node *np) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_params *t = &p->soft.tm.params; - uint32_t subport_id = tm_node_subport_id(dev, np->parent_node); - - if (subport_id >= TM_MAX_SUBPORT_PROFILE) - return NULL; - - return &t->subport_profile[subport_id]; -} - -static void -subport_profile_mark(struct rte_eth_dev *dev, - uint32_t subport_id, - uint32_t subport_profile_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_params *t = &p->soft.tm.params; - - t->subport_to_profile[subport_id] = subport_profile_id; -} - -static void -subport_profile_install(struct rte_eth_dev *dev, - struct rte_sched_subport_profile_params *sp, - uint32_t subport_profile_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_params *t = &p->soft.tm.params; - - memcpy(&t->subport_profile[subport_profile_id], - sp, sizeof(*sp)); - t->n_subport_profiles++; -} - -static int -subport_profile_free_exists(struct rte_eth_dev *dev, - uint32_t *subport_profile_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_params *t = &p->soft.tm.params; - - if (t->n_subport_profiles < TM_MAX_SUBPORT_PROFILE) { - *subport_profile_id = t->n_subport_profiles; - return 1; - } - - return 0; -} - -static void -subport_profile_build(struct rte_eth_dev *dev, struct tm_node *np, - struct rte_sched_subport_profile_params *sp) -{ - uint32_t i; - memset(sp, 0, sizeof(*sp)); - - sp->tb_rate = np->shaper_profile->params.peak.rate; - sp->tb_size = np->shaper_profile->params.peak.size; - - for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { - struct tm_shared_shaper *ss; - struct tm_shaper_profile *ssp; - - ss = tm_subport_tc_shared_shaper_get(dev, np, 
i); - ssp = (ss) ? tm_shaper_profile_search(dev, - ss->shaper_profile_id) : - np->shaper_profile; - sp->tc_rate[i] = ssp->params.peak.rate; - } - - /* Traffic Class (TC) */ - sp->tc_period = SUBPORT_TC_PERIOD; -} - -static int -subport_profiles_generate(struct rte_eth_dev *dev) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_hierarchy *h = &p->soft.tm.h; - struct tm_node_list *nl = &h->nodes; - struct tm_node *ns; - uint32_t subport_id; - - /* Objective: Fill in the following fields in struct tm_params: - * - subport_profiles - * - n_subport_profiles - * - subport_to_profile - */ - - subport_id = 0; - TAILQ_FOREACH(ns, nl, node) { - if (ns->level != TM_NODE_LEVEL_SUBPORT) - continue; - - struct rte_sched_subport_profile_params sp; - uint32_t pos; - - memset(&sp, 0, sizeof(sp)); - - subport_profile_build(dev, ns, &sp); - - if (!subport_profile_exists(dev, &sp, &pos)) { - if (!subport_profile_free_exists(dev, &pos)) - return -1; - - subport_profile_install(dev, &sp, pos); - } - - subport_profile_mark(dev, subport_id, pos); - - subport_id++; - } - - return 0; -} - - -static int -hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_hierarchy *h = &p->soft.tm.h; - struct tm_node_list *nl = &h->nodes; - struct tm_shared_shaper_list *ssl = &h->shared_shapers; - struct tm_wred_profile_list *wpl = &h->wred_profiles; - struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq; - struct tm_shared_shaper *ss; - - uint32_t n_pipes_per_subport; - - /* Root node exists. */ - if (nr == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_LEVEL_ID, - NULL, - rte_strerror(EINVAL)); - - /* There is at least one subport, max is not exceeded. */ - if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_LEVEL_ID, - NULL, - rte_strerror(EINVAL)); - - /* There is at least one pipe. */ - if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_LEVEL_ID, - NULL, - rte_strerror(EINVAL)); - - /* Number of pipes is the same for all subports. Maximum number of pipes - * per subport is not exceeded. 
- */ - n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] / - h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT]; - - if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - - TAILQ_FOREACH(ns, nl, node) { - if (ns->level != TM_NODE_LEVEL_SUBPORT) - continue; - - if (ns->n_children != n_pipes_per_subport) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - } - - /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */ - TAILQ_FOREACH(np, nl, node) { - uint32_t mask = 0, mask_expected = - RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, - uint32_t); - - if (np->level != TM_NODE_LEVEL_PIPE) - continue; - - if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - - TAILQ_FOREACH(nt, nl, node) { - if (nt->level != TM_NODE_LEVEL_TC || - nt->parent_node_id != np->node_id) - continue; - - mask |= 1 << nt->priority; - } - - if (mask != mask_expected) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - } - - /** Each Strict priority TC has exactly 1 packet queues while - * lowest priority TC (Best-effort) has 4 queues. - */ - TAILQ_FOREACH(nt, nl, node) { - if (nt->level != TM_NODE_LEVEL_TC) - continue; - - if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - } - - /** - * Shared shapers: - * -For each TC #i, all pipes in the same subport use the same - * shared shaper (or no shared shaper) for their TC#i. - * -Each shared shaper needs to have at least one user. All its - * users have to be TC nodes with the same priority and the same - * subport. - */ - TAILQ_FOREACH(ns, nl, node) { - struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; - uint32_t id; - - if (ns->level != TM_NODE_LEVEL_SUBPORT) - continue; - - for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) - s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id); - - TAILQ_FOREACH(nt, nl, node) { - struct tm_shared_shaper *subport_ss, *tc_ss; - - if (nt->level != TM_NODE_LEVEL_TC || - nt->parent_node->parent_node_id != - ns->node_id) - continue; - - subport_ss = s[nt->priority]; - tc_ss = tm_tc_shared_shaper_get(dev, nt); - - if (subport_ss == NULL && tc_ss == NULL) - continue; - - if ((subport_ss == NULL && tc_ss != NULL) || - (subport_ss != NULL && tc_ss == NULL) || - subport_ss->shared_shaper_id != - tc_ss->shared_shaper_id) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - } - } - - TAILQ_FOREACH(ss, ssl, node) { - struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss); - uint32_t n_users = 0; - - if (nt_any != NULL) - TAILQ_FOREACH(nt, nl, node) { - if (nt->level != TM_NODE_LEVEL_TC || - nt->priority != nt_any->priority || - nt->parent_node->parent_node_id != - nt_any->parent_node->parent_node_id) - continue; - - n_users++; - } - - if (ss->n_users == 0 || ss->n_users != n_users) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - } - - /* Not too many subport profiles. 
*/ - if (subport_profiles_generate(dev)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - - - /* Not too many pipe profiles. */ - if (pipe_profiles_generate(dev)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - - /** - * WRED (when used, i.e. at least one WRED profile defined): - * -Each WRED profile must have at least one user. - * -All leaf nodes must have their private WRED context enabled. - * -For each TC #i, all leaf nodes must use the same WRED profile - * for their private WRED context. - */ - if (h->n_wred_profiles) { - struct tm_wred_profile *wp; - struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; - uint32_t id; - - TAILQ_FOREACH(wp, wpl, node) - if (wp->n_users == 0) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - - for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) { - w[id] = tm_tc_wred_profile_get(dev, id); - - if (w[id] == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - } - - TAILQ_FOREACH(nq, nl, node) { - uint32_t id; - - if (nq->level != TM_NODE_LEVEL_QUEUE) - continue; - - id = nq->parent_node->priority; - - if (nq->wred_profile == NULL || - nq->wred_profile->wred_profile_id != - w[id]->wred_profile_id) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - } - } - - return 0; -} - -static void -hierarchy_blueprints_create(struct rte_eth_dev *dev) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_params *t = &p->soft.tm.params; - struct tm_hierarchy *h = &p->soft.tm.h; - - struct tm_node_list *nl = &h->nodes; - struct tm_node *root = tm_root_node_present(dev), *n; - - uint32_t subport_id; - - t->port_params = (struct rte_sched_port_params) { - .name = dev->data->name, - .socket = dev->data->numa_node, - .rate = root->shaper_profile->params.peak.rate, - .mtu = dev->data->mtu, - .frame_overhead = - root->shaper_profile->params.pkt_length_adjust, - .n_subports_per_port = root->n_children, - .n_subport_profiles = t->n_subport_profiles, - .subport_profiles = t->subport_profile, - .n_max_subport_profiles = TM_MAX_SUBPORT_PROFILE, - .n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT, - }; - - subport_id = 0; - TAILQ_FOREACH(n, nl, node) { - - if (n->level != TM_NODE_LEVEL_SUBPORT) - continue; - - t->subport_params[subport_id] = - (struct rte_sched_subport_params) { - .n_pipes_per_subport_enabled = - h->n_tm_nodes[TM_NODE_LEVEL_PIPE] / - h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT], - .qsize = {p->params.tm.qsize[0], - p->params.tm.qsize[1], - p->params.tm.qsize[2], - p->params.tm.qsize[3], - p->params.tm.qsize[4], - p->params.tm.qsize[5], - p->params.tm.qsize[6], - p->params.tm.qsize[7], - p->params.tm.qsize[8], - p->params.tm.qsize[9], - p->params.tm.qsize[10], - p->params.tm.qsize[11], - p->params.tm.qsize[12], - }, - .pipe_profiles = t->pipe_profiles, - .n_pipe_profiles = t->n_pipe_profiles, - .n_max_pipe_profiles = TM_MAX_PIPE_PROFILE, - }; - wred_profiles_set(dev, subport_id); - subport_id++; - } -} - -/* Traffic manager hierarchy commit */ -static int -pmd_tm_hierarchy_commit(struct rte_eth_dev *dev, - int clear_on_fail, - struct rte_tm_error *error) -{ - struct pmd_internals *p = dev->data->dev_private; - int status; - - /* Checks */ - if (p->soft.tm.hierarchy_frozen) - return -rte_tm_error_set(error, - EBUSY, - 
RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EBUSY)); - - status = hierarchy_commit_check(dev, error); - if (status) { - if (clear_on_fail) - tm_hierarchy_free(p); - - return status; - } - - /* Create blueprints */ - hierarchy_blueprints_create(dev); - - /* Freeze hierarchy */ - p->soft.tm.hierarchy_frozen = 1; - - return 0; -} - -static int -update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight) -{ - struct pmd_internals *p = dev->data->dev_private; - uint32_t pipe_id = tm_node_pipe_id(dev, np); - - struct tm_node *ns = np->parent_node; - uint32_t subport_id = tm_node_subport_id(dev, ns); - - struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np); - struct rte_sched_pipe_params profile1; - uint32_t pipe_profile_id; - - /* Derive new pipe profile. */ - memcpy(&profile1, profile0, sizeof(profile1)); - profile1.tc_ov_weight = (uint8_t)weight; - - /* Since implementation does not allow adding more pipe profiles after - * port configuration, the pipe configuration can be successfully - * updated only if the new profile is also part of the existing set of - * pipe profiles. - */ - if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0) - return -1; - - /* Update the pipe profile used by the current pipe. */ - if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id, - (int32_t)pipe_profile_id)) - return -1; - - /* Commit changes. */ - pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id); - np->weight = weight; - - return 0; -} - -static int -update_queue_weight(struct rte_eth_dev *dev, - struct tm_node *nq, uint32_t weight) -{ - struct pmd_internals *p = dev->data->dev_private; - uint32_t queue_id = tm_node_queue_id(dev, nq); - - struct tm_node *nt = nq->parent_node; - - struct tm_node *np = nt->parent_node; - uint32_t pipe_id = tm_node_pipe_id(dev, np); - - struct tm_node *ns = np->parent_node; - uint32_t subport_id = tm_node_subport_id(dev, ns); - - uint32_t pipe_be_queue_id = - queue_id - RTE_SCHED_TRAFFIC_CLASS_BE; - - struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np); - struct rte_sched_pipe_params profile1; - uint32_t pipe_profile_id; - - /* Derive new pipe profile. */ - memcpy(&profile1, profile0, sizeof(profile1)); - profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight; - - /* Since implementation does not allow adding more pipe profiles after - * port configuration, the pipe configuration can be successfully - * updated only if the new profile is also part of the existing set - * of pipe profiles. - */ - if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0) - return -1; - - /* Update the pipe profile used by the current pipe. */ - if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id, - (int32_t)pipe_profile_id)) - return -1; - - /* Commit changes. */ - pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id); - nq->weight = weight; - - return 0; -} - -/* Traffic manager node parent update */ -static int -pmd_tm_node_parent_update(struct rte_eth_dev *dev, - uint32_t node_id, - uint32_t parent_node_id, - uint32_t priority, - uint32_t weight, - struct rte_tm_error *error) -{ - struct tm_node *n; - - /* Port must be started and TM used. 
*/ - if (dev->data->dev_started == 0 && (tm_used(dev) == 0)) - return -rte_tm_error_set(error, - EBUSY, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EBUSY)); - - /* Node must be valid */ - n = tm_node_search(dev, node_id); - if (n == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Parent node must be the same */ - if (n->parent_node_id != parent_node_id) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Priority must be the same */ - if (n->priority != priority) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_PRIORITY, - NULL, - rte_strerror(EINVAL)); - - /* weight: must be 1 .. 255 */ - if (weight == 0 || weight >= UINT8_MAX) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_WEIGHT, - NULL, - rte_strerror(EINVAL)); - - switch (n->level) { - case TM_NODE_LEVEL_PORT: - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_WEIGHT, - NULL, - rte_strerror(EINVAL)); - /* fall-through */ - case TM_NODE_LEVEL_SUBPORT: - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_WEIGHT, - NULL, - rte_strerror(EINVAL)); - /* fall-through */ - case TM_NODE_LEVEL_PIPE: - if (update_pipe_weight(dev, n, weight)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - return 0; - /* fall-through */ - case TM_NODE_LEVEL_TC: - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_WEIGHT, - NULL, - rte_strerror(EINVAL)); - /* fall-through */ - case TM_NODE_LEVEL_QUEUE: - /* fall-through */ - default: - if (update_queue_weight(dev, n, weight)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - return 0; - } -} - -static int -update_subport_rate(struct rte_eth_dev *dev, - struct tm_node *ns, - struct tm_shaper_profile *sp) -{ - struct pmd_internals *p = dev->data->dev_private; - uint32_t subport_id = tm_node_subport_id(dev, ns); - - struct rte_sched_subport_profile_params *profile0 = - subport_profile_get(dev, ns); - struct rte_sched_subport_profile_params profile1; - uint32_t subport_profile_id; - - if (profile0 == NULL) - return -1; - - /* Derive new pipe profile. */ - memcpy(&profile1, profile0, sizeof(profile1)); - profile1.tb_rate = sp->params.peak.rate; - profile1.tb_size = sp->params.peak.size; - - /* Since implementation does not allow adding more subport profiles - * after port configuration, the pipe configuration can be successfully - * updated only if the new profile is also part of the existing set of - * pipe profiles. - */ - if (subport_profile_exists(dev, &profile1, &subport_profile_id) == 0) - return -1; - - /* Update the subport configuration. */ - if (rte_sched_subport_config(SCHED(p), subport_id, - NULL, subport_profile_id)) - return -1; - - /* Commit changes. 
*/ - ns->shaper_profile->n_users--; - - ns->shaper_profile = sp; - ns->params.shaper_profile_id = sp->shaper_profile_id; - sp->n_users++; - - subport_profile_mark(dev, subport_id, subport_profile_id); - - memcpy(&p->soft.tm.params.subport_profile[subport_profile_id], - &profile1, - sizeof(profile1)); - - return 0; -} - -static int -update_pipe_rate(struct rte_eth_dev *dev, - struct tm_node *np, - struct tm_shaper_profile *sp) -{ - struct pmd_internals *p = dev->data->dev_private; - uint32_t pipe_id = tm_node_pipe_id(dev, np); - - struct tm_node *ns = np->parent_node; - uint32_t subport_id = tm_node_subport_id(dev, ns); - - struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np); - struct rte_sched_pipe_params profile1; - uint32_t pipe_profile_id; - - /* Derive new pipe profile. */ - memcpy(&profile1, profile0, sizeof(profile1)); - profile1.tb_rate = sp->params.peak.rate; - profile1.tb_size = sp->params.peak.size; - - /* Since implementation does not allow adding more pipe profiles after - * port configuration, the pipe configuration can be successfully - * updated only if the new profile is also part of the existing set of - * pipe profiles. - */ - if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0) - return -1; - - /* Update the pipe profile used by the current pipe. */ - if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id, - (int32_t)pipe_profile_id)) - return -1; - - /* Commit changes. */ - pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id); - np->shaper_profile->n_users--; - np->shaper_profile = sp; - np->params.shaper_profile_id = sp->shaper_profile_id; - sp->n_users++; - - return 0; -} - -static int -update_tc_rate(struct rte_eth_dev *dev, - struct tm_node *nt, - struct tm_shaper_profile *sp) -{ - struct pmd_internals *p = dev->data->dev_private; - uint32_t tc_id = tm_node_tc_id(dev, nt); - - struct tm_node *np = nt->parent_node; - uint32_t pipe_id = tm_node_pipe_id(dev, np); - - struct tm_node *ns = np->parent_node; - uint32_t subport_id = tm_node_subport_id(dev, ns); - - struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np); - struct rte_sched_pipe_params profile1; - uint32_t pipe_profile_id; - - /* Derive new pipe profile. */ - memcpy(&profile1, profile0, sizeof(profile1)); - profile1.tc_rate[tc_id] = sp->params.peak.rate; - - /* Since implementation does not allow adding more pipe profiles after - * port configuration, the pipe configuration can be successfully - * updated only if the new profile is also part of the existing set of - * pipe profiles. - */ - if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0) - return -1; - - /* Update the pipe profile used by the current pipe. */ - if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id, - (int32_t)pipe_profile_id)) - return -1; - - /* Commit changes. */ - pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id); - nt->shaper_profile->n_users--; - nt->shaper_profile = sp; - nt->params.shaper_profile_id = sp->shaper_profile_id; - sp->n_users++; - - return 0; -} - -/* Traffic manager node shaper update */ -static int -pmd_tm_node_shaper_update(struct rte_eth_dev *dev, - uint32_t node_id, - uint32_t shaper_profile_id, - struct rte_tm_error *error) -{ - struct tm_node *n; - struct tm_shaper_profile *sp; - - /* Port must be started and TM used. 
*/ - if (dev->data->dev_started == 0 && (tm_used(dev) == 0)) - return -rte_tm_error_set(error, - EBUSY, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EBUSY)); - - /* Node must be valid */ - n = tm_node_search(dev, node_id); - if (n == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - /* Shaper profile must be valid. */ - sp = tm_shaper_profile_search(dev, shaper_profile_id); - if (sp == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_SHAPER_PROFILE, - NULL, - rte_strerror(EINVAL)); - - switch (n->level) { - case TM_NODE_LEVEL_PORT: - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - /* fall-through */ - case TM_NODE_LEVEL_SUBPORT: - if (update_subport_rate(dev, n, sp)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - return 0; - /* fall-through */ - case TM_NODE_LEVEL_PIPE: - if (update_pipe_rate(dev, n, sp)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - return 0; - /* fall-through */ - case TM_NODE_LEVEL_TC: - if (update_tc_rate(dev, n, sp)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - return 0; - /* fall-through */ - case TM_NODE_LEVEL_QUEUE: - /* fall-through */ - default: - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - } -} - -static inline uint32_t -tm_port_queue_id(struct rte_eth_dev *dev, - uint32_t port_subport_id, - uint32_t subport_pipe_id, - uint32_t pipe_tc_id, - uint32_t tc_queue_id) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_hierarchy *h = &p->soft.tm.h; - uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] / - h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT]; - - uint32_t port_pipe_id = - port_subport_id * n_pipes_per_subport + subport_pipe_id; - - uint32_t port_queue_id = - port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id + tc_queue_id; - - return port_queue_id; -} - -static int -read_port_stats(struct rte_eth_dev *dev, - struct tm_node *nr, - struct rte_tm_node_stats *stats, - uint64_t *stats_mask, - int clear) -{ - struct pmd_internals *p = dev->data->dev_private; - struct tm_hierarchy *h = &p->soft.tm.h; - uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT]; - uint32_t subport_id; - - for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) { - struct rte_sched_subport_stats s; - uint32_t tc_ov, id; - - /* Stats read */ - int status = rte_sched_subport_read_stats(SCHED(p), - subport_id, - &s, - &tc_ov); - if (status) - return status; - - /* Stats accumulate */ - for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) { - nr->stats.n_pkts += - s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id]; - nr->stats.n_bytes += - s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id]; - nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += - s.n_pkts_tc_dropped[id]; - nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += - s.n_bytes_tc_dropped[id]; - } - } - - /* Stats copy */ - if (stats) - memcpy(stats, &nr->stats, sizeof(*stats)); - - if (stats_mask) - *stats_mask = STATS_MASK_DEFAULT; - - /* Stats clear */ - if (clear) - memset(&nr->stats, 0, sizeof(nr->stats)); - - return 0; -} - -static int -read_subport_stats(struct rte_eth_dev *dev, - struct tm_node *ns, - struct rte_tm_node_stats *stats, - uint64_t *stats_mask, - int clear) -{ - 
struct pmd_internals *p = dev->data->dev_private; - uint32_t subport_id = tm_node_subport_id(dev, ns); - struct rte_sched_subport_stats s; - uint32_t tc_ov, tc_id; - - /* Stats read */ - int status = rte_sched_subport_read_stats(SCHED(p), - subport_id, - &s, - &tc_ov); - if (status) - return status; - - /* Stats accumulate */ - for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) { - ns->stats.n_pkts += - s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id]; - ns->stats.n_bytes += - s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id]; - ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += - s.n_pkts_tc_dropped[tc_id]; - ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += - s.n_bytes_tc_dropped[tc_id]; - } - - /* Stats copy */ - if (stats) - memcpy(stats, &ns->stats, sizeof(*stats)); - - if (stats_mask) - *stats_mask = STATS_MASK_DEFAULT; - - /* Stats clear */ - if (clear) - memset(&ns->stats, 0, sizeof(ns->stats)); - - return 0; -} - -static int -read_pipe_stats(struct rte_eth_dev *dev, - struct tm_node *np, - struct rte_tm_node_stats *stats, - uint64_t *stats_mask, - int clear) -{ - struct pmd_internals *p = dev->data->dev_private; - - uint32_t pipe_id = tm_node_pipe_id(dev, np); - - struct tm_node *ns = np->parent_node; - uint32_t subport_id = tm_node_subport_id(dev, ns); - uint32_t tc_id, queue_id; - uint32_t i; - - /* Stats read */ - for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) { - struct rte_sched_queue_stats s; - uint16_t qlen; - - if (i < RTE_SCHED_TRAFFIC_CLASS_BE) { - tc_id = i; - queue_id = i; - } else { - tc_id = RTE_SCHED_TRAFFIC_CLASS_BE; - queue_id = i - tc_id; - } - - uint32_t qid = tm_port_queue_id(dev, - subport_id, - pipe_id, - tc_id, - queue_id); - - int status = rte_sched_queue_read_stats(SCHED(p), - qid, - &s, - &qlen); - if (status) - return status; - - /* Stats accumulate */ - np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped; - np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped; - np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped; - np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += - s.n_bytes_dropped; - np->stats.leaf.n_pkts_queued = qlen; - } - - /* Stats copy */ - if (stats) - memcpy(stats, &np->stats, sizeof(*stats)); - - if (stats_mask) - *stats_mask = STATS_MASK_DEFAULT; - - /* Stats clear */ - if (clear) - memset(&np->stats, 0, sizeof(np->stats)); - - return 0; -} - -static int -read_tc_stats(struct rte_eth_dev *dev, - struct tm_node *nt, - struct rte_tm_node_stats *stats, - uint64_t *stats_mask, - int clear) -{ - struct pmd_internals *p = dev->data->dev_private; - - uint32_t tc_id = tm_node_tc_id(dev, nt); - - struct tm_node *np = nt->parent_node; - uint32_t pipe_id = tm_node_pipe_id(dev, np); - - struct tm_node *ns = np->parent_node; - uint32_t subport_id = tm_node_subport_id(dev, ns); - struct rte_sched_queue_stats s; - uint32_t qid, i; - uint16_t qlen; - int status; - - /* Stats read */ - if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) { - qid = tm_port_queue_id(dev, - subport_id, - pipe_id, - tc_id, - 0); - - status = rte_sched_queue_read_stats(SCHED(p), - qid, - &s, - &qlen); - if (status) - return status; - - /* Stats accumulate */ - nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped; - nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped; - nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped; - nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += - s.n_bytes_dropped; - nt->stats.leaf.n_pkts_queued = qlen; - } else { - for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) { - qid = tm_port_queue_id(dev, - subport_id, - pipe_id, - 
tc_id, - i); - - status = rte_sched_queue_read_stats(SCHED(p), - qid, - &s, - &qlen); - if (status) - return status; - - /* Stats accumulate */ - nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped; - nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped; - nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += - s.n_pkts_dropped; - nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += - s.n_bytes_dropped; - nt->stats.leaf.n_pkts_queued = qlen; - } - } - - /* Stats copy */ - if (stats) - memcpy(stats, &nt->stats, sizeof(*stats)); - - if (stats_mask) - *stats_mask = STATS_MASK_DEFAULT; - - /* Stats clear */ - if (clear) - memset(&nt->stats, 0, sizeof(nt->stats)); - - return 0; -} - -static int -read_queue_stats(struct rte_eth_dev *dev, - struct tm_node *nq, - struct rte_tm_node_stats *stats, - uint64_t *stats_mask, - int clear) -{ - struct pmd_internals *p = dev->data->dev_private; - struct rte_sched_queue_stats s; - uint16_t qlen; - - uint32_t queue_id = tm_node_queue_id(dev, nq); - - struct tm_node *nt = nq->parent_node; - uint32_t tc_id = tm_node_tc_id(dev, nt); - - struct tm_node *np = nt->parent_node; - uint32_t pipe_id = tm_node_pipe_id(dev, np); - - struct tm_node *ns = np->parent_node; - uint32_t subport_id = tm_node_subport_id(dev, ns); - - /* Stats read */ - uint32_t qid = tm_port_queue_id(dev, - subport_id, - pipe_id, - tc_id, - queue_id); - - int status = rte_sched_queue_read_stats(SCHED(p), - qid, - &s, - &qlen); - if (status) - return status; - - /* Stats accumulate */ - nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped; - nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped; - nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped; - nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += - s.n_bytes_dropped; - nq->stats.leaf.n_pkts_queued = qlen; - - /* Stats copy */ - if (stats) - memcpy(stats, &nq->stats, sizeof(*stats)); - - if (stats_mask) - *stats_mask = STATS_MASK_QUEUE; - - /* Stats clear */ - if (clear) - memset(&nq->stats, 0, sizeof(nq->stats)); - - return 0; -} - -/* Traffic manager read stats counters for specific node */ -static int -pmd_tm_node_stats_read(struct rte_eth_dev *dev, - uint32_t node_id, - struct rte_tm_node_stats *stats, - uint64_t *stats_mask, - int clear, - struct rte_tm_error *error) -{ - struct tm_node *n; - - /* Port must be started and TM used. 
*/ - if (dev->data->dev_started == 0 && (tm_used(dev) == 0)) - return -rte_tm_error_set(error, - EBUSY, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EBUSY)); - - /* Node must be valid */ - n = tm_node_search(dev, node_id); - if (n == NULL) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_NODE_ID, - NULL, - rte_strerror(EINVAL)); - - switch (n->level) { - case TM_NODE_LEVEL_PORT: - if (read_port_stats(dev, n, stats, stats_mask, clear)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - return 0; - - case TM_NODE_LEVEL_SUBPORT: - if (read_subport_stats(dev, n, stats, stats_mask, clear)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - return 0; - - case TM_NODE_LEVEL_PIPE: - if (read_pipe_stats(dev, n, stats, stats_mask, clear)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - return 0; - - case TM_NODE_LEVEL_TC: - if (read_tc_stats(dev, n, stats, stats_mask, clear)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - return 0; - - case TM_NODE_LEVEL_QUEUE: - default: - if (read_queue_stats(dev, n, stats, stats_mask, clear)) - return -rte_tm_error_set(error, - EINVAL, - RTE_TM_ERROR_TYPE_UNSPECIFIED, - NULL, - rte_strerror(EINVAL)); - return 0; - } -} - -const struct rte_tm_ops pmd_tm_ops = { - .node_type_get = pmd_tm_node_type_get, - .capabilities_get = pmd_tm_capabilities_get, - .level_capabilities_get = pmd_tm_level_capabilities_get, - .node_capabilities_get = pmd_tm_node_capabilities_get, - - .wred_profile_add = pmd_tm_wred_profile_add, - .wred_profile_delete = pmd_tm_wred_profile_delete, - .shared_wred_context_add_update = NULL, - .shared_wred_context_delete = NULL, - - .shaper_profile_add = pmd_tm_shaper_profile_add, - .shaper_profile_delete = pmd_tm_shaper_profile_delete, - .shared_shaper_add_update = pmd_tm_shared_shaper_add_update, - .shared_shaper_delete = pmd_tm_shared_shaper_delete, - - .node_add = pmd_tm_node_add, - .node_delete = pmd_tm_node_delete, - .node_suspend = NULL, - .node_resume = NULL, - .hierarchy_commit = pmd_tm_hierarchy_commit, - - .node_parent_update = pmd_tm_node_parent_update, - .node_shaper_update = pmd_tm_node_shaper_update, - .node_shared_shaper_update = NULL, - .node_stats_update = NULL, - .node_wfq_weight_mode_update = NULL, - .node_cman_update = NULL, - .node_wred_context_update = NULL, - .node_shared_wred_context_update = NULL, - - .node_stats_read = pmd_tm_node_stats_read, -}; -- 2.34.1
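For reference, the ops table removed above implemented the generic ethdev traffic management API (rte_tm) on top of librte_sched. The sketch below is illustrative only: it shows the shaper-profile / node-add / hierarchy-commit sequence that the removed pmd_tm_hierarchy_commit() and related callbacks served, driven through the public rte_tm API rather than the softnic internals. It assumes a port whose driver still exposes rte_tm (the softnic PMD no longer does after this patch); port id, node ids, level ids and the 1 Gbps rate are made-up example values, and whether a given driver accepts this exact hierarchy shape depends on the capabilities it reports.

    #include <string.h>
    #include <rte_tm.h>

    /* Illustrative sketch: register one shaper profile, add a root (port-level)
     * node plus one child (subport-level) node, then freeze the hierarchy.
     * All ids and rates below are hypothetical example values.
     */
    static int
    tm_hierarchy_example(uint16_t port_id)
    {
        struct rte_tm_error error;
        struct rte_tm_shaper_params shaper = {
            .peak = { .rate = 1000000000 / 8, .size = 1000000 }, /* bytes/s, bytes */
            .pkt_length_adjust = 24, /* example framing overhead */
        };
        struct rte_tm_node_params np;
        int status;

        /* 1. Register shaper profile 0. */
        status = rte_tm_shaper_profile_add(port_id, 0, &shaper, &error);
        if (status)
            return status;

        /* 2. Add the root node (no parent), then one child node under it. */
        memset(&np, 0, sizeof(np));
        np.shaper_profile_id = 0;
        np.nonleaf.n_sp_priorities = 1;

        status = rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL,
            0, 1, 0 /* level */, &np, &error);
        if (status)
            return status;

        status = rte_tm_node_add(port_id, 1001, 1000, 0, 1, 1 /* level */,
            &np, &error);
        if (status)
            return status;

        /* 3. Freeze the hierarchy; with clear_on_fail set, the driver discards
         * the hierarchy on error, the same contract the removed
         * pmd_tm_hierarchy_commit() honored via hierarchy_commit_check().
         */
        return rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &error);
    }

In practice an application would first size the hierarchy from rte_tm_capabilities_get() and rte_tm_level_capabilities_get(), which is the information the removed capability callbacks in pmd_tm_ops answered for the softnic port.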