From: Nithin Dabilpuram
To: Jerin Jacob, Nithin Dabilpuram, Kiran Kumar K
Cc: dev@dpdk.org, kkanas@marvell.com
Date: Fri, 3 Apr 2020 14:22:15 +0530
Message-Id: <20200403085216.32684-11-nithind1988@gmail.com>
X-Mailer: git-send-email 2.8.4
In-Reply-To: <20200403085216.32684-1-nithind1988@gmail.com>
References: <20200312111907.31555-1-ndabilpuram@marvell.com>
 <20200403085216.32684-1-nithind1988@gmail.com>
Subject: [dpdk-dev] [PATCH v3 10/11] net/octeontx2: add Tx queue ratelimit callback

From: Krzysztof Kanas

Add Tx queue rate limiting support. This support is mutually exclusive
with TM support, i.e. once a TM hierarchy is configured, the Tx queue
rate limit configuration is no longer valid.
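As context for reviewers (not part of the patch itself), the new dev op is
reached through the generic ethdev helper rte_eth_set_queue_rate_limit()
rather than the rte_tm API. A minimal usage sketch, assuming an already
configured and started octeontx2 port; the helper name, port id, queue id
and the 100 Mbps value are illustrative assumptions only:

	#include <rte_ethdev.h>

	/* Cap Tx queue 'queue_id' of port 'port_id' at 100 Mbps.
	 * The rate argument is in Mbps; the call is dispatched to the
	 * PMD's set_queue_rate_limit callback added by this patch
	 * (otx2_nix_tm_set_queue_rate_limit). Note that in this patch a
	 * rate of 0 applies SW XOFF, i.e. it stops the queue rather than
	 * removing the limit.
	 */
	static int
	example_queue_rate_limit(uint16_t port_id, uint16_t queue_id)
	{
		return rte_eth_set_queue_rate_limit(port_id, queue_id, 100);
	}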
Signed-off-by: Krzysztof Kanas
Signed-off-by: Nithin Dabilpuram
---
 drivers/net/octeontx2/otx2_ethdev.c |   1 +
 drivers/net/octeontx2/otx2_tm.c     | 241 +++++++++++++++++++++++++++++++++++-
 drivers/net/octeontx2/otx2_tm.h     |   3 +
 3 files changed, 243 insertions(+), 2 deletions(-)

diff --git a/drivers/net/octeontx2/otx2_ethdev.c b/drivers/net/octeontx2/otx2_ethdev.c
index 6896797..78b7f3a 100644
--- a/drivers/net/octeontx2/otx2_ethdev.c
+++ b/drivers/net/octeontx2/otx2_ethdev.c
@@ -2071,6 +2071,7 @@ static const struct eth_dev_ops otx2_eth_dev_ops = {
 	.rx_descriptor_status = otx2_nix_rx_descriptor_status,
 	.tx_descriptor_status = otx2_nix_tx_descriptor_status,
 	.tx_done_cleanup = otx2_nix_tx_done_cleanup,
+	.set_queue_rate_limit = otx2_nix_tm_set_queue_rate_limit,
 	.pool_ops_supported = otx2_nix_pool_ops_supported,
 	.filter_ctrl = otx2_nix_dev_filter_ctrl,
 	.get_module_info = otx2_nix_get_module_info,
diff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c
index c235c00..c7b1f1f 100644
--- a/drivers/net/octeontx2/otx2_tm.c
+++ b/drivers/net/octeontx2/otx2_tm.c
@@ -2204,14 +2204,15 @@ otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
 	}
 
 	/* Delete default/ratelimit tree */
-	if (dev->tm_flags & (NIX_TM_DEFAULT_TREE)) {
+	if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
 		rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
 		if (rc) {
 			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
 			error->message = "failed to free default resources";
 			return rc;
 		}
-		dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE);
+		dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
+				   NIX_TM_RATE_LIMIT_TREE);
 	}
 
 	/* Free up user alloc'ed resources */
@@ -2673,6 +2674,242 @@ int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
+static int
+nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	uint32_t def = eth_dev->data->nb_tx_queues;
+	struct rte_tm_node_params params;
+	uint32_t leaf_parent, i, rc = 0;
+
+	memset(&params, 0, sizeof(params));
+
+	if (nix_tm_have_tl1_access(dev)) {
+		dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
+		rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL1,
+					     OTX2_TM_LVL_ROOT, false, &params);
+		if (rc)
+			goto error;
+		rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL2,
+					     OTX2_TM_LVL_SCH1, false, &params);
+		if (rc)
+			goto error;
+		rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL3,
+					     OTX2_TM_LVL_SCH2, false, &params);
+		if (rc)
+			goto error;
+		rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_TL4,
+					     OTX2_TM_LVL_SCH3, false, &params);
+		if (rc)
+			goto error;
+		leaf_parent = def + 3;
+
+		/* Add per queue SMQ nodes */
+		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+			rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
+						     leaf_parent,
+						     0, DEFAULT_RR_WEIGHT,
+						     NIX_TXSCH_LVL_SMQ,
+						     OTX2_TM_LVL_SCH4,
+						     false, &params);
+			if (rc)
+				goto error;
+		}
+
+		/* Add leaf nodes */
+		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+			rc = nix_tm_node_add_to_list(dev, i,
+						     leaf_parent + 1 + i, 0,
+						     DEFAULT_RR_WEIGHT,
+						     NIX_TXSCH_LVL_CNT,
+						     OTX2_TM_LVL_QUEUE,
+						     false, &params);
+			if (rc)
+				goto error;
+		}
+
+		return 0;
+	}
+
+	dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
+	rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+				     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
+				     OTX2_TM_LVL_ROOT, false, &params);
+	if (rc)
+		goto error;
+	rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+				     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
+				     OTX2_TM_LVL_SCH1, false, &params);
+	if (rc)
+		goto error;
+	rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+				     DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
+				     OTX2_TM_LVL_SCH2, false, &params);
+	if (rc)
+		goto error;
+	leaf_parent = def + 2;
+
+	/* Add per queue SMQ nodes */
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
+					     leaf_parent,
+					     0, DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_SMQ,
+					     OTX2_TM_LVL_SCH3,
+					     false, &params);
+		if (rc)
+			goto error;
+	}
+
+	/* Add leaf nodes */
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
+					     DEFAULT_RR_WEIGHT,
+					     NIX_TXSCH_LVL_CNT,
+					     OTX2_TM_LVL_SCH4,
+					     false, &params);
+		if (rc)
+			break;
+	}
+error:
+	return rc;
+}
+
+static int
+otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
+			   struct otx2_nix_tm_node *tm_node,
+			   uint64_t tx_rate)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_shaper_profile profile;
+	struct otx2_mbox *mbox = dev->mbox;
+	volatile uint64_t *reg, *regval;
+	struct nix_txschq_config *req;
+	uint16_t flags;
+	uint8_t k = 0;
+	int rc;
+
+	flags = tm_node->flags;
+
+	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+	req->lvl = NIX_TXSCH_LVL_MDQ;
+	reg = req->reg;
+	regval = req->regval;
+
+	if (tx_rate == 0) {
+		k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
+		flags &= ~NIX_TM_NODE_ENABLED;
+		goto exit;
+	}
+
+	if (!(flags & NIX_TM_NODE_ENABLED)) {
+		k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
+		flags |= NIX_TM_NODE_ENABLED;
+	}
+
+	/* Use only PIR for rate limit */
+	memset(&profile, 0, sizeof(profile));
+	profile.params.peak.rate = tx_rate;
+	/* Minimum burst of ~4us Bytes of Tx */
+	profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
+					   (4ull * tx_rate) / (1E6 * 8));
+	if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
+		dev->tm_rate_min = tx_rate;
+
+	k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
+exit:
+	req->num_regs = k;
+	rc = otx2_mbox_process(mbox);
+	if (rc)
+		return rc;
+
+	tm_node->flags = flags;
+	return 0;
+}
+
+int
+otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+				 uint16_t queue_idx, uint16_t tx_rate_mbps)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
+	struct otx2_nix_tm_node *tm_node;
+	int rc;
+
+	/* Check for supported revisions */
+	if (otx2_dev_is_95xx_Ax(dev) ||
+	    otx2_dev_is_96xx_Ax(dev))
+		return -EINVAL;
+
+	if (queue_idx >= eth_dev->data->nb_tx_queues)
+		return -EINVAL;
+
+	if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
+	    !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
+		goto error;
+
+	if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
+	    eth_dev->data->nb_tx_queues > 1) {
+		/* For TM topology change ethdev needs to be stopped */
+		if (eth_dev->data->dev_started)
+			return -EBUSY;
+
+		/*
+		 * Disable xmit will be enabled when
+		 * new topology is available.
+		 */
+		rc = nix_xmit_disable(eth_dev);
+		if (rc) {
+			otx2_err("failed to disable TX, rc=%d", rc);
+			return -EIO;
+		}
+
+		rc = nix_tm_free_resources(dev, 0, 0, false);
+		if (rc < 0) {
+			otx2_tm_dbg("failed to free default resources, rc %d",
+				    rc);
+			return -EIO;
+		}
+
+		rc = nix_tm_prepare_rate_limited_tree(eth_dev);
+		if (rc < 0) {
+			otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
+			return rc;
+		}
+
+		rc = nix_tm_alloc_resources(eth_dev, true);
+		if (rc != 0) {
+			otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
+			return rc;
+		}
+
+		dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
+		dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
+	}
+
+	tm_node = nix_tm_node_search(dev, queue_idx, false);
+
+	/* check if we found a valid leaf node */
+	if (!tm_node ||
+	    !nix_tm_is_leaf(dev, tm_node->lvl) ||
+	    !tm_node->parent ||
+	    tm_node->parent->hw_id == UINT32_MAX)
+		return -EIO;
+
+	return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
+error:
+	otx2_tm_dbg("Unsupported TM tree 0x%0x", dev->tm_flags);
+	return -EINVAL;
+}
+
 int
 otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
 {
diff --git a/drivers/net/octeontx2/otx2_tm.h b/drivers/net/octeontx2/otx2_tm.h
index d5d58ec..7b1672e 100644
--- a/drivers/net/octeontx2/otx2_tm.h
+++ b/drivers/net/octeontx2/otx2_tm.h
@@ -11,6 +11,7 @@
 
 #define NIX_TM_DEFAULT_TREE	BIT_ULL(0)
 #define NIX_TM_COMMITTED	BIT_ULL(1)
+#define NIX_TM_RATE_LIMIT_TREE	BIT_ULL(2)
 #define NIX_TM_TL1_NO_SP	BIT_ULL(3)
 
 struct otx2_eth_dev;
@@ -20,6 +21,8 @@ int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev);
 int otx2_nix_tm_fini(struct rte_eth_dev *eth_dev);
 int otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
 			      uint32_t *rr_quantum, uint16_t *smq);
+int otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+				     uint16_t queue_idx, uint16_t tx_rate);
 int otx2_nix_sq_flush_pre(void *_txq, bool dev_started);
 int otx2_nix_sq_flush_post(void *_txq);
 int otx2_nix_sq_enable(void *_txq);
-- 
2.8.4