From: Nithin Dabilpuram
To: Jerin Jacob, Nithin Dabilpuram, Kiran Kumar K
Cc: dev@dpdk.org, kkanas@marvell.com
Date: Fri, 3 Apr 2020 14:22:13 +0530
Message-Id: <20200403085216.32684-9-nithind1988@gmail.com>
X-Mailer: git-send-email 2.8.4
In-Reply-To: <20200403085216.32684-1-nithind1988@gmail.com>
References: <20200312111907.31555-1-ndabilpuram@marvell.com>
 <20200403085216.32684-1-nithind1988@gmail.com>
Subject: [dpdk-dev] [PATCH v3 08/11] net/octeontx2: add tm dynamic topology update cb

From: Nithin Dabilpuram

Add dynamic parent and shaper update callbacks that can be used to
change the RR quantum or the PIR/CIR rate dynamically after hierarchy
commit. The dynamic parent update callback currently supports only
updating the RR quantum of a given child with respect to its parent;
changing the priority or the parent itself is not yet supported.
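For reference, an application reaches these driver callbacks through the
generic rte_tm API once the hierarchy has been committed. A minimal sketch
follows (illustrative only; the port, node and profile IDs used below are
hypothetical):

#include <rte_tm.h>

/* Change the shaper profile and then the RR weight of a committed node.
 * On octeontx2 these calls are served by otx2_nix_tm_node_shaper_update()
 * and otx2_nix_tm_node_parent_update() respectively.
 */
static int
update_committed_node(uint16_t port_id)
{
	struct rte_tm_error err;
	int rc;

	/* Point the node at another shaper profile; the driver
	 * re-programs PIR/CIR under SW_XOFF.
	 */
	rc = rte_tm_node_shaper_update(port_id, 10 /* node_id */,
				       2 /* profile_id */, &err);
	if (rc)
		return rc;

	/* Only the weight (RR quantum) may change; parent and priority
	 * must be passed back unchanged or the driver rejects the call.
	 */
	return rte_tm_node_parent_update(port_id, 10 /* node_id */,
					 1 /* parent_node_id */,
					 0 /* priority */, 50 /* weight */,
					 &err);
}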
Signed-off-by: Nithin Dabilpuram
Signed-off-by: Krzysztof Kanas
---
 drivers/net/octeontx2/otx2_tm.c | 190 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 190 insertions(+)

diff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c
index 68771d1..d8e54ee 100644
--- a/drivers/net/octeontx2/otx2_tm.c
+++ b/drivers/net/octeontx2/otx2_tm.c
@@ -2238,6 +2238,194 @@ otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
 }
 
 static int
+otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
+			       uint32_t node_id,
+			       uint32_t profile_id,
+			       struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_shaper_profile *profile = NULL;
+	struct otx2_mbox *mbox = dev->mbox;
+	struct otx2_nix_tm_node *tm_node;
+	struct nix_txschq_config *req;
+	uint8_t k;
+	int rc;
+
+	tm_node = nix_tm_node_search(dev, node_id, true);
+	if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node";
+		return -EINVAL;
+	}
+
+	if (profile_id == tm_node->params.shaper_profile_id)
+		return 0;
+
+	if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		profile = nix_tm_shaper_profile_search(dev, profile_id);
+		if (!profile) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+			error->message = "shaper profile ID not exist";
+			return -EINVAL;
+		}
+	}
+
+	tm_node->params.shaper_profile_id = profile_id;
+
+	/* Nothing to do if not yet committed */
+	if (!(dev->tm_flags & NIX_TM_COMMITTED))
+		return 0;
+
+	tm_node->flags &= ~NIX_TM_NODE_ENABLED;
+
+	/* Flush the specific node with SW_XOFF */
+	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+	req->lvl = tm_node->hw_lvl;
+	k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval);
+	req->num_regs = k;
+
+	rc = send_tm_reqval(mbox, req, error);
+	if (rc)
+		return rc;
+
+	/* Update the PIR/CIR and clear SW XOFF */
+	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+	req->lvl = tm_node->hw_lvl;
+
+	k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval);
+
+	k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]);
+
+	req->num_regs = k;
+	rc = send_tm_reqval(mbox, req, error);
+	if (!rc)
+		tm_node->flags |= NIX_TM_NODE_ENABLED;
+	return rc;
+}
+
+static int
+otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev,
+			       uint32_t node_id, uint32_t new_parent_id,
+			       uint32_t priority, uint32_t weight,
+			       struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_node *tm_node, *sibling;
+	struct otx2_nix_tm_node *new_parent;
+	struct nix_txschq_config *req;
+	uint8_t k;
+	int rc;
+
+	if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "hierarchy doesn't exist";
+		return -EINVAL;
+	}
+
+	tm_node = nix_tm_node_search(dev, node_id, true);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	/* Parent id valid only for non root nodes */
+	if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) {
+		new_parent = nix_tm_node_search(dev, new_parent_id, true);
+		if (!new_parent) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+			error->message = "no such parent node";
+			return -EINVAL;
+		}
+
+		/* Current support is only for dynamic weight update */
+		if (tm_node->parent != new_parent ||
+		    tm_node->priority != priority) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+			error->message = "only weight update supported";
+			return -EINVAL;
+		}
+	}
+
+	/* Skip if no change */
+	if (tm_node->weight == weight)
+		return 0;
+
+	tm_node->weight = weight;
+
+	/* For leaf nodes, SQ CTX needs update */
+	if (nix_tm_is_leaf(dev, tm_node->lvl)) {
+		/* Update SQ quantum data on the fly */
+		rc = nix_sq_sched_data(dev, tm_node, true);
+		if (rc) {
+			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+			error->message = "sq sched data update failed";
+			return rc;
+		}
+	} else {
+		/* XOFF Parent node */
+		req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+		req->lvl = tm_node->parent->hw_lvl;
+		req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true,
+						   req->reg, req->regval);
+		rc = send_tm_reqval(dev->mbox, req, error);
+		if (rc)
+			return rc;
+
+		/* XOFF this node and all other siblings */
+		req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+		req->lvl = tm_node->hw_lvl;
+
+		k = 0;
+		TAILQ_FOREACH(sibling, &dev->node_list, node) {
+			if (sibling->parent != tm_node->parent)
+				continue;
+			k += prepare_tm_sw_xoff(sibling, true, &req->reg[k],
+						&req->regval[k]);
+		}
+		req->num_regs = k;
+		rc = send_tm_reqval(dev->mbox, req, error);
+		if (rc)
+			return rc;
+
+		/* Update new weight for current node */
+		req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+		req->lvl = tm_node->hw_lvl;
+		req->num_regs = prepare_tm_sched_reg(dev, tm_node,
+						     req->reg, req->regval);
+		rc = send_tm_reqval(dev->mbox, req, error);
+		if (rc)
+			return rc;
+
+		/* XON this node and all other siblings */
+		req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+		req->lvl = tm_node->hw_lvl;
+
+		k = 0;
+		TAILQ_FOREACH(sibling, &dev->node_list, node) {
+			if (sibling->parent != tm_node->parent)
+				continue;
+			k += prepare_tm_sw_xoff(sibling, false, &req->reg[k],
+						&req->regval[k]);
+		}
+		req->num_regs = k;
+		rc = send_tm_reqval(dev->mbox, req, error);
+		if (rc)
+			return rc;
+
+		/* XON Parent node */
+		req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+		req->lvl = tm_node->parent->hw_lvl;
+		req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false,
+						   req->reg, req->regval);
+		rc = send_tm_reqval(dev->mbox, req, error);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+static int
 otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
 			    struct rte_tm_node_stats *stats,
 			    uint64_t *stats_mask, int clear,
@@ -2334,6 +2522,8 @@ const struct rte_tm_ops otx2_tm_ops = {
 	.node_resume = otx2_nix_tm_node_resume,
 	.hierarchy_commit = otx2_nix_tm_hierarchy_commit,
 
+	.node_shaper_update = otx2_nix_tm_node_shaper_update,
+	.node_parent_update = otx2_nix_tm_node_parent_update,
 	.node_stats_read = otx2_nix_tm_node_stats_read,
 };
 
-- 
2.8.4