From mboxrd@z Thu Jan 1 00:00:00 1970
From: Nithin Dabilpuram
To: Jerin Jacob, Nithin Dabilpuram, Kiran Kumar K
Cc: dev@dpdk.org, kkanas@marvell.com
Date: Fri, 3 Apr 2020 01:04:50 +0530
Message-Id: <20200402193453.17266-9-nithind1988@gmail.com>
X-Mailer: git-send-email 2.8.4
In-Reply-To: <20200402193453.17266-1-nithind1988@gmail.com>
References: <20200312111907.31555-1-ndabilpuram@marvell.com>
 <20200402193453.17266-1-nithind1988@gmail.com>
Subject: [dpdk-dev] [PATCH v2 08/11] net/octeontx2: add tm dynamic topology update cb

From: Nithin Dabilpuram

Add dynamic parent and shaper update callbacks that can be used to
change a node's RR quantum or PIR/CIR rate dynamically after hierarchy
commit. The dynamic parent update callback currently supports only
updating the RR quantum of a given child with respect to its parent;
changing the priority or the parent itself is not yet supported.
Signed-off-by: Nithin Dabilpuram
Signed-off-by: Krzysztof Kanas
---
 drivers/net/octeontx2/otx2_tm.c | 190 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 190 insertions(+)

diff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c
index 8230b5e..5a5ba5e 100644
--- a/drivers/net/octeontx2/otx2_tm.c
+++ b/drivers/net/octeontx2/otx2_tm.c
@@ -2238,6 +2238,194 @@ otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
 }
 
 static int
+otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
+			       uint32_t node_id,
+			       uint32_t profile_id,
+			       struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_shaper_profile *profile = NULL;
+	struct otx2_mbox *mbox = dev->mbox;
+	struct otx2_nix_tm_node *tm_node;
+	struct nix_txschq_config *req;
+	uint8_t k;
+	int rc;
+
+	tm_node = nix_tm_node_search(dev, node_id, true);
+	if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "invalid node";
+		return -EINVAL;
+	}
+
+	if (profile_id == tm_node->params.shaper_profile_id)
+		return 0;
+
+	if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+		profile = nix_tm_shaper_profile_search(dev, profile_id);
+		if (!profile) {
+			error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+			error->message = "shaper profile ID not exist";
+			return -EINVAL;
+		}
+	}
+
+	tm_node->params.shaper_profile_id = profile_id;
+
+	/* Nothing to do if not yet committed */
+	if (!(dev->tm_flags & NIX_TM_COMMITTED))
+		return 0;
+
+	tm_node->flags &= ~NIX_TM_NODE_ENABLED;
+
+	/* Flush the specific node with SW_XOFF */
+	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+	req->lvl = tm_node->hw_lvl;
+	k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval);
+	req->num_regs = k;
+
+	rc = send_tm_reqval(mbox, req, error);
+	if (rc)
+		return rc;
+
+	/* Update the PIR/CIR and clear SW XOFF */
+	req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+	req->lvl = tm_node->hw_lvl;
+
+	k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval);
+
+	k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]);
+
+	req->num_regs = k;
+	rc = send_tm_reqval(mbox, req, error);
+	if (!rc)
+		tm_node->flags |= NIX_TM_NODE_ENABLED;
+	return rc;
+}
+
+static int
+otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev,
+			       uint32_t node_id, uint32_t new_parent_id,
+			       uint32_t priority, uint32_t weight,
+			       struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_node *tm_node, *sibling;
+	struct otx2_nix_tm_node *new_parent;
+	struct nix_txschq_config *req;
+	uint8_t k;
+	int rc;
+
+	if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "hierarchy doesn't exist";
+		return -EINVAL;
+	}
+
+	tm_node = nix_tm_node_search(dev, node_id, true);
+	if (!tm_node) {
+		error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+		error->message = "no such node";
+		return -EINVAL;
+	}
+
+	/* Parent id valid only for non root nodes */
+	if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) {
+		new_parent = nix_tm_node_search(dev, new_parent_id, true);
+		if (!new_parent) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+			error->message = "no such parent node";
+			return -EINVAL;
+		}
+
+		/* Current support is only for dynamic weight update */
+		if (tm_node->parent != new_parent ||
+		    tm_node->priority != priority) {
+			error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+			error->message = "only weight update supported";
+			return -EINVAL;
+		}
+	}
+
+	/* Skip if no change */
+	if (tm_node->weight == weight)
+		return 0;
+
+	tm_node->weight = weight;
+
+	/* For leaf nodes, SQ CTX needs update */
+	if (nix_tm_is_leaf(dev, tm_node->lvl)) {
+		/* Update SQ quantum data on the fly */
+		rc = nix_sq_sched_data(dev, tm_node, true);
+		if (rc) {
+			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+			error->message = "sq sched data update failed";
+			return rc;
+		}
+	} else {
+		/* XOFF Parent node */
+		req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+		req->lvl = tm_node->parent->hw_lvl;
+		req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true,
+						   req->reg, req->regval);
+		rc = send_tm_reqval(dev->mbox, req, error);
+		if (rc)
+			return rc;
+
+		/* XOFF this node and all other siblings */
+		req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+		req->lvl = tm_node->hw_lvl;
+
+		k = 0;
+		TAILQ_FOREACH(sibling, &dev->node_list, node) {
+			if (sibling->parent != tm_node->parent)
+				continue;
+			k += prepare_tm_sw_xoff(sibling, true, &req->reg[k],
+						&req->regval[k]);
+		}
+		req->num_regs = k;
+		rc = send_tm_reqval(dev->mbox, req, error);
+		if (rc)
+			return rc;
+
+		/* Update new weight for current node */
+		req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+		req->lvl = tm_node->hw_lvl;
+		req->num_regs = prepare_tm_sched_reg(dev, tm_node,
+						     req->reg, req->regval);
+		rc = send_tm_reqval(dev->mbox, req, error);
+		if (rc)
+			return rc;
+
+		/* XON this node and all other siblings */
+		req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+		req->lvl = tm_node->hw_lvl;
+
+		k = 0;
+		TAILQ_FOREACH(sibling, &dev->node_list, node) {
+			if (sibling->parent != tm_node->parent)
+				continue;
+			k += prepare_tm_sw_xoff(sibling, false, &req->reg[k],
+						&req->regval[k]);
+		}
+		req->num_regs = k;
+		rc = send_tm_reqval(dev->mbox, req, error);
+		if (rc)
+			return rc;
+
+		/* XON Parent node */
+		req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+		req->lvl = tm_node->parent->hw_lvl;
+		req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false,
+						   req->reg, req->regval);
+		rc = send_tm_reqval(dev->mbox, req, error);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+static int
 otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
 			    struct rte_tm_node_stats *stats,
 			    uint64_t *stats_mask, int clear,
@@ -2334,6 +2522,8 @@ const struct rte_tm_ops otx2_tm_ops = {
 	.node_resume = otx2_nix_tm_node_resume,
 	.hierarchy_commit = otx2_nix_tm_hierarchy_commit,
 
+	.node_shaper_update = otx2_nix_tm_node_shaper_update,
+	.node_parent_update = otx2_nix_tm_node_parent_update,
+
 	.node_stats_read = otx2_nix_tm_node_stats_read,
 };
-- 
2.8.4
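
For reference, the sketch below shows roughly how an application would
reach the two new callbacks through the generic rte_tm API once the
hierarchy has been committed. It is illustrative only and not part of
the patch: the port ID, node IDs, shaper profile ID, weight and rate
values are hypothetical placeholders, and error handling is trimmed to
simple returns.

#include <string.h>
#include <rte_tm.h>

/* Hypothetical identifiers; real values depend on the node and shaper
 * profile IDs the application used when building its TM hierarchy.
 */
#define PORT_ID		0
#define SCH_NODE_ID	1000	/* a committed non-leaf scheduler node */
#define SCH_PARENT_ID	2000	/* its existing parent (must not change) */
#define NEW_PROFILE_ID	7

int
tm_dynamic_update_example(void)
{
	struct rte_tm_shaper_params sp;
	struct rte_tm_error err;
	int rc;

	/* Register a new shaper profile; rates are in bytes per second. */
	memset(&sp, 0, sizeof(sp));
	sp.peak.rate = (500ULL * 1000 * 1000) / 8;	/* ~500 Mbps PIR */
	sp.peak.size = 256 * 1024;
	rc = rte_tm_shaper_profile_add(PORT_ID, NEW_PROFILE_ID, &sp, &err);
	if (rc)
		return rc;

	/* Swap the shaper profile of a committed node; this lands in
	 * otx2_nix_tm_node_shaper_update(), which SW_XOFFs the node,
	 * rewrites PIR/CIR and re-enables it.
	 */
	rc = rte_tm_node_shaper_update(PORT_ID, SCH_NODE_ID,
				       NEW_PROFILE_ID, &err);
	if (rc)
		return rc;

	/* Update only the weight (RR quantum); parent and priority must be
	 * passed unchanged, otherwise the driver rejects the request.
	 */
	return rte_tm_node_parent_update(PORT_ID, SCH_NODE_ID,
					 SCH_PARENT_ID, 0 /* priority */,
					 20 /* new weight */, &err);
}

As the driver code above enforces, rte_tm_node_parent_update() must be
given the node's existing parent and priority; only the weight may
differ. For non-leaf nodes the new weight is programmed under a brief
SW_XOFF of the parent and all siblings, while leaf weights are pushed
directly into the SQ context.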