From mboxrd@z Thu Jan  1 00:00:00 1970
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Jerin Jacob, Nithin Dabilpuram, Kiran Kumar K
Cc: Krzysztof Kanas, dev@dpdk.org
Date: Thu, 12 Mar 2020 16:49:02 +0530
Message-ID: <20200312111907.31555-7-ndabilpuram@marvell.com>
In-Reply-To: <20200312111907.31555-1-ndabilpuram@marvell.com>
References: <20200312111907.31555-1-ndabilpuram@marvell.com>
X-Mailer: git-send-email 2.8.4
MIME-Version: 1.0
Content-Type: text/plain
Subject: [dpdk-dev] [PATCH 06/11] net/octeontx2: add tm hierarchy commit callback

Add TM hierarchy commit callback to support enabling a newly created
topology. On commit, transmit is first disabled and all send queues are
drained, resources held by the default (or previously committed) tree
are freed, and hardware scheduler resources are then allocated for the
user-created tree.
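
For context, applications reach this callback through the generic
rte_tm API rather than calling the driver directly. The sketch below is
illustrative only: the port id, the non-leaf node id, and the flat
root-plus-leaves shape are hypothetical (a real hierarchy on this
device may need nodes at several scheduler levels), and error handling
is abbreviated.

#include <stdint.h>
#include <string.h>

#include <rte_tm.h>

static int
setup_and_commit_tm(uint16_t port_id, uint16_t nb_txq)
{
	struct rte_tm_node_params np;
	struct rte_tm_error err;
	uint32_t root_id = 1000;	/* hypothetical non-leaf node id */
	uint32_t i;
	int rc;

	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;

	/* Root node at the top level, no parent */
	rc = rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL,
			     0 /* priority */, 1 /* weight */,
			     0 /* level */, &np, &err);
	if (rc)
		return rc;

	/* One leaf per Tx queue; leaf node ids must be 0..nb_txq-1,
	 * which is what the commit callback's leaf-count check expects.
	 */
	for (i = 0; i < nb_txq; i++) {
		rc = rte_tm_node_add(port_id, i, root_id, 0, 1,
				     RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
		if (rc)
			return rc;
	}

	/* Freeze the topology; the driver lands in
	 * nix_tm_hierarchy_commit() below.
	 */
	return rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &err);
}

Passing clear_on_fail=1 asks the driver to free the half-built
hierarchy on failure, matching the clear_on_fail handling in the
commit callback below.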
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: Krzysztof Kanas
---
 drivers/net/octeontx2/otx2_tm.c | 170 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 170 insertions(+)

diff --git a/drivers/net/octeontx2/otx2_tm.c b/drivers/net/octeontx2/otx2_tm.c
index 175d1d5..ae779a5 100644
--- a/drivers/net/octeontx2/otx2_tm.c
+++ b/drivers/net/octeontx2/otx2_tm.c
@@ -1668,6 +1668,101 @@ validate_prio(struct otx2_eth_dev *dev, uint32_t lvl,
 }
 
 static int
+nix_xmit_disable(struct rte_eth_dev *eth_dev)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
+	uint16_t sqb_cnt, head_off, tail_off;
+	struct otx2_nix_tm_node *tm_node;
+	struct otx2_eth_txq *txq;
+	uint64_t wdata, val;
+	int i, rc;
+
+	otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name);
+
+	/* Enable CGX RXTX to drain pkts */
+	if (!eth_dev->data->dev_started) {
+		rc = otx2_cgx_rxtx_start(dev);
+		if (rc)
+			return rc;
+	}
+
+	/* XON all SMQs */
+	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+		if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+			continue;
+		if (!(tm_node->flags & NIX_TM_NODE_HWRES))
+			continue;
+
+		rc = nix_smq_xoff(dev, tm_node, false);
+		if (rc) {
+			otx2_err("Failed to enable smq %u, rc=%d",
+				 tm_node->hw_id, rc);
+			goto cleanup;
+		}
+	}
+
+	/* Flush all tx queues */
+	for (i = 0; i < sq_cnt; i++) {
+		txq = eth_dev->data->tx_queues[i];
+
+		rc = otx2_nix_sq_sqb_aura_fc(txq, false);
+		if (rc) {
+			otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
+			goto cleanup;
+		}
+
+		/* Wait for sq entries to be flushed */
+		rc = nix_txq_flush_sq_spin(txq);
+		if (rc) {
+			otx2_err("Failed to drain sq, rc=%d", rc);
+			goto cleanup;
+		}
+	}
+
+	/* XOFF & flush all SMQs. HRM mandates that all
+	 * SQs be empty before an SMQ flush is issued.
+	 */
+	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+		if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+			continue;
+		if (!(tm_node->flags & NIX_TM_NODE_HWRES))
+			continue;
+
+		rc = nix_smq_xoff(dev, tm_node, true);
+		if (rc) {
+			otx2_err("Failed to disable smq %u, rc=%d",
+				 tm_node->hw_id, rc);
+			goto cleanup;
+		}
+	}
+
+	/* Verify sanity of all tx queues */
+	for (i = 0; i < sq_cnt; i++) {
+		txq = eth_dev->data->tx_queues[i];
+
+		wdata = ((uint64_t)txq->sq << 32);
+		val = otx2_atomic64_add_nosync(wdata,
+			(int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
+
+		sqb_cnt = val & 0xFFFF;
+		head_off = (val >> 20) & 0x3F;
+		tail_off = (val >> 28) & 0x3F;
+
+		if (sqb_cnt > 1 || head_off != tail_off ||
+		    (*txq->fc_mem != txq->nb_sqb_bufs))
+			otx2_err("Failed to gracefully flush sq %u", txq->sq);
+	}
+
+cleanup:
+	/* Restore CGX state */
+	if (!eth_dev->data->dev_started)
+		rc |= otx2_cgx_rxtx_stop(dev);
+
+	return rc;
+}
+
+static int
 nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
 		uint32_t parent_node_id, uint32_t priority,
 		uint32_t weight, uint32_t lvl,
@@ -1879,11 +1974,86 @@ nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
 	return nix_tm_node_suspend_resume(eth_dev, node_id, error, false);
 }
 
+static int
+nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
+			int clear_on_fail,
+			struct rte_tm_error *error)
+{
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	struct otx2_nix_tm_node *tm_node;
+	uint32_t leaf_cnt = 0;
+	int rc;
+
+	if (dev->tm_flags & NIX_TM_COMMITTED) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "hierarchy exists";
+		return -EINVAL;
+	}
+
+	/* Check if we have all the leaf nodes */
+	TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+		if (tm_node->flags & NIX_TM_NODE_USER &&
+		    tm_node->id < dev->tm_leaf_cnt)
+			leaf_cnt++;
+	}
+
+	if (leaf_cnt != dev->tm_leaf_cnt) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "incomplete hierarchy";
+		return -EINVAL;
+	}
+
+	/* Disable xmit; it will be re-enabled once the
+	 * new topology is available.
+	 */
+	rc = nix_xmit_disable(eth_dev);
+	if (rc) {
+		otx2_err("failed to disable TX, rc=%d", rc);
+		return -EIO;
+	}
+
+	/* Delete default tree */
+	if (dev->tm_flags & NIX_TM_DEFAULT_TREE) {
+		rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
+		if (rc) {
+			error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+			error->message = "failed to free default resources";
+			return rc;
+		}
+		dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
+	}
+
+	/* Free up user allocated resources */
+	rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER,
+				   NIX_TM_NODE_USER, true);
+	if (rc) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "failed to free user resources";
+		return rc;
+	}
+
+	rc = nix_tm_alloc_resources(eth_dev, true);
+	if (rc) {
+		error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+		error->message = "alloc resources failed";
+		/* TODO should we restore default config ? */
+		if (clear_on_fail)
+			nix_tm_free_resources(dev, 0, 0, false);
+		return rc;
+	}
+
+	error->type = RTE_TM_ERROR_TYPE_NONE;
+	dev->tm_flags |= NIX_TM_COMMITTED;
+	return 0;
+}
+
 const struct rte_tm_ops otx2_tm_ops = {
 	.node_add = nix_tm_node_add,
 	.node_delete = nix_tm_node_delete,
 	.node_suspend = nix_tm_node_suspend,
 	.node_resume = nix_tm_node_resume,
+	.hierarchy_commit = nix_tm_hierarchy_commit,
 };
 
 static int
-- 
2.8.4
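
A note on the drain check in nix_xmit_disable(): an SQ is treated as
drained when it holds at most one SQB, its head and tail offsets match,
and the flow-control count equals nb_sqb_bufs. The standalone sketch
below replays only the status-word decode with made-up values; the
field layout is copied from the patch above, not quoted from the HRM.

#include <stdint.h>
#include <stdio.h>

/* Decode the SQ status word the way nix_xmit_disable() does:
 * bits [15:0]  - SQBs still held by the SQ
 * bits [25:20] - head offset within the current SQB
 * bits [33:28] - tail offset within the current SQB
 */
static void
decode_sq_status(uint64_t val)
{
	uint16_t sqb_cnt  = val & 0xFFFF;
	uint16_t head_off = (val >> 20) & 0x3F;
	uint16_t tail_off = (val >> 28) & 0x3F;

	if (sqb_cnt > 1 || head_off != tail_off)
		printf("not drained: sqb_cnt=%u head=%u tail=%u\n",
		       sqb_cnt, head_off, tail_off);
	else
		printf("drained: sqb_cnt=%u\n", sqb_cnt);
}

int
main(void)
{
	decode_sq_status(0x1);	/* one SQB, head == tail: drained */
	decode_sq_status(0x2);	/* two SQBs pending: not drained */
	decode_sq_status((5ULL << 28) | (3ULL << 20) | 0x1); /* head != tail */
	return 0;
}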