From: Andrew Rybchenko <arybchenko@solarflare.com>
To: dev@dpdk.org
Date: Tue, 26 Dec 2017 07:27:51 +0000
Message-ID: <1514273271-19604-7-git-send-email-arybchenko@solarflare.com>
In-Reply-To: <1514273271-19604-1-git-send-email-arybchenko@solarflare.com>
References: <1514273271-19604-1-git-send-email-arybchenko@solarflare.com>
Subject: [dpdk-dev] [PATCH 6/6] net/sfc: support more options for a number of Tx descriptors

The number of Tx descriptors is no longer used as the HW Tx ring size.
It simply defines the maximum fill level.

Signed-off-by: Andrew Rybchenko
Reviewed-by: Andy Moreton
---
 drivers/net/sfc/sfc_dp_tx.h   |  8 ++++++++
 drivers/net/sfc/sfc_ef10_tx.c | 29 ++++++++++++++++++++++++++---
 drivers/net/sfc/sfc_ethdev.c  |  3 +++
 drivers/net/sfc/sfc_tx.c      |  5 ++++-
 4 files changed, 41 insertions(+), 4 deletions(-)

diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index 4485b2f..a384a53 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -80,6 +80,13 @@ struct sfc_dp_tx_qcreate_info {
 };
 
 /**
+ * Get Tx datapath specific device info.
+ *
+ * @param dev_info	Device info to be adjusted
+ */
+typedef void (sfc_dp_tx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
+
+/**
  * Get size of transmit and event queue rings by the number of Tx
  * descriptors.
  *
@@ -162,6 +169,7 @@ struct sfc_dp_tx {
 #define SFC_DP_TX_FEAT_MULTI_PROCESS	0x8
 #define SFC_DP_TX_FEAT_MULTI_POOL	0x10
 #define SFC_DP_TX_FEAT_REFCNT		0x20
+	sfc_dp_tx_get_dev_info_t	*get_dev_info;
 	sfc_dp_tx_qsize_up_rings_t	*qsize_up_rings;
 	sfc_dp_tx_qcreate_t		*qcreate;
 	sfc_dp_tx_qdestroy_t		*qdestroy;
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index 99fe87e..02df39c 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -481,6 +481,17 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return pktp - &tx_pkts[0];
 }
 
+static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info;
+static void
+sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info)
+{
+	/*
+	 * Number of descriptors just defines maximum number of pushed
+	 * descriptors (fill level).
+	 */
+	dev_info->tx_desc_lim.nb_min = 1;
+	dev_info->tx_desc_lim.nb_align = 1;
+}
 
 static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings;
 static int
@@ -489,9 +500,19 @@ sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc,
 			   unsigned int *evq_entries,
 			   unsigned int *txq_max_fill_level)
 {
-	*txq_entries = nb_tx_desc;
-	*evq_entries = nb_tx_desc;
-	*txq_max_fill_level = SFC_EF10_TXQ_LIMIT(*txq_entries);
+	/*
+	 * rte_ethdev API guarantees that the number meets min, max and
+	 * alignment requirements.
+	 */
+	if (nb_tx_desc <= EFX_TXQ_MINNDESCS)
+		*txq_entries = EFX_TXQ_MINNDESCS;
+	else
+		*txq_entries = rte_align32pow2(nb_tx_desc);
+
+	*evq_entries = *txq_entries;
+
+	*txq_max_fill_level = RTE_MIN(nb_tx_desc,
+				      SFC_EF10_TXQ_LIMIT(*evq_entries));
 
 	return 0;
 }
@@ -637,6 +658,7 @@ struct sfc_dp_tx sfc_ef10_tx = {
 				  SFC_DP_TX_FEAT_MULTI_POOL |
 				  SFC_DP_TX_FEAT_REFCNT |
 				  SFC_DP_TX_FEAT_MULTI_PROCESS,
+	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
 	.qdestroy		= sfc_ef10_tx_qdestroy,
@@ -654,6 +676,7 @@ struct sfc_dp_tx sfc_ef10_simple_tx = {
 		.type		= SFC_DP_TX,
 	},
 	.features		= SFC_DP_TX_FEAT_MULTI_PROCESS,
+	.get_dev_info		= sfc_ef10_get_dev_info,
 	.qsize_up_rings		= sfc_ef10_tx_qsize_up_rings,
 	.qcreate		= sfc_ef10_tx_qcreate,
 	.qdestroy		= sfc_ef10_tx_qdestroy,
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 6ff4595..7d75f55 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -171,6 +171,7 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	 */
 	dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
 
+	/* Initialize to hardware limits */
 	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
 	dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
 	/*
@@ -181,6 +182,8 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	if (sa->dp_rx->get_dev_info != NULL)
 		sa->dp_rx->get_dev_info(dev_info);
+	if (sa->dp_tx->get_dev_info != NULL)
+		sa->dp_tx->get_dev_info(dev_info);
 }
 
 static const uint32_t *
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index f8ee976..f3f447b 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -160,6 +160,10 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 				     &txq_max_fill_level);
 	if (rc != 0)
 		goto fail_size_up_rings;
+	SFC_ASSERT(txq_entries >= EFX_TXQ_MINNDESCS);
+	SFC_ASSERT(txq_entries <= sa->txq_max_entries);
+	SFC_ASSERT(txq_entries >= nb_tx_desc);
+	SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
 
 	rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
 	if (rc != 0)
@@ -168,7 +172,6 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 	SFC_ASSERT(sw_index < sa->txq_count);
 	txq_info = &sa->txq_info[sw_index];
 
-	SFC_ASSERT(txq_entries <= sa->txq_max_entries);
 	txq_info->entries = txq_entries;
 
 	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
-- 
2.7.4
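
For illustration only (not part of the patch), below is a minimal standalone
sketch of the sizing policy sfc_ef10_tx_qsize_up_rings() now applies: the
requested descriptor count only caps the Tx queue fill level, while the HW
ring itself is rounded up to a power of two of at least the minimum supported
size. MIN_RING_ENTRIES, RING_RESERVE and align32pow2() are placeholders
assumed here, standing in for EFX_TXQ_MINNDESCS, the few entries that
SFC_EF10_TXQ_LIMIT() leaves unused, and rte_align32pow2().

/*
 * Illustration only, not part of the patch.  Placeholder constants
 * stand in for driver/base values (see the note above).
 */
#include <stdio.h>

#define MIN_RING_ENTRIES 512u	/* placeholder for EFX_TXQ_MINNDESCS */
#define RING_RESERVE	 16u	/* placeholder for entries kept unused in the ring */

/* Round v up to the next power of two (like rte_align32pow2()). */
static unsigned int
align32pow2(unsigned int v)
{
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;
	return v + 1;
}

int
main(void)
{
	unsigned int nb_tx_desc = 1000;	/* requested via Tx queue setup */
	unsigned int ring_entries;
	unsigned int max_fill_level;

	/* The HW Tx ring must be a power of two, not below the minimum. */
	if (nb_tx_desc <= MIN_RING_ENTRIES)
		ring_entries = MIN_RING_ENTRIES;
	else
		ring_entries = align32pow2(nb_tx_desc);

	/*
	 * The requested count no longer sets the ring size; it only
	 * caps how many descriptors may be in flight at once.
	 */
	max_fill_level = nb_tx_desc;
	if (max_fill_level > ring_entries - RING_RESERVE)
		max_fill_level = ring_entries - RING_RESERVE;

	/* e.g. requested=1000 -> ring=1024, max_fill_level=1000 */
	printf("requested=%u ring=%u max_fill_level=%u\n",
	       nb_tx_desc, ring_entries, max_fill_level);
	return 0;
}

With this split an application may request a descriptor count that is not a
power of two (e.g. 1000) and get exactly that as the fill level, instead of
the request silently dictating the ring geometry.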