From: Konstantin Ananyev
To: dev@dpdk.org
Date: Tue, 27 Oct 2015 12:51:45 +0000
Message-Id: <1445950311-20497-4-git-send-email-konstantin.ananyev@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1445950311-20497-1-git-send-email-konstantin.ananyev@intel.com>
References: <1445950311-20497-1-git-send-email-konstantin.ananyev@intel.com>
In-Reply-To: <1445515592-25920-2-git-send-email-konstantin.ananyev@intel.com>
References: <1445515592-25920-2-git-send-email-konstantin.ananyev@intel.com>
Subject: [dpdk-dev] [PATCHv7 3/9] ixgbe: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim

Signed-off-by: Konstantin Ananyev
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 23 ++++++++++++++
 drivers/net/ixgbe/ixgbe_ethdev.h |  6 ++++
 drivers/net/ixgbe/ixgbe_rxtx.c   | 68 +++++++++++++++++++++++++---------------
 drivers/net/ixgbe/ixgbe_rxtx.h   | 21 +++++++++++++
 4 files changed, 93 insertions(+), 25 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ec2918c..4769bb0 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -386,6 +386,18 @@ static const struct rte_pci_id pci_id_ixgbevf_map[] = {

 };

+static const struct rte_eth_desc_lim rx_desc_lim = {
+	.nb_max = IXGBE_MAX_RING_DESC,
+	.nb_min = IXGBE_MIN_RING_DESC,
+	.nb_align = IXGBE_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+	.nb_max = IXGBE_MAX_RING_DESC,
+	.nb_min = IXGBE_MIN_RING_DESC,
+	.nb_align = IXGBE_TXD_ALIGN,
+};
+
 static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.dev_configure = ixgbe_dev_configure,
 	.dev_start = ixgbe_dev_start,
@@ -456,6 +468,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.rss_hash_conf_get = ixgbe_dev_rss_hash_conf_get,
 	.filter_ctrl = ixgbe_dev_filter_ctrl,
 	.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+	.rxq_info_get = ixgbe_rxq_info_get,
+	.txq_info_get = ixgbe_txq_info_get,
 	.timesync_enable = ixgbe_timesync_enable,
 	.timesync_disable = ixgbe_timesync_disable,
 	.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
@@ -494,6 +508,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.mac_addr_add = ixgbevf_add_mac_addr,
 	.mac_addr_remove = ixgbevf_remove_mac_addr,
 	.set_mc_addr_list = ixgbe_dev_set_mc_addr_list,
+	.rxq_info_get = ixgbe_rxq_info_get,
+	.txq_info_get = ixgbe_txq_info_get,
 	.mac_addr_set = ixgbevf_set_default_mac_addr,
 	.get_reg_length = ixgbevf_get_reg_length,
 	.get_reg = ixgbevf_get_regs,
@@ -2396,6 +2412,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
 				ETH_TXQ_FLAGS_NOOFFLOADS,
 	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
+
 	dev_info->hash_key_size = IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
 	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
 	dev_info->flow_type_rss_offloads = IXGBE_RSS_OFFLOAD_ALL;
@@ -2449,6 +2469,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
 				ETH_TXQ_FLAGS_NOOFFLOADS,
 	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
 }

 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c3d4f4f..d16f476 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -351,6 +351,12 @@ int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);

 int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);

+void ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a598a72..ba08588 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1821,25 +1821,6 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
  **********************************************************************/

 /*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
- * also optimize cache line size effect. H/W supports up to cache line size 128.
- */
-#define IXGBE_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * descriptors should meet the following condition:
- * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
- */
-#define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
-
-/*
  * Create memzone for HW rings. malloc can't be used as the physical address is
  * needed. If the memzone is already created, then this function returns a ptr
  * to the old one.
@@ -2007,9 +1988,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	 * It must not exceed hardware maximum, and must be multiple
 	 * of IXGBE_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
-			(nb_desc > IXGBE_MAX_RING_DESC) ||
-			(nb_desc < IXGBE_MIN_RING_DESC)) {
+	if (nb_desc % IXGBE_TXD_ALIGN != 0 ||
+			(nb_desc > IXGBE_MAX_RING_DESC) ||
+			(nb_desc < IXGBE_MIN_RING_DESC)) {
 		return -EINVAL;
 	}

@@ -2374,9 +2355,9 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	 * It must not exceed hardware maximum, and must be multiple
 	 * of IXGBE_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
-			(nb_desc > IXGBE_MAX_RING_DESC) ||
-			(nb_desc < IXGBE_MIN_RING_DESC)) {
+	if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
+			(nb_desc > IXGBE_MAX_RING_DESC) ||
+			(nb_desc < IXGBE_MIN_RING_DESC)) {
 		return (-EINVAL);
 	}

@@ -4649,6 +4630,43 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	return 0;
 }

+void
+ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct ixgbe_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct ixgbe_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+	qinfo->conf.txq_flags = txq->txq_flags;
+	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
+
 /*
  * [VF] Initializes Receive Unit.
  */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index b9eca67..475a800 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -34,6 +34,27 @@
 #ifndef _IXGBE_RXTX_H_
 #define _IXGBE_RXTX_H_

+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
+ * also optimize cache line size effect. H/W supports up to cache line size 128.
+ */
+#define IXGBE_ALIGN 128
+
+#define IXGBE_RXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_rx_desc))
+#define IXGBE_TXD_ALIGN (IXGBE_ALIGN / sizeof(union ixgbe_adv_tx_desc))
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
+ */
+#define IXGBE_MIN_RING_DESC 32
+#define IXGBE_MAX_RING_DESC 4096
+
 #define RTE_PMD_IXGBE_TX_MAX_BURST 32
 #define RTE_PMD_IXGBE_RX_MAX_BURST 32
--
1.8.5.3
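
For context, a minimal sketch (not part of the patch) of how an application
might reach these new callbacks through the generic ethdev API added earlier
in this series. The port/queue ids and the dump_queue_limits() helper name
are illustrative only:

#include <stdio.h>
#include <rte_ethdev.h>

/* Illustrative helper; port_id/queue 0 are arbitrary choices. */
static void
dump_queue_limits(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxq_info rx_qinfo;
	struct rte_eth_txq_info tx_qinfo;

	/* rx_desc_lim/tx_desc_lim are filled by ixgbe_dev_info_get(). */
	rte_eth_dev_info_get(port_id, &dev_info);
	printf("rx ring: min=%u max=%u align=%u\n",
		dev_info.rx_desc_lim.nb_min,
		dev_info.rx_desc_lim.nb_max,
		dev_info.rx_desc_lim.nb_align);

	/* These dispatch to ixgbe_rxq_info_get()/ixgbe_txq_info_get(). */
	if (rte_eth_rx_queue_info_get(port_id, 0, &rx_qinfo) == 0)
		printf("rxq 0: nb_desc=%u drop_en=%u\n",
			rx_qinfo.nb_desc, rx_qinfo.conf.rx_drop_en);
	if (rte_eth_tx_queue_info_get(port_id, 0, &tx_qinfo) == 0)
		printf("txq 0: nb_desc=%u tx_rs_thresh=%u\n",
			tx_qinfo.nb_desc, tx_qinfo.conf.tx_rs_thresh);
}

An application can also validate a requested ring size against
rx_desc_lim/tx_desc_lim (nb_min, nb_max, nb_align) before calling
rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup(), instead of relying on
the driver's -EINVAL checks shown above.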