DPDK patches and discussions
From: Konstantin Ananyev <konstantin.ananyev@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCHv6 4/9] e1000: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim
Date: Thu, 22 Oct 2015 13:06:27 +0100
Message-ID: <1445515592-25920-5-git-send-email-konstantin.ananyev@intel.com>
In-Reply-To: <1445515592-25920-1-git-send-email-konstantin.ananyev@intel.com>

Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
---
 drivers/net/e1000/e1000_ethdev.h | 36 ++++++++++++++++++++
 drivers/net/e1000/em_ethdev.c    | 14 ++++++++
 drivers/net/e1000/em_rxtx.c      | 71 +++++++++++++++++++++++-----------------
 drivers/net/e1000/igb_ethdev.c   | 22 +++++++++++++
 drivers/net/e1000/igb_rxtx.c     | 66 ++++++++++++++++++++++++-------------
 5 files changed, 156 insertions(+), 53 deletions(-)
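
A minimal usage sketch for reviewers (not part of the patch) of what this
series exposes for e1000/igb ports. It assumes port 0 is an initialized
port with RX queue 0 configured; rte_eth_rx_queue_info_get() is the
generic entry point added in patch 1/9:

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_queue_limits(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxq_info rxq_info;

	/* Descriptor limits are now reported through dev_info. */
	rte_eth_dev_info_get(port_id, &dev_info);
	printf("rx descs: min=%u max=%u align=%u\n",
	       dev_info.rx_desc_lim.nb_min,
	       dev_info.rx_desc_lim.nb_max,
	       dev_info.rx_desc_lim.nb_align);

	/* Per-queue state is now reported through the new rxq_info_get op. */
	if (rte_eth_rx_queue_info_get(port_id, 0, &rxq_info) == 0)
		printf("rxq 0: nb_desc=%u scattered_rx=%u\n",
		       rxq_info.nb_desc, rxq_info.scattered_rx);
}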

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 4e69e44..3c6f613 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -108,6 +108,30 @@
 	ETH_RSS_IPV6_TCP_EX | \
 	ETH_RSS_IPV6_UDP_EX)
 
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
+ * descriptors must satisfy the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define	E1000_MIN_RING_DESC	32
+#define	E1000_MAX_RING_DESC	4096
+
+/*
+ * TDBA/RDBA need only be aligned on a 16-byte boundary, but TDLEN/RDLEN
+ * must be a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte
+ * boundary. This also helps with cache line effects: the hardware
+ * supports cache line sizes of up to 128 bytes.
+ */
+#define	E1000_ALIGN	128
+
+#define	IGB_RXD_ALIGN	(E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
+#define	IGB_TXD_ALIGN	(E1000_ALIGN / sizeof(union e1000_adv_tx_desc))
+
+#define	EM_RXD_ALIGN	(E1000_ALIGN / sizeof(struct e1000_rx_desc))
+#define	EM_TXD_ALIGN	(E1000_ALIGN / sizeof(struct e1000_data_desc))
+
 /* structure for interrupt relative data */
 struct e1000_interrupt {
 	uint32_t flags;
@@ -307,6 +331,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);
 
 int igb_pf_host_configure(struct rte_eth_dev *eth_dev);
 
+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -343,6 +373,12 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
 
+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo);
+
 void igb_pf_host_uninit(struct rte_eth_dev *dev);
 
 #endif /* _E1000_ETHDEV_H_ */
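
A quick standalone sanity sketch of the new alignment checks (assuming
the usual 16-byte e1000 descriptor size from the base driver headers):
EM_RXD_ALIGN evaluates to 128 / 16 == 8, so the new "nb_desc % align"
test accepts exactly the same values as the old
"(nb_desc * sizeof(desc)) % 128" test:

#include <stdint.h>
#include <stdio.h>

#define E1000_ALIGN   128
#define DESC_SIZE     16                         /* sizeof(struct e1000_rx_desc) */
#define EM_RXD_ALIGN  (E1000_ALIGN / DESC_SIZE)  /* == 8 */

int
main(void)
{
	uint16_t nb_desc;

	for (nb_desc = 32; nb_desc <= 4096; nb_desc++) {
		int old_ok = (nb_desc * DESC_SIZE) % E1000_ALIGN == 0;
		int new_ok = nb_desc % EM_RXD_ALIGN == 0;

		if (old_ok != new_ok) {
			printf("mismatch at nb_desc=%u\n", nb_desc);
			return 1;
		}
	}
	printf("old and new checks agree on [32, 4096]\n");
	return 0;
}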
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 912f5dd..0cbc228 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -166,6 +166,8 @@ static const struct eth_dev_ops eth_em_ops = {
 	.mac_addr_add         = eth_em_rar_set,
 	.mac_addr_remove      = eth_em_rar_clear,
 	.set_mc_addr_list     = eth_em_set_mc_addr_list,
+	.rxq_info_get         = em_rxq_info_get,
+	.txq_info_get         = em_txq_info_get,
 };
 
 /**
@@ -933,6 +935,18 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->max_rx_queues = 1;
 	dev_info->max_tx_queues = 1;
+
+	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = E1000_MAX_RING_DESC,
+		.nb_min = E1000_MIN_RING_DESC,
+		.nb_align = EM_RXD_ALIGN,
+	};
+
+	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+		.nb_max = E1000_MAX_RING_DESC,
+		.nb_min = E1000_MIN_RING_DESC,
+		.nb_align = EM_TXD_ALIGN,
+	};
 }
 
 /* return 0 means link status changed, -1 means not changed */
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 3b8776d..03e1bc2 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1081,26 +1081,6 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return (nb_rx);
 }
 
-/*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define EM_ALIGN 128
-
-/*
- * Maximum number of Ring Descriptors.
- *
- * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * desscriptors should meet the following condition:
- * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
- */
-#define EM_MIN_RING_DESC 32
-#define EM_MAX_RING_DESC 4096
-
 #define	EM_MAX_BUF_SIZE     16384
 #define EM_RCTL_FLXBUF_STEP 1024
 
@@ -1210,11 +1190,11 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of EM_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||
-			(nb_desc > EM_MAX_RING_DESC) ||
-			(nb_desc < EM_MIN_RING_DESC)) {
+	if (nb_desc % EM_TXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return -(EINVAL);
 	}
 
@@ -1272,7 +1252,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
+	tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;
 	if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
 			socket_id)) == NULL)
 		return (-ENOMEM);
@@ -1375,11 +1355,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of EM_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
-			(nb_desc > EM_MAX_RING_DESC) ||
-			(nb_desc < EM_MIN_RING_DESC)) {
+	if (nb_desc % EM_RXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return (-EINVAL);
 	}
 
@@ -1399,7 +1379,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	}
 
 	/* Allocate RX ring for max possible number of hardware descriptors. */
-	rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
+	rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;
 	if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
 			socket_id)) == NULL)
 		return (-ENOMEM);
@@ -1881,3 +1861,34 @@ eth_em_tx_init(struct rte_eth_dev *dev)
 	/* This write will effectively turn on the transmit unit. */
 	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
 }
+
+void
+em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct em_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+}
+
+void
+em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct em_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+}
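
Note that em_rxq_info_get()/em_txq_info_get() dereference
dev->data->rx_queues[queue_id] without validating queue_id themselves;
argument screening happens in the generic wrapper added in patch 1/9.
Roughly (a simplified sketch, not the exact library code):

int
rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	if (!rte_eth_dev_is_valid_port(port_id) || qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues)
		return -EINVAL;

	/* PMDs that lack the callback report -ENOTSUP to the caller. */
	if (dev->dev_ops->rxq_info_get == NULL)
		return -ENOTSUP;

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
	return 0;
}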
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 848ef6e..73c067e 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -281,6 +281,18 @@ static const struct rte_pci_id pci_id_igbvf_map[] = {
 {0},
 };
 
+static const struct rte_eth_desc_lim rx_desc_lim = {
+	.nb_max = E1000_MAX_RING_DESC,
+	.nb_min = E1000_MIN_RING_DESC,
+	.nb_align = IGB_RXD_ALIGN,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+	.nb_max = E1000_MAX_RING_DESC,
+	.nb_min = E1000_MIN_RING_DESC,
+	.nb_align = IGB_TXD_ALIGN,
+};
+
 static const struct eth_dev_ops eth_igb_ops = {
 	.dev_configure        = eth_igb_configure,
 	.dev_start            = eth_igb_start,
@@ -319,6 +331,8 @@ static const struct eth_dev_ops eth_igb_ops = {
 	.rss_hash_conf_get    = eth_igb_rss_hash_conf_get,
 	.filter_ctrl          = eth_igb_filter_ctrl,
 	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
+	.rxq_info_get         = igb_rxq_info_get,
+	.txq_info_get         = igb_txq_info_get,
 	.timesync_enable      = igb_timesync_enable,
 	.timesync_disable     = igb_timesync_disable,
 	.timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
@@ -349,6 +363,8 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
 	.tx_queue_setup       = eth_igb_tx_queue_setup,
 	.tx_queue_release     = eth_igb_tx_queue_release,
 	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
+	.rxq_info_get         = igb_rxq_info_get,
+	.txq_info_get         = igb_txq_info_get,
 	.mac_addr_set         = igbvf_default_mac_addr_set,
 	.get_reg_length       = igbvf_get_reg_length,
 	.get_reg              = igbvf_get_regs,
@@ -1570,6 +1586,9 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.txq_flags = 0,
 	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 static void
@@ -1621,6 +1640,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.txq_flags = 0,
 	};
+
+	dev_info->rx_desc_lim = rx_desc_lim;
+	dev_info->tx_desc_lim = tx_desc_lim;
 }
 
 /* return 0 means link status changed, -1 means not changed */
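
With rx_desc_lim/tx_desc_lim now filled in for both igb and igbvf, an
application can size its rings against the advertised limits instead of
hard-coding driver constants. An illustrative helper (the function name
is hypothetical, not part of the patch; it assumes nb_min is itself a
multiple of nb_align, as it is for these drivers):

#include <rte_ethdev.h>

/* Clamp a requested ring size to a device's advertised limits. */
static uint16_t
fit_ring_size(uint16_t requested, const struct rte_eth_desc_lim *lim)
{
	if (requested > lim->nb_max)
		requested = lim->nb_max;
	if (lim->nb_align > 1)
		requested -= requested % lim->nb_align;	/* round down */
	if (requested < lim->nb_min)
		requested = lim->nb_min;
	return requested;
}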
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 19905fd..cca3300 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1148,25 +1148,12 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 }
 
 /*
- * Rings setup and release.
- *
- * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
- * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
- * This will also optimize cache line size effect.
- * H/W supports up to cache line size 128.
- */
-#define IGB_ALIGN 128
-
-/*
  * Maximum number of Ring Descriptors.
  *
  * Since RDLEN/TDLEN must be a multiple of 128 bytes, the number of ring
  * descriptors must satisfy the following condition:
  *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
  */
-#define IGB_MIN_RING_DESC 32
-#define IGB_MAX_RING_DESC 4096
-
 static const struct rte_memzone *
 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 		      uint16_t queue_id, uint32_t ring_size, int socket_id)
@@ -1183,10 +1170,10 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 
 #ifdef RTE_LIBRTE_XEN_DOM0
 	return rte_memzone_reserve_bounded(z_name, ring_size,
-			socket_id, 0, IGB_ALIGN, RTE_PGSIZE_2M);
+			socket_id, 0, E1000_ALIGN, RTE_PGSIZE_2M);
 #else
 	return rte_memzone_reserve_aligned(z_name, ring_size,
-			socket_id, 0, IGB_ALIGN);
+			socket_id, 0, E1000_ALIGN);
 #endif
 }
 
@@ -1282,10 +1269,11 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of IGB_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
-	    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+	if (nb_desc % IGB_TXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return -EINVAL;
 	}
 
@@ -1321,7 +1309,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	 * handle the maximum ring size is allocated in order to allow for
 	 * resizing in later calls to the queue setup function.
 	 */
-	size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
+	size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
 	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
 					size, socket_id);
 	if (tz == NULL) {
@@ -1430,10 +1418,11 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	/*
 	 * Validate number of receive descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
-	 * of IGB_ALIGN.
+	 * of E1000_ALIGN.
 	 */
-	if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
-	    (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+	if (nb_desc % IGB_RXD_ALIGN != 0 ||
+			(nb_desc > E1000_MAX_RING_DESC) ||
+			(nb_desc < E1000_MIN_RING_DESC)) {
 		return (-EINVAL);
 	}
 
@@ -1469,7 +1458,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	 *  handle the maximum ring size is allocated in order to allow for
 	 *  resizing in later calls to the queue setup function.
 	 */
-	size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
+	size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
 	rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
 	if (rz == NULL) {
 		igb_rx_queue_release(rxq);
@@ -2482,3 +2471,34 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
 	}
 
 }
+
+void
+igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct igb_rx_queue *rxq;
+
+	rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_rx_desc;
+
+	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.rx_drop_en = rxq->drop_en;
+}
+
+void
+igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct igb_tx_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_tx_desc;
+
+	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+}
-- 
1.8.5.3


Thread overview: 26+ messages
2015-10-22 12:06 [dpdk-dev] [PATCHv6 0/9] ethdev: add new API to retrieve RX/TX queue information Konstantin Ananyev
2015-10-22 12:06 ` [dpdk-dev] [PATCHv6 1/9] " Konstantin Ananyev
2015-10-27 12:51   ` [dpdk-dev] [PATCHv7 0/9] " Konstantin Ananyev
2015-10-28  9:55     ` Remy Horton
2015-11-01 23:17       ` Thomas Monjalon
2015-10-27 12:51   ` [dpdk-dev] [PATCHv7 1/9] " Konstantin Ananyev
2015-10-27 12:51   ` [dpdk-dev] [PATCHv7 2/9] i40e: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim Konstantin Ananyev
2015-10-27 12:51   ` [dpdk-dev] [PATCHv7 3/9] ixgbe: " Konstantin Ananyev
2015-10-27 12:51   ` [dpdk-dev] [PATCHv7 4/9] e1000: " Konstantin Ananyev
2015-10-27 12:51   ` [dpdk-dev] [PATCHv7 5/9] fm10k: add HW specific desc_lim data into dev_info Konstantin Ananyev
2015-10-27 12:51   ` [dpdk-dev] [PATCHv7 6/9] cxgbe: " Konstantin Ananyev
2015-10-27 12:51   ` [dpdk-dev] [PATCHv7 7/9] vmxnet3: " Konstantin Ananyev
2015-10-31  8:54     ` Yong Wang
2015-11-02 10:33       ` Ananyev, Konstantin
2015-10-27 12:51   ` [dpdk-dev] [PATCHv7 8/9] testpmd: add new command to display RX/TX queue information Konstantin Ananyev
2015-11-01 23:16     ` Thomas Monjalon
2015-11-02 13:33       ` Ananyev, Konstantin
2015-10-27 12:51   ` [dpdk-dev] [PATCHv7 9/9] doc: release notes update for queue_info_get() and (rx|tx)_desc_limit Konstantin Ananyev
2015-10-22 12:06 ` [dpdk-dev] [PATCHv6 2/9] i40e: add support for eth_(rxq|txq)_info_get and (rx|tx)_desc_lim Konstantin Ananyev
2015-10-22 12:06 ` [dpdk-dev] [PATCHv6 3/9] ixgbe: " Konstantin Ananyev
2015-10-22 12:06 ` Konstantin Ananyev [this message]
2015-10-22 12:06 ` [dpdk-dev] [PATCHv6 5/9] fm10k: add HW specific desc_lim data into dev_info Konstantin Ananyev
2015-10-22 12:06 ` [dpdk-dev] [PATCHv6 6/9] cxgbe: " Konstantin Ananyev
2015-10-22 12:06 ` [dpdk-dev] [PATCHv6 7/9] vmxnet3: " Konstantin Ananyev
2015-10-22 12:06 ` [dpdk-dev] [PATCHv6 8/9] testpmd: add new command to display RX/TX queue information Konstantin Ananyev
2015-10-22 12:06 ` [dpdk-dev] [PATCHv6 9/9] doc: release notes update for queue_info_get() Konstantin Ananyev
