From: Xueming Li <xuemingl@nvidia.com>
To: <dev@dpdk.org>
Cc: <xuemingl@nvidia.com>, Jerin Jacob <jerinjacobk@gmail.com>,
	Ferruh Yigit <ferruh.yigit@intel.com>,
	Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
	Thomas Monjalon <thomas@monjalon.net>,
	Lior Margalit <lmargalit@nvidia.com>,
	"Ananyev Konstantin" <konstantin.ananyev@intel.com>,
	Xiaoyun Li <xiaoyun.li@intel.com>
Subject: [dpdk-dev] [PATCH v8 5/6] app/testpmd: force shared Rx queue polled on same core
Date: Mon, 18 Oct 2021 20:08:41 +0800	[thread overview]
Message-ID: <20211018120842.2058637-6-xuemingl@nvidia.com> (raw)
In-Reply-To: <20211018120842.2058637-1-xuemingl@nvidia.com>

A shared Rx queue must be polled on a single core. This patch checks the
forwarding configuration and stops forwarding if a shared Rx queue is
scheduled on multiple cores.

It is suggested to use the same number of Rx queues and polling cores.
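
For example, a setup with the same number of Rx queues and forwarding cores
keeps each shared Rx queue on a single core. The command below is only a
sketch: the PCI address and core list are placeholders, and --rxq-share is
the option introduced by patch 3/6 of this series:

  dpdk-testpmd -l 0-4 -a <PCI_BDF> -- -i --rxq=4 --txq=4 --nb-cores=4 --rxq-share

With --rxq=4 and --nb-cores=4, the check added by this patch passes because
no shared Rx queue is polled from more than one core.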

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
 app/test-pmd/config.c  | 103 +++++++++++++++++++++++++++++++++++++++++
 app/test-pmd/testpmd.c |   4 +-
 app/test-pmd/testpmd.h |   2 +
 3 files changed, 108 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index f8fb8961cae..c4150d77589 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -2890,6 +2890,109 @@ port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
 	}
 }
 
+/*
+ * Check whether a shared Rx queue is scheduled on other lcores.
+ */
+static bool
+fwd_stream_on_other_lcores(uint16_t domain_id, lcoreid_t src_lc,
+			   portid_t src_port, queueid_t src_rxq,
+			   uint32_t share_group, queueid_t share_rxq)
+{
+	streamid_t sm_id;
+	streamid_t nb_fs_per_lcore;
+	lcoreid_t  nb_fc;
+	lcoreid_t  lc_id;
+	struct fwd_stream *fs;
+	struct rte_port *port;
+	struct rte_eth_dev_info *dev_info;
+	struct rte_eth_rxconf *rxq_conf;
+
+	nb_fc = cur_fwd_config.nb_fwd_lcores;
+	/* Check remaining cores. */
+	for (lc_id = src_lc + 1; lc_id < nb_fc; lc_id++) {
+		sm_id = fwd_lcores[lc_id]->stream_idx;
+		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
+		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
+		     sm_id++) {
+			fs = fwd_streams[sm_id];
+			port = &ports[fs->rx_port];
+			dev_info = &port->dev_info;
+			rxq_conf = &port->rx_conf[fs->rx_queue];
+			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
+			    == 0)
+				/* Not shared rxq. */
+				continue;
+			if (domain_id != port->dev_info.switch_info.domain_id)
+				continue;
+			if (rxq_conf->share_group != share_group)
+				continue;
+			if (rxq_conf->share_qid != share_rxq)
+				continue;
+			printf("Shared Rx queue group %u queue %hu can't be scheduled on different cores:\n",
+			       share_group, share_rxq);
+			printf("  lcore %hhu Port %hu queue %hu\n",
+			       src_lc, src_port, src_rxq);
+			printf("  lcore %hhu Port %hu queue %hu\n",
+			       lc_id, fs->rx_port, fs->rx_queue);
+			printf("Please use --nb-cores=%hu to limit number of forwarding cores\n",
+			       nb_rxq);
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Check the shared Rx queue configuration.
+ *
+ * A shared Rx queue group must not be scheduled on more than one core.
+ */
+bool
+pkt_fwd_shared_rxq_check(void)
+{
+	streamid_t sm_id;
+	streamid_t nb_fs_per_lcore;
+	lcoreid_t  nb_fc;
+	lcoreid_t  lc_id;
+	struct fwd_stream *fs;
+	uint16_t domain_id;
+	struct rte_port *port;
+	struct rte_eth_dev_info *dev_info;
+	struct rte_eth_rxconf *rxq_conf;
+
+	nb_fc = cur_fwd_config.nb_fwd_lcores;
+	/*
+	 * Check the streams on each core and make sure the same switch
+	 * domain + group + queue is not scheduled on more than one core.
+	 */
+	for (lc_id = 0; lc_id < nb_fc; lc_id++) {
+		sm_id = fwd_lcores[lc_id]->stream_idx;
+		nb_fs_per_lcore = fwd_lcores[lc_id]->stream_nb;
+		for (; sm_id < fwd_lcores[lc_id]->stream_idx + nb_fs_per_lcore;
+		     sm_id++) {
+			fs = fwd_streams[sm_id];
+			/* Record the lcore this stream is scheduled on. */
+			fs->lcore = fwd_lcores[lc_id];
+			port = &ports[fs->rx_port];
+			dev_info = &port->dev_info;
+			rxq_conf = &port->rx_conf[fs->rx_queue];
+			if ((dev_info->dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)
+			    == 0)
+				/* Not shared rxq. */
+				continue;
+			/* Check shared rxq not scheduled on remaining cores. */
+			domain_id = port->dev_info.switch_info.domain_id;
+			if (fwd_stream_on_other_lcores(domain_id, lc_id,
+						       fs->rx_port,
+						       fs->rx_queue,
+						       rxq_conf->share_group,
+						       rxq_conf->share_qid))
+				return false;
+		}
+	}
+	return true;
+}
+
 /*
  * Setup forwarding configuration for each logical core.
  */
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 123142ed110..f3f81ef561f 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2236,10 +2236,12 @@ start_packet_forwarding(int with_tx_first)
 
 	fwd_config_setup();
 
+	pkt_fwd_config_display(&cur_fwd_config);
+	if (!pkt_fwd_shared_rxq_check())
+		return;
 	if(!no_flush_rx)
 		flush_fwd_rx_queues();
 
-	pkt_fwd_config_display(&cur_fwd_config);
 	rxtx_config_display();
 
 	fwd_stats_reset();
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 3dfaaad94c0..f121a2da90c 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -144,6 +144,7 @@ struct fwd_stream {
 	uint64_t     core_cycles; /**< used for RX and TX processing */
 	struct pkt_burst_stats rx_burst_stats;
 	struct pkt_burst_stats tx_burst_stats;
+	struct fwd_lcore *lcore; /**< Lcore this stream is scheduled on. */
 };
 
 /**
@@ -795,6 +796,7 @@ void port_summary_header_display(void);
 void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
 void tx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
 void fwd_lcores_config_display(void);
+bool pkt_fwd_shared_rxq_check(void);
 void pkt_fwd_config_display(struct fwd_config *cfg);
 void rxtx_config_display(void);
 void fwd_config_setup(void);
-- 
2.33.0


