DPDK patches and discussions
From: "Min Hu (Connor)" <humin29@huawei.com>
To: <dev@dpdk.org>
Cc: <ferruh.yigit@intel.com>
Subject: [dpdk-dev] [PATCH v2 4/6] net/hns3: improve IO path data cache usage
Date: Wed, 28 Apr 2021 17:53:11 +0800	[thread overview]
Message-ID: <1619603593-23928-5-git-send-email-humin29@huawei.com> (raw)
In-Reply-To: <1619603593-23928-1-git-send-email-humin29@huawei.com>

From: Chengwen Feng <fengchengwen@huawei.com>

This patch improves data cache usage by:
1. Rearranging the frequently accessed rxq fields in the IO path into
the first 128 bytes of the structure.
2. Rearranging the frequently accessed txq fields in the IO path into
the first 64 bytes of the structure.
3. Aligning the ptype table to the full cacheline size (128B) instead
of the minimum cacheline size (64B), because on the Kunpeng ARM
platform the L1/L2 cachelines are 64B while the L3 cacheline is 128B.

This yields a performance gain of 1.5% in the 64B packet macfwd
scenario. A compile-time sketch of the layout idea follows below.
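For readers of the archive: the layout discipline described above can be
checked at build time with offsetof()-based static assertions. The
standalone C sketch below is illustrative only and uses hypothetical
stand-in names (demo_rx_queue, demo_ptype_table, DEMO_L3_CACHELINE)
rather than the driver's real definitions; the patch itself relies on
DPDK's __rte_cache_aligned / __rte_cache_min_aligned macros, as the
hns3_ethdev.h hunks below show.

/*
 * Illustrative only -- not part of the patch. A self-contained sketch
 * of how the "hot fields first" layout and the 128B table alignment
 * could be verified at compile time. All names here are hypothetical
 * stand-ins for the hns3 definitions.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_L3_CACHELINE 128	/* Kunpeng L3 cacheline, per the commit log */

struct demo_rx_queue {
	/* Hot fields read/written on every Rx burst: kept at the front. */
	volatile void *io_head_reg;
	const uint32_t *ptype_tbl;
	void *mb_pool;
	void *rx_ring;
	void *sw_ring;
	uint16_t port_id;
	uint16_t nb_rx_desc;

	/* Cold, configuration-time fields: placed after the hot region. */
	void *io_base;
	uint64_t rx_ring_phys_addr;
	uint16_t queue_id;
	bool configured;
};

/* Compilation fails if a hot field drifts past the first 128 bytes. */
static_assert(offsetof(struct demo_rx_queue, nb_rx_desc) < DEMO_L3_CACHELINE,
	      "hot Rx fields must fit in the first 128B");

/* Align the lookup table to the largest (L3) cacheline, as in the patch. */
struct demo_ptype_table {
	uint32_t ptype[256];
} __attribute__((aligned(DEMO_L3_CACHELINE)));

static_assert(_Alignof(struct demo_ptype_table) == DEMO_L3_CACHELINE,
	      "ptype table must be 128B aligned");

int main(void)
{
	return 0;
}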

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.h |   4 +-
 drivers/net/hns3/hns3_rxtx.h   | 126 ++++++++++++++++++++++++-----------------
 2 files changed, 77 insertions(+), 53 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index 48f5307..cee78f4 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -735,7 +735,7 @@ struct hns3_ptype_table {
 	 * descriptor, it functions only when firmware report the capability of
 	 * HNS3_CAPS_RXD_ADV_LAYOUT_B and driver enabled it.
 	 */
-	uint32_t ptype[HNS3_PTYPE_NUM] __rte_cache_min_aligned;
+	uint32_t ptype[HNS3_PTYPE_NUM] __rte_cache_aligned;
 };
 
 #define HNS3_FIXED_MAX_TQP_NUM_MODE		0
@@ -839,7 +839,7 @@ struct hns3_adapter {
 
 	uint64_t dev_caps_mask;
 
-	struct hns3_ptype_table ptype_tbl __rte_cache_min_aligned;
+	struct hns3_ptype_table ptype_tbl __rte_cache_aligned;
 };
 
 enum {
diff --git a/drivers/net/hns3/hns3_rxtx.h b/drivers/net/hns3/hns3_rxtx.h
index 703c4b7..1e2e994 100644
--- a/drivers/net/hns3/hns3_rxtx.h
+++ b/drivers/net/hns3/hns3_rxtx.h
@@ -289,22 +289,14 @@ struct hns3_rx_bd_errors_stats {
 };
 
 struct hns3_rx_queue {
-	void *io_base;
 	volatile void *io_head_reg;
-	struct hns3_adapter *hns;
 	struct hns3_ptype_table *ptype_tbl;
 	struct rte_mempool *mb_pool;
 	struct hns3_desc *rx_ring;
-	uint64_t rx_ring_phys_addr; /* RX ring DMA address */
-	const struct rte_memzone *mz;
 	struct hns3_entry *sw_ring;
-	struct rte_mbuf *pkt_first_seg;
-	struct rte_mbuf *pkt_last_seg;
 
-	uint16_t queue_id;
 	uint16_t port_id;
 	uint16_t nb_rx_desc;
-	uint16_t rx_buf_len;
 	/*
 	 * threshold for the number of BDs waited to passed to hardware. If the
 	 * number exceeds the threshold, driver will pass these BDs to hardware.
@@ -318,8 +310,6 @@ struct hns3_rx_queue {
 	/* 4 if DEV_RX_OFFLOAD_KEEP_CRC offload set, 0 otherwise */
 	uint8_t crc_len;
 
-	bool rx_deferred_start; /* don't start this queue in dev start */
-	bool configured;        /* indicate if rx queue has been configured */
 	/*
 	 * Indicate whether ignore the outer VLAN field in the Rx BD reported
 	 * by the Hardware. Because the outer VLAN is the PVID if the PVID is
@@ -331,23 +321,45 @@ struct hns3_rx_queue {
 	 * driver does not need to perform PVID-related operation in Rx. At this
 	 * point, the pvid_sw_discard_en will be false.
 	 */
-	bool pvid_sw_discard_en;
-	bool ptype_en;          /* indicate if the ptype field enabled */
-	bool enabled;           /* indicate if Rx queue has been enabled */
+	uint8_t pvid_sw_discard_en:1;
+	uint8_t ptype_en:1;          /* indicate if the ptype field enabled */
+
+	uint64_t mbuf_initializer; /* value to init mbufs used with vector rx */
+	/* offset_table: used for vector, to solve execute re-order problem */
+	uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];
+
+	uint16_t bulk_mbuf_num; /* indicate bulk_mbuf valid nums */
 
 	struct hns3_rx_basic_stats basic_stats;
+
+	struct rte_mbuf *pkt_first_seg;
+	struct rte_mbuf *pkt_last_seg;
+
+	struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];
+
 	/* DFX statistics that driver does not need to discard packets */
 	struct hns3_rx_dfx_stats dfx_stats;
 	/* Error statistics that driver needs to discard packets */
 	struct hns3_rx_bd_errors_stats err_stats;
 
-	struct rte_mbuf *bulk_mbuf[HNS3_BULK_ALLOC_MBUF_NUM];
-	uint16_t bulk_mbuf_num;
-
-	/* offset_table: used for vector, to solve execute re-order problem */
-	uint8_t offset_table[HNS3_VECTOR_RX_OFFSET_TABLE_LEN + 1];
-	uint64_t mbuf_initializer; /* value to init mbufs used with vector rx */
 	struct rte_mbuf fake_mbuf; /* fake mbuf used with vector rx */
+
+
+	/*
+	 * The following fields are not accessed in the I/O path, so they are
+	 * placed at the end.
+	 */
+	void *io_base;
+	struct hns3_adapter *hns;
+	uint64_t rx_ring_phys_addr; /* RX ring DMA address */
+	const struct rte_memzone *mz;
+
+	uint16_t queue_id;
+	uint16_t rx_buf_len;
+
+	bool configured;        /* indicate if rx queue has been configured */
+	bool rx_deferred_start; /* don't start this queue in dev start */
+	bool enabled;           /* indicate if Rx queue has been enabled */
 };
 
 struct hns3_tx_basic_stats {
@@ -407,16 +419,10 @@ struct hns3_tx_dfx_stats {
 };
 
 struct hns3_tx_queue {
-	void *io_base;
 	volatile void *io_tail_reg;
-	struct hns3_adapter *hns;
 	struct hns3_desc *tx_ring;
-	uint64_t tx_ring_phys_addr; /* TX ring DMA address */
-	const struct rte_memzone *mz;
 	struct hns3_entry *sw_ring;
 
-	uint16_t queue_id;
-	uint16_t port_id;
 	uint16_t nb_tx_desc;
 	/*
 	 * index of next BD whose corresponding rte_mbuf can be released by
@@ -432,21 +438,12 @@ struct hns3_tx_queue {
 	uint16_t tx_free_thresh;
 
 	/*
-	 * For better performance in tx datapath, releasing mbuf in batches is
-	 * required.
-	 * Only checking the VLD bit of the last descriptor in a batch of the
-	 * thresh descriptors does not mean that these descriptors are all sent
-	 * by hardware successfully. So we need to check that the VLD bits of
-	 * all descriptors are cleared. and then free all mbufs in the batch.
-	 * - tx_rs_thresh
-	 *   Number of mbufs released at a time.
-	 *
-	 * - free
-	 *   Tx mbuf free array used for preserving temporarily address of mbuf
-	 *   released back to mempool, when releasing mbuf in batches.
+	 * The minimum length of the packet supported by hardware in the Tx
+	 * direction.
 	 */
-	uint16_t tx_rs_thresh;
-	struct rte_mbuf **free;
+	uint8_t min_tx_pkt_len;
+
+	uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
 
 	/*
 	 * tso mode.
@@ -464,7 +461,7 @@ struct hns3_tx_queue {
 	 *     checksum of packets that need TSO, so network driver software
 	 *     not need to recalculate it.
 	 */
-	uint8_t tso_mode;
+	uint16_t tso_mode:1;
 	/*
 	 * udp checksum mode.
 	 * value range:
@@ -480,16 +477,10 @@ struct hns3_tx_queue {
 	 *     In this mode, HW does not have the preceding problems and can
 	 *     directly calculate the checksum of these UDP packets.
 	 */
-	uint8_t udp_cksum_mode;
-	/*
-	 * The minimum length of the packet supported by hardware in the Tx
-	 * direction.
-	 */
-	uint32_t min_tx_pkt_len;
+	uint16_t udp_cksum_mode:1;
 
-	uint8_t max_non_tso_bd_num; /* max BD number of one non-TSO packet */
-	bool tx_deferred_start; /* don't start this queue in dev start */
-	bool configured;        /* indicate if tx queue has been configured */
+	uint16_t simple_bd_enable:1;
+	uint16_t tx_push_enable:1;    /* check whether the tx push is enabled */
 	/*
 	 * Indicate whether add the vlan_tci of the mbuf to the inner VLAN field
 	 * of Tx BD. Because the outer VLAN will always be the PVID when the
@@ -502,11 +493,44 @@ struct hns3_tx_queue {
 	 * PVID-related operations in Tx. And pvid_sw_shift_en will be false at
 	 * this point.
 	 */
-	bool pvid_sw_shift_en;
-	bool enabled;           /* indicate if Tx queue has been enabled */
+	uint16_t pvid_sw_shift_en:1;
+
+	/*
+	 * For better performance in tx datapath, releasing mbuf in batches is
+	 * required.
+	 * Only checking the VLD bit of the last descriptor in a batch of the
+	 * thresh descriptors does not mean that these descriptors are all sent
+	 * by hardware successfully. So we need to check that the VLD bits of
+	 * all descriptors are cleared. and then free all mbufs in the batch.
+	 * - tx_rs_thresh
+	 *   Number of mbufs released at a time.
+	 *
+	 * - free
+	 *   Tx mbuf free array used for preserving temporarily address of mbuf
+	 *   released back to mempool, when releasing mbuf in batches.
+	 */
+	uint16_t tx_rs_thresh;
+	struct rte_mbuf **free;
 
 	struct hns3_tx_basic_stats basic_stats;
 	struct hns3_tx_dfx_stats dfx_stats;
+
+
+	/*
+	 * The following fields are not accessed in the I/O path, so they are
+	 * placed at the end.
+	 */
+	void *io_base;
+	struct hns3_adapter *hns;
+	uint64_t tx_ring_phys_addr; /* TX ring DMA address */
+	const struct rte_memzone *mz;
+
+	uint16_t port_id;
+	uint16_t queue_id;
+
+	bool configured;        /* indicate if tx queue has been configured */
+	bool tx_deferred_start; /* don't start this queue in dev start */
+	bool enabled;           /* indicate if Tx queue has been enabled */
 };
 
 #define HNS3_GET_TX_QUEUE_PEND_BD_NUM(txq) \
-- 
2.7.4
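
A second detail worth noting from the Tx-queue hunks above: several
bool/uint8_t flags are folded into one-bit bitfields so that more hot
state fits in the first 64 bytes. The minimal standalone sketch below
shows that packing; the struct and flag names are hypothetical
stand-ins, not the driver's, and the exact layout of bitfields is
ABI-dependent.

/*
 * Illustrative only. Packing several boolean flags into one-bit
 * bitfields keeps the hot Tx state compact; demo_tx_flags is a
 * hypothetical name, not taken from the driver.
 */
#include <assert.h>
#include <stdint.h>

struct demo_tx_flags {
	uint16_t tso_mode:1;
	uint16_t udp_cksum_mode:1;
	uint16_t simple_bd_enable:1;
	uint16_t tx_push_enable:1;
	uint16_t pvid_sw_shift_en:1;
	/* 11 bits left for future flags without growing the struct. */
};

/* Five flags occupy one 16-bit word instead of five separate bytes. */
static_assert(sizeof(struct demo_tx_flags) == sizeof(uint16_t),
	      "flag bitfields should pack into a single 16-bit word");

int main(void)
{
	return 0;
}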


Thread overview: 31+ messages
2021-04-26  3:34 [dpdk-dev] [PATCH 0/6] optimization and bugfix for hns3 PMD Min Hu (Connor)
2021-04-26  3:34 ` [dpdk-dev] [PATCH 1/6] net/hns3: delete some unused capabilities Min Hu (Connor)
2021-04-27 13:37   ` Ferruh Yigit
2021-04-27 14:26     ` Fengchengwen
2021-04-27 14:30     ` Ferruh Yigit
2021-04-26  3:34 ` [dpdk-dev] [PATCH 2/6] net/hns3: modify write reg opt API impl Min Hu (Connor)
2021-04-26  3:34 ` [dpdk-dev] [PATCH 3/6] net/hns3: use RTE DIM instead of ARRAY SIZE Min Hu (Connor)
2021-04-26  3:34 ` [dpdk-dev] [PATCH 4/6] net/hns3: improve IO path data cache usage Min Hu (Connor)
2021-04-26  3:34 ` [dpdk-dev] [PATCH 5/6] net/hns3: log fdir configuration Min Hu (Connor)
2021-04-27 13:39   ` Ferruh Yigit
2021-04-27 14:15     ` Fengchengwen
2021-04-27 14:25     ` Ferruh Yigit
2021-04-27 14:29       ` Fengchengwen
2021-04-26  3:34 ` [dpdk-dev] [PATCH 6/6] net/hns3: fix vector Rx burst default value Min Hu (Connor)
2021-04-27 13:46   ` Ferruh Yigit
2021-04-27 14:34     ` Fengchengwen
2021-04-28  9:53 ` [dpdk-dev] [PATCH v2 0/6] optimization and bugfix for hns3 PMD Min Hu (Connor)
2021-04-28  9:53   ` [dpdk-dev] [PATCH v2 1/6] net/hns3: delete some unused capabilities Min Hu (Connor)
2021-04-28  9:53   ` [dpdk-dev] [PATCH v2 2/6] net/hns3: modify write reg opt API impl Min Hu (Connor)
2021-04-28  9:53   ` [dpdk-dev] [PATCH v2 3/6] net/hns3: use RTE DIM instead of ARRAY SIZE Min Hu (Connor)
2021-04-28  9:53   ` Min Hu (Connor) [this message]
2021-04-28  9:53   ` [dpdk-dev] [PATCH v2 5/6] net/hns3: log FDIR configuration Min Hu (Connor)
2021-04-28  9:53   ` [dpdk-dev] [PATCH v2 6/6] net/hns3: fix vector Rx burst can't exceed 32 Min Hu (Connor)
2021-04-30  6:28 ` [dpdk-dev] [PATCH v3 0/6] optimization and bugfix for hns3 PMD Min Hu (Connor)
2021-04-30  6:28   ` [dpdk-dev] [PATCH v3 1/6] net/hns3: delete some unused capabilities Min Hu (Connor)
2021-04-30  6:28   ` [dpdk-dev] [PATCH v3 2/6] net/hns3: modify write reg opt API impl Min Hu (Connor)
2021-04-30  6:28   ` [dpdk-dev] [PATCH v3 3/6] net/hns3: use RTE DIM instead of ARRAY SIZE Min Hu (Connor)
2021-04-30  6:28   ` [dpdk-dev] [PATCH v3 4/6] net/hns3: improve IO path data cache usage Min Hu (Connor)
2021-04-30  6:28   ` [dpdk-dev] [PATCH v3 5/6] net/hns3: log FDIR configuration Min Hu (Connor)
2021-04-30  6:28   ` [dpdk-dev] [PATCH v3 6/6] net/hns3: fix vector Rx burst can't exceed 32 Min Hu (Connor)
2021-05-04 16:03   ` [dpdk-dev] [PATCH v3 0/6] optimization and bugfix for hns3 PMD Ferruh Yigit
