From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <dev-bounces@dpdk.org>
Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 7FCEE43D5B;
	Thu, 28 Mar 2024 00:10:09 +0100 (CET)
Received: from mails.dpdk.org (localhost [127.0.0.1])
	by mails.dpdk.org (Postfix) with ESMTP id 2390B42D45;
	Thu, 28 Mar 2024 00:09:34 +0100 (CET)
Received: from linux.microsoft.com (linux.microsoft.com [13.77.154.182])
 by mails.dpdk.org (Postfix) with ESMTP id 0E6A7406BC
 for <dev@dpdk.org>; Thu, 28 Mar 2024 00:09:21 +0100 (CET)
Received: by linux.microsoft.com (Postfix, from userid 1086)
 id 8179C20E6948; Wed, 27 Mar 2024 16:09:19 -0700 (PDT)
DKIM-Filter: OpenDKIM Filter v2.11.0 linux.microsoft.com 8179C20E6948
DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=linux.microsoft.com;
 s=default; t=1711580959;
 bh=ooRKZKpQqT6Xz3T4PN3KAauYAiKEPc2MXyayPWFB/FY=;
 h=From:To:Cc:Subject:Date:In-Reply-To:References:From;
 b=b/z7B3r0fxdTou4rn89ZNDpXF8imGe/TonCEoRZ7RURxjlAVM2MMEy+mZTtIrK6y4
 h7j5aHnm5YRkMVZNopEfd/T0KVKkU3uLCYFKpm5goEWa70kvC8csFEb6hUc0F2kVhA
 8SfDv4TtqscPUgDSCn+UK7IMO5iL8LLY0L5h8Wxs=
From: Tyler Retzlaff <roretzla@linux.microsoft.com>
To: dev@dpdk.org
Cc: Akhil Goyal <gakhil@marvell.com>, Aman Singh <aman.deep.singh@intel.com>,
 Anatoly Burakov <anatoly.burakov@intel.com>,
 Bruce Richardson <bruce.richardson@intel.com>,
 Byron Marohn <byron.marohn@intel.com>, Conor Walsh <conor.walsh@intel.com>,
 Cristian Dumitrescu <cristian.dumitrescu@intel.com>,
 Dariusz Sosnowski <dsosnowski@nvidia.com>,
 David Hunt <david.hunt@intel.com>, Jerin Jacob <jerinj@marvell.com>,
 Jingjing Wu <jingjing.wu@intel.com>,
 Kirill Rybalchenko <kirill.rybalchenko@intel.com>,
 Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>,
 Matan Azrad <matan@nvidia.com>, Ori Kam <orika@nvidia.com>,
 Radu Nicolau <radu.nicolau@intel.com>, Ruifeng Wang <ruifeng.wang@arm.com>,
 Sameh Gobriel <sameh.gobriel@intel.com>,
 Sivaprasad Tummala <sivaprasad.tummala@amd.com>,
 Suanming Mou <suanmingm@nvidia.com>, Sunil Kumar Kori <skori@marvell.com>,
 Vamsi Attunuru <vattunuru@marvell.com>,
 Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
 Vladimir Medvedkin <vladimir.medvedkin@intel.com>,
 Yipeng Wang <yipeng1.wang@intel.com>,
 Yuying Zhang <yuying.zhang@intel.com>,
 Tyler Retzlaff <roretzla@linux.microsoft.com>
Subject: [PATCH v2 06/15] common/mlx5: pack structures when building with MSVC
Date: Wed, 27 Mar 2024 16:09:09 -0700
Message-Id: <1711580958-20808-7-git-send-email-roretzla@linux.microsoft.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1711580958-20808-1-git-send-email-roretzla@linux.microsoft.com>
References: <1710968771-16435-1-git-send-email-roretzla@linux.microsoft.com>
 <1711580958-20808-1-git-send-email-roretzla@linux.microsoft.com>
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.29
Precedence: list
List-Id: DPDK patches and discussions <dev.dpdk.org>
List-Unsubscribe: <https://mails.dpdk.org/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://mails.dpdk.org/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <https://mails.dpdk.org/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
Errors-To: dev-bounces@dpdk.org

Add __rte_msvc_pack to all __rte_packed structs so that they are packed
when building with MSVC.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 drivers/common/mlx5/mlx5_common_mr.h    |  4 ++++
 drivers/common/mlx5/mlx5_common_utils.h |  1 +
 drivers/common/mlx5/mlx5_prm.h          | 28 ++++++++++++++++++++++++++++
 3 files changed, 33 insertions(+)
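
Note (not part of the commit): below is a minimal sketch of how the two
annotations are assumed to combine, based on the pack macro introduced
earlier in this series. Under MSVC, __rte_packed is assumed to expand to
nothing, so __rte_msvc_pack supplies the packing via a pack pragma; under
GCC/Clang, __rte_msvc_pack is empty and __rte_packed does the work. The
macro definitions and the struct below are illustrative stand-ins only,
and whether/where a matching pack pop occurs is outside the scope of this
sketch.

  #include <stdint.h>

  /* Illustrative stand-ins for the rte_common.h definitions. */
  #ifdef RTE_TOOLCHAIN_MSVC
  #define __rte_msvc_pack __pragma(pack(push, 1))
  #define __rte_packed
  #else
  #define __rte_msvc_pack
  #define __rte_packed __attribute__((__packed__))
  #endif

  /* Hypothetical struct, annotated on both sides as in this patch. */
  __rte_msvc_pack
  struct example {
  	uint8_t a;   /* offset 0 */
  	uint32_t b;  /* offset 1 when packed, offset 4 otherwise */
  } __rte_packed;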

diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
index 8789d40..4f4bd73 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -49,6 +49,7 @@ struct mlx5_mr {
 };
 
 /* Cache entry for Memory Region. */
+__rte_msvc_pack
 struct mr_cache_entry {
 	uintptr_t start; /* Start address of MR. */
 	uintptr_t end; /* End address of MR. */
@@ -56,6 +57,7 @@ struct mr_cache_entry {
 } __rte_packed;
 
 /* MR Cache table for Binary search. */
+__rte_msvc_pack
 struct mlx5_mr_btree {
 	uint32_t len; /* Number of entries. */
 	uint32_t size; /* Total number of entries. */
@@ -65,6 +67,7 @@ struct mlx5_mr_btree {
 struct mlx5_common_device;
 
 /* Per-queue MR control descriptor. */
+__rte_msvc_pack
 struct mlx5_mr_ctrl {
 	uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
 	uint32_t cur_gen; /* Generation number saved to flush caches. */
@@ -78,6 +81,7 @@ struct mlx5_mr_ctrl {
 LIST_HEAD(mlx5_mempool_reg_list, mlx5_mempool_reg);
 
 /* Global per-device MR cache. */
+__rte_msvc_pack
 struct mlx5_mr_share_cache {
 	uint32_t dev_gen; /* Generation number to flush local caches. */
 	rte_rwlock_t rwlock; /* MR cache Lock. */
diff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h
index ae15119..a44975c 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -27,6 +27,7 @@
  * Structure of the entry in the mlx5 list, user should define its own struct
  * that contains this in order to store the data.
  */
+__rte_msvc_pack
 struct mlx5_list_entry {
 	LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
 	uint32_t ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index c671c75..bf9ecd1 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -319,6 +319,7 @@ enum mlx5_mpw_mode {
 };
 
 /* WQE Control segment. */
+__rte_msvc_pack
 struct mlx5_wqe_cseg {
 	uint32_t opcode;
 	uint32_t sq_ds;
@@ -336,10 +337,12 @@ struct mlx5_wqe_cseg {
 #define WQE_CSEG_WQE_INDEX_OFFSET	 8
 
 /* Header of data segment. Minimal size Data Segment */
+__rte_msvc_pack
 struct mlx5_wqe_dseg {
 	uint32_t bcount;
 	union {
 		uint8_t inline_data[MLX5_DSEG_MIN_INLINE_SIZE];
+		__rte_msvc_pack
 		struct {
 			uint32_t lkey;
 			uint64_t pbuf;
@@ -348,8 +351,10 @@ struct mlx5_wqe_dseg {
 } __rte_packed;
 
 /* Subset of struct WQE Ethernet Segment. */
+__rte_msvc_pack
 struct mlx5_wqe_eseg {
 	union {
+		__rte_msvc_pack
 		struct {
 			uint32_t swp_offs;
 			uint8_t	cs_flags;
@@ -362,6 +367,7 @@ struct mlx5_wqe_eseg {
 				uint16_t vlan_tag;
 			};
 		} __rte_packed;
+		__rte_msvc_pack
 		struct {
 			uint32_t offsets;
 			uint32_t flags;
@@ -371,6 +377,7 @@ struct mlx5_wqe_eseg {
 	};
 } __rte_packed;
 
+__rte_msvc_pack
 struct mlx5_wqe_qseg {
 	uint32_t reserved0;
 	uint32_t reserved1;
@@ -378,6 +385,7 @@ struct mlx5_wqe_qseg {
 	uint32_t qpn_cqn;
 } __rte_packed;
 
+__rte_msvc_pack
 struct mlx5_wqe_wseg {
 	uint32_t operation;
 	uint32_t lkey;
@@ -388,6 +396,7 @@ struct mlx5_wqe_wseg {
 } __rte_packed;
 
 /* The title WQEBB, header of WQE. */
+__rte_msvc_pack
 struct mlx5_wqe {
 	union {
 		struct mlx5_wqe_cseg cseg;
@@ -437,6 +446,7 @@ struct mlx5_cqe {
 	uint8_t lro_num_seg;
 	union {
 		uint8_t user_index_bytes[3];
+		__rte_msvc_pack
 		struct {
 			uint8_t user_index_hi;
 			uint16_t user_index_low;
@@ -460,6 +470,7 @@ struct mlx5_cqe_ts {
 	uint8_t op_own;
 };
 
+__rte_msvc_pack
 struct mlx5_wqe_rseg {
 	uint64_t raddr;
 	uint32_t rkey;
@@ -479,6 +490,7 @@ struct mlx5_wqe_rseg {
 #define MLX5_UMR_KLM_NUM_ALIGN \
 	(MLX5_UMR_KLM_PTR_ALIGN / sizeof(struct mlx5_klm))
 
+__rte_msvc_pack
 struct mlx5_wqe_umr_cseg {
 	uint32_t if_cf_toe_cq_res;
 	uint32_t ko_to_bs;
@@ -486,6 +498,7 @@ struct mlx5_wqe_umr_cseg {
 	uint32_t rsvd1[8];
 } __rte_packed;
 
+__rte_msvc_pack
 struct mlx5_wqe_mkey_cseg {
 	uint32_t fr_res_af_sf;
 	uint32_t qpn_mkey;
@@ -549,6 +562,7 @@ enum {
 #define MLX5_CRYPTO_MMO_TYPE_OFFSET 24
 #define MLX5_CRYPTO_MMO_OP_OFFSET 20
 
+__rte_msvc_pack
 struct mlx5_wqe_umr_bsf_seg {
 	/*
 	 * bs_bpt_eo_es contains:
@@ -582,6 +596,7 @@ struct mlx5_wqe_umr_bsf_seg {
 #pragma GCC diagnostic ignored "-Wpedantic"
 #endif
 
+__rte_msvc_pack
 struct mlx5_umr_wqe {
 	struct mlx5_wqe_cseg ctr;
 	struct mlx5_wqe_umr_cseg ucseg;
@@ -592,18 +607,21 @@ struct mlx5_umr_wqe {
 	};
 } __rte_packed;
 
+__rte_msvc_pack
 struct mlx5_rdma_write_wqe {
 	struct mlx5_wqe_cseg ctr;
 	struct mlx5_wqe_rseg rseg;
 	struct mlx5_wqe_dseg dseg[];
 } __rte_packed;
 
+__rte_msvc_pack
 struct mlx5_wqe_send_en_seg {
 	uint32_t reserve[2];
 	uint32_t sqnpc;
 	uint32_t qpn;
 } __rte_packed;
 
+__rte_msvc_pack
 struct mlx5_wqe_send_en_wqe {
 	struct mlx5_wqe_cseg ctr;
 	struct mlx5_wqe_send_en_seg sseg;
@@ -650,6 +668,7 @@ struct mlx5_wqe_metadata_seg {
 	uint64_t addr;
 };
 
+__rte_msvc_pack
 struct mlx5_gga_wqe {
 	uint32_t opcode;
 	uint32_t sq_ds;
@@ -663,16 +682,19 @@ struct mlx5_gga_wqe {
 } __rte_packed;
 
 union mlx5_gga_compress_opaque {
+	__rte_msvc_pack
 	struct {
 		uint32_t syndrome;
 		uint32_t reserved0;
 		uint32_t scattered_length;
 		union {
+			__rte_msvc_pack
 			struct {
 				uint32_t reserved1[5];
 				uint32_t crc32;
 				uint32_t adler32;
 			} v1 __rte_packed;
+			__rte_msvc_pack
 			struct {
 				uint32_t crc32;
 				uint32_t adler32;
@@ -685,9 +707,11 @@ struct mlx5_gga_wqe {
 };
 
 union mlx5_gga_crypto_opaque {
+	__rte_msvc_pack
 	struct {
 		uint32_t syndrome;
 		uint32_t reserved0[2];
+		__rte_msvc_pack
 		struct {
 			uint32_t iv[3];
 			uint32_t tag_size;
@@ -4134,6 +4158,7 @@ enum mlx5_aso_op {
 #define MLX5_ASO_CSEG_READ_ENABLE 1
 
 /* ASO WQE CTRL segment. */
+__rte_msvc_pack
 struct mlx5_aso_cseg {
 	uint32_t va_h;
 	uint32_t va_l_r;
@@ -4150,6 +4175,7 @@ struct mlx5_aso_cseg {
 #define MLX5_MTR_MAX_TOKEN_VALUE INT32_MAX
 
 /* A meter data segment - 2 per ASO WQE. */
+__rte_msvc_pack
 struct mlx5_aso_mtr_dseg {
 	uint32_t v_bo_sc_bbog_mm;
 	/*
@@ -4191,6 +4217,7 @@ struct mlx5_aso_mtr_dseg {
 #define MLX5_ASO_MTRS_PER_POOL 128
 
 /* ASO WQE data segment. */
+__rte_msvc_pack
 struct mlx5_aso_dseg {
 	union {
 		uint8_t data[MLX5_ASO_WQE_DSEG_SIZE];
@@ -4199,6 +4226,7 @@ struct mlx5_aso_dseg {
 } __rte_packed;
 
 /* ASO WQE. */
+__rte_msvc_pack
 struct mlx5_aso_wqe {
 	struct mlx5_wqe_cseg general_cseg;
 	struct mlx5_aso_cseg aso_cseg;
-- 
1.8.3.1