From: Andre Muezerie
To: roretzla@linux.microsoft.com
Cc: aman.deep.singh@intel.com, anatoly.burakov@intel.com, bruce.richardson@intel.com, byron.marohn@intel.com, conor.walsh@intel.com, cristian.dumitrescu@intel.com, david.hunt@intel.com, dev@dpdk.org, dsosnowski@nvidia.com, gakhil@marvell.com, jerinj@marvell.com, jingjing.wu@intel.com, kirill.rybalchenko@intel.com, konstantin.v.ananyev@yandex.ru, matan@nvidia.com, orika@nvidia.com, radu.nicolau@intel.com, ruifeng.wang@arm.com, sameh.gobriel@intel.com, sivaprasad.tummala@amd.com, skori@marvell.com, stephen@networkplumber.org, suanmingm@nvidia.com, vattunuru@marvell.com, viacheslavo@nvidia.com, vladimir.medvedkin@intel.com, yipeng1.wang@intel.com, Andre Muezerie
Subject: [PATCH v6 07/30] drivers/bus: replace packed attributes
Date: Tue, 26 Nov 2024 16:52:18 -0800
Message-Id: <1732668761-5556-8-git-send-email-andremue@linux.microsoft.com>
In-Reply-To: <1732668761-5556-1-git-send-email-andremue@linux.microsoft.com>
References: <1710968771-16435-1-git-send-email-roretzla@linux.microsoft.com> <1732668761-5556-1-git-send-email-andremue@linux.microsoft.com>

MSVC struct packing is not compatible with GCC. Replace the macro __rte_packed with the pair __rte_packed_begin, which pushes the existing pack value and sets packing to 1 byte, and __rte_packed_end, which restores the pack value that was in effect before the push. __rte_packed_end is deliberately defined so that MSVC emits a compiler warning when no packing has been pushed, making it easy to identify locations where __rte_packed_begin is missing.
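For reference, a minimal sketch of how such a macro pair can be defined, assuming MSVC's pack pragma and the GCC/Clang packed attribute. This is illustrative only: the real definitions presumably come from an earlier patch in this series and may gate on DPDK's own toolchain macros rather than _MSC_VER, and struct example_hdr is a made-up type used only to show the usage pattern applied throughout this patch.

#include <stdint.h>

#ifdef _MSC_VER
/* Push the current pack value and switch to 1-byte packing. */
#define __rte_packed_begin __pragma(pack(push, 1))
/* Pop back to the saved pack value; MSVC warns if nothing was pushed. */
#define __rte_packed_end   __pragma(pack(pop))
#else
/* GCC/Clang express packing as an attribute on the type, so no prologue is needed. */
#define __rte_packed_begin
#define __rte_packed_end   __attribute__((__packed__))
#endif

/* Usage pattern applied by this patch: begin before the struct, end after it. */
__rte_packed_begin
struct example_hdr {
	uint8_t  type;
	uint32_t len; /* packed: no padding is inserted before this field */
} __rte_packed_end;

With this shape, a stray __rte_packed_end with no matching __rte_packed_begin pops an empty pack stack under MSVC and produces a warning, which is what makes missing begin markers easy to spot.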
Signed-off-by: Andre Muezerie --- drivers/bus/dpaa/include/fsl_bman.h | 15 ++- drivers/bus/dpaa/include/fsl_fman.h | 4 +- drivers/bus/dpaa/include/fsl_qman.h | 155 ++++++++++++++------------- drivers/bus/ifpga/bus_ifpga_driver.h | 8 +- drivers/bus/vmbus/rte_vmbus_reg.h | 108 +++++++++---------- 5 files changed, 150 insertions(+), 140 deletions(-) diff --git a/drivers/bus/dpaa/include/fsl_bman.h b/drivers/bus/dpaa/include/fsl_bman.h index 34d7eb32ce..9d3fc9f395 100644 --- a/drivers/bus/dpaa/include/fsl_bman.h +++ b/drivers/bus/dpaa/include/fsl_bman.h @@ -86,6 +86,7 @@ static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf) } while (0) /* See 1.5.3.5.4: "Release Command" */ +__rte_packed_begin struct bm_rcr_entry { union { struct { @@ -95,7 +96,7 @@ struct bm_rcr_entry { }; struct bm_buffer bufs[8]; }; -} __packed; +} __rte_packed_end; #define BM_RCR_VERB_VBIT 0x80 #define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */ #define BM_RCR_VERB_CMD_BPID_SINGLE 0x20 @@ -104,20 +105,23 @@ struct bm_rcr_entry { /* See 1.5.3.1: "Acquire Command" */ /* See 1.5.3.2: "Query Command" */ +__rte_packed_begin struct bm_mcc_acquire { u8 bpid; u8 __reserved1[62]; -} __packed; +} __rte_packed_end; +__rte_packed_begin struct bm_mcc_query { u8 __reserved2[63]; -} __packed; +} __rte_packed_end; +__rte_packed_begin struct bm_mc_command { u8 __dont_write_directly__verb; union { struct bm_mcc_acquire acquire; struct bm_mcc_query query; }; -} __packed; +} __rte_packed_end; #define BM_MCC_VERB_VBIT 0x80 #define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */ #define BM_MCC_VERB_CMD_ACQUIRE 0x10 @@ -136,6 +140,7 @@ struct bm_pool_state { } as, ds; }; +__rte_packed_begin struct bm_mc_result { union { struct { @@ -152,7 +157,7 @@ struct bm_mc_result { } acquire; struct bm_pool_state query; }; -} __packed; +} __rte_packed_end; #define BM_MCR_VERB_VBIT 0x80 #define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK #define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h index 5a9750ad0c..513d14cced 100644 --- a/drivers/bus/dpaa/include/fsl_fman.h +++ b/drivers/bus/dpaa/include/fsl_fman.h @@ -12,7 +12,7 @@ /* Status field in FD is updated on Rx side by FMAN with following information. * Refer to field description in FM BG. 
*/ -struct fm_status_t { +__rte_packed_begin struct fm_status_t { unsigned int reserved0:3; unsigned int dcl4c:1; /* Don't Check L4 Checksum */ unsigned int reserved1:1; @@ -38,7 +38,7 @@ struct fm_status_t { unsigned int phe:1; /* Header Error during parsing */ unsigned int frdr:1; /* Frame Dropped by disabled port */ unsigned int reserved5:4; -} __rte_packed; +} __rte_packed_end; /* Set MAC address for a particular interface */ __rte_internal diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h index 25dbf72fd4..7886d466d1 100644 --- a/drivers/bus/dpaa/include/fsl_qman.h +++ b/drivers/bus/dpaa/include/fsl_qman.h @@ -221,6 +221,7 @@ static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd) } while (0) /* Scatter/Gather table entry */ +__rte_packed_begin struct qm_sg_entry { union { struct { @@ -273,7 +274,7 @@ struct qm_sg_entry { }; u16 val_off; }; -} __packed; +} __rte_packed_end; static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg) { return sg->addr; @@ -292,6 +293,7 @@ static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg) } while (0) /* See 1.5.8.1: "Enqueue Command" */ +__rte_packed_begin struct __rte_aligned(8) qm_eqcr_entry { u8 __dont_write_directly__verb; u8 dca; @@ -301,7 +303,7 @@ struct __rte_aligned(8) qm_eqcr_entry { u32 tag; struct qm_fd fd; /* this has alignment 8 */ u8 __reserved3[32]; -} __packed; +} __rte_packed_end; /* "Frame Dequeue Response" */ @@ -330,8 +332,9 @@ struct __rte_aligned(8) qm_dqrr_entry { /* "ERN Message Response" */ /* "FQ State Change Notification" */ -struct __rte_aligned(8) qm_mr_entry { +__rte_packed_begin struct __rte_aligned(8) qm_mr_entry { union { + __rte_packed_begin alignas(8) struct { u8 verb; u8 dca; @@ -341,7 +344,8 @@ struct __rte_aligned(8) qm_mr_entry { u32 fqid; /* 24-bit */ u32 tag; struct qm_fd fd; /* this has alignment 8 */ - } __packed ern; + } __rte_packed_end ern; + __rte_packed_begin alignas(8) struct { u8 verb; #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ @@ -359,7 +363,8 @@ struct __rte_aligned(8) qm_mr_entry { u32 fqid; /* 24-bit */ u32 tag; struct qm_fd fd; /* this has alignment 8 */ - } __packed dcern; + } __rte_packed_end dcern; + __rte_packed_begin alignas(8) struct { u8 verb; u8 fqs; /* Frame Queue Status */ @@ -367,10 +372,10 @@ struct __rte_aligned(8) qm_mr_entry { u32 fqid; /* 24-bit */ u32 contextB; u8 __reserved2[16]; - } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ + } __rte_packed_end fq; /* FQRN/FQRNI/FQRL/FQPN */ }; u8 __reserved2[32]; -} __packed; +} __rte_packed_end; #define QM_MR_VERB_VBIT 0x80 /* * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb @@ -405,7 +410,7 @@ struct __rte_aligned(8) qm_mr_entry { * latter has two inlines to assist with converting to/from the mant+exp * representation. 
*/ -struct qm_fqd_stashing { +__rte_packed_begin struct qm_fqd_stashing { /* See QM_STASHING_EXCL_<...> */ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ u8 exclusive; @@ -421,8 +426,8 @@ struct qm_fqd_stashing { u8 __reserved1:2; u8 exclusive; #endif -} __packed; -struct qm_fqd_taildrop { +} __rte_packed_end; +__rte_packed_begin struct qm_fqd_taildrop { #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ u16 __reserved1:3; u16 mant:8; @@ -432,8 +437,8 @@ struct qm_fqd_taildrop { u16 mant:8; u16 __reserved1:3; #endif -} __packed; -struct qm_fqd_oac { +} __rte_packed_end; +__rte_packed_begin struct qm_fqd_oac { /* "Overhead Accounting Control", see QM_OAC_<...> */ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ u8 oac:2; /* "Overhead Accounting Control" */ @@ -444,11 +449,11 @@ struct qm_fqd_oac { #endif /* Two's-complement value (-128 to +127) */ signed char oal; /* "Overhead Accounting Length" */ -} __packed; -struct qm_fqd { +} __rte_packed_end; +__rte_packed_begin struct qm_fqd { union { u8 orpc; - struct { + __rte_packed_begin struct { #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ u8 __reserved1:2; u8 orprws:3; @@ -460,13 +465,13 @@ struct qm_fqd { u8 orprws:3; u8 __reserved1:2; #endif - } __packed; + } __rte_packed_end; }; u8 cgid; u16 fq_ctrl; /* See QM_FQCTRL_<...> */ union { u16 dest_wq; - struct { + __rte_packed_begin struct { #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ u16 channel:13; /* qm_channel */ u16 wq:3; @@ -474,7 +479,7 @@ struct qm_fqd { u16 wq:3; u16 channel:13; /* qm_channel */ #endif - } __packed dest; + } __rte_packed_end dest; }; #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ u16 __reserved2:1; @@ -509,7 +514,7 @@ struct qm_fqd { }; /* Treat it as s/w portal stashing config */ /* see "FQD Context_A field used for [...]" */ - struct { + __rte_packed_begin struct { #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ struct qm_fqd_stashing stashing; /* @@ -523,10 +528,10 @@ struct qm_fqd { u16 context_hi; struct qm_fqd_stashing stashing; #endif - } __packed; + } __rte_packed_end; } context_a; struct qm_fqd_oac oac_query; -} __packed; +} __rte_packed_end; /* 64-bit converters for context_hi/lo */ static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd) { @@ -618,10 +623,10 @@ static inline u32 qm_fqd_taildrop_get(const struct qm_fqd_taildrop *td) * Slope = SA / (2 ^ Sn) * MaxP = 4 * (Pn + 1) */ -struct qm_cgr_wr_parm { +__rte_packed_begin struct qm_cgr_wr_parm { union { u32 word; - struct { + __rte_packed_begin struct { #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ u32 MA:8; u32 Mn:5; @@ -635,9 +640,9 @@ struct qm_cgr_wr_parm { u32 Mn:5; u32 MA:8; #endif - } __packed; + } __rte_packed_end; }; -} __packed; +} __rte_packed_end; /* * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding * management commands, this is padded to a 16-bit structure field, so that's @@ -645,10 +650,10 @@ struct qm_cgr_wr_parm { * these fields as follows; * CS threshold = TA * (2 ^ Tn) */ -struct qm_cgr_cs_thres { +__rte_packed_begin struct qm_cgr_cs_thres { union { u16 hword; - struct { + __rte_packed_begin struct { #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ u16 __reserved:3; u16 TA:8; @@ -658,15 +663,15 @@ struct qm_cgr_cs_thres { u16 TA:8; u16 __reserved:3; #endif - } __packed; + } __rte_packed_end; }; -} __packed; +} __rte_packed_end; /* * This identical structure of CGR fields is present in the "Init/Modify CGR" * commands and the "Query CGR" result. It's suctioned out here into its own * struct. 
*/ -struct __qm_mc_cgr { +__rte_packed_begin struct __qm_mc_cgr { struct qm_cgr_wr_parm wr_parm_g; struct qm_cgr_wr_parm wr_parm_y; struct qm_cgr_wr_parm wr_parm_r; @@ -694,7 +699,7 @@ struct __qm_mc_cgr { u16 __cs_thres; }; u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */ -} __packed; +} __rte_packed_end; #define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */ #define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/ #define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */ @@ -733,25 +738,25 @@ static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val, /* See 1.5.8.6.2: "CGR Test Write" */ /* See 1.5.8.6.3: "Query CGR" */ /* See 1.5.8.6.4: "Query Congestion Group State" */ -struct qm_mcc_initfq { +__rte_packed_begin struct qm_mcc_initfq { u8 __reserved1; u16 we_mask; /* Write Enable Mask */ u32 fqid; /* 24-bit */ u16 count; /* Initialises 'count+1' FQDs */ struct qm_fqd fqd; /* the FQD fields go here */ u8 __reserved3[30]; -} __packed; -struct qm_mcc_queryfq { +} __rte_packed_end; +__rte_packed_begin struct qm_mcc_queryfq { u8 __reserved1[3]; u32 fqid; /* 24-bit */ u8 __reserved2[56]; -} __packed; -struct qm_mcc_queryfq_np { +} __rte_packed_end; +__rte_packed_begin struct qm_mcc_queryfq_np { u8 __reserved1[3]; u32 fqid; /* 24-bit */ u8 __reserved2[56]; -} __packed; -struct qm_mcc_alterfq { +} __rte_packed_end; +__rte_packed_begin struct qm_mcc_alterfq { u8 __reserved1[3]; u32 fqid; /* 24-bit */ u8 __reserved2; @@ -759,37 +764,37 @@ struct qm_mcc_alterfq { u8 __reserved3[10]; u32 context_b; /* frame queue context b */ u8 __reserved4[40]; -} __packed; -struct qm_mcc_initcgr { +} __rte_packed_end; +__rte_packed_begin struct qm_mcc_initcgr { u8 __reserved1; u16 we_mask; /* Write Enable Mask */ struct __qm_mc_cgr cgr; /* CGR fields */ u8 __reserved2[2]; u8 cgid; u8 __reserved4[32]; -} __packed; -struct qm_mcc_cgrtestwrite { +} __rte_packed_end; +__rte_packed_begin struct qm_mcc_cgrtestwrite { u8 __reserved1[2]; u8 i_bcnt_hi:8;/* high 8-bits of 40-bit "Instant" */ u32 i_bcnt_lo; /* low 32-bits of 40-bit */ u8 __reserved2[23]; u8 cgid; u8 __reserved3[32]; -} __packed; -struct qm_mcc_querycgr { +} __rte_packed_end; +__rte_packed_begin struct qm_mcc_querycgr { u8 __reserved1[30]; u8 cgid; u8 __reserved2[32]; -} __packed; -struct qm_mcc_querycongestion { +} __rte_packed_end; +__rte_packed_begin struct qm_mcc_querycongestion { u8 __reserved[63]; -} __packed; -struct qm_mcc_querywq { +} __rte_packed_end; +__rte_packed_begin struct qm_mcc_querywq { u8 __reserved; /* select channel if verb != QUERYWQ_DEDICATED */ union { u16 channel_wq; /* ignores wq (3 lsbits) */ - struct { + __rte_packed_begin struct { #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ u16 id:13; /* qm_channel */ u16 __reserved1:3; @@ -797,12 +802,12 @@ struct qm_mcc_querywq { u16 __reserved1:3; u16 id:13; /* qm_channel */ #endif - } __packed channel; + } __rte_packed_end channel; }; u8 __reserved2[60]; -} __packed; +} __rte_packed_end; -struct qm_mc_command { +__rte_packed_begin struct qm_mc_command { u8 __dont_write_directly__verb; union { struct qm_mcc_initfq initfq; @@ -815,7 +820,7 @@ struct qm_mc_command { struct qm_mcc_querycongestion querycongestion; struct qm_mcc_querywq querywq; }; -} __packed; +} __rte_packed_end; /* INITFQ-specific flags */ #define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */ @@ -842,15 +847,15 @@ struct qm_mc_command { #define QM_CGR_WE_CS_THRES 0x0002 #define QM_CGR_WE_MODE 0x0001 -struct qm_mcr_initfq { +__rte_packed_begin struct qm_mcr_initfq { 
u8 __reserved1[62]; -} __packed; -struct qm_mcr_queryfq { +} __rte_packed_end; +__rte_packed_begin struct qm_mcr_queryfq { u8 __reserved1[8]; struct qm_fqd fqd; /* the FQD fields are here */ u8 __reserved2[30]; -} __packed; -struct qm_mcr_queryfq_np { +} __rte_packed_end; +__rte_packed_begin struct qm_mcr_queryfq_np { u8 __reserved1; u8 state; /* QM_MCR_NP_STATE_*** */ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ @@ -929,16 +934,16 @@ struct qm_mcr_queryfq_np { u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */ #endif -} __packed; +} __rte_packed_end; -struct qm_mcr_alterfq { +__rte_packed_begin struct qm_mcr_alterfq { u8 fqs; /* Frame Queue Status */ u8 __reserved1[61]; -} __packed; -struct qm_mcr_initcgr { +} __rte_packed_end; +__rte_packed_begin struct qm_mcr_initcgr { u8 __reserved1[62]; -} __packed; -struct qm_mcr_cgrtestwrite { +} __rte_packed_end; +__rte_packed_begin struct qm_mcr_cgrtestwrite { u16 __reserved1; struct __qm_mc_cgr cgr; /* CGR fields */ u8 __reserved2[3]; @@ -953,8 +958,8 @@ struct qm_mcr_cgrtestwrite { u16 wr_prob_y; u16 wr_prob_r; u8 __reserved5[8]; -} __packed; -struct qm_mcr_querycgr { +} __rte_packed_end; +__rte_packed_begin struct qm_mcr_querycgr { u16 __reserved1; struct __qm_mc_cgr cgr; /* CGR fields */ u8 __reserved2[3]; @@ -990,21 +995,21 @@ struct qm_mcr_querycgr { u32 cscn_targ_swp[4]; u8 __reserved5[16]; }; -} __packed; +} __rte_packed_end; struct __qm_mcr_querycongestion { u32 state[8]; }; -struct qm_mcr_querycongestion { +__rte_packed_begin struct qm_mcr_querycongestion { u8 __reserved[30]; /* Access this struct using QM_MCR_QUERYCONGESTION() */ struct __qm_mcr_querycongestion state; -} __packed; -struct qm_mcr_querywq { +} __rte_packed_end; +__rte_packed_begin struct qm_mcr_querywq { union { u16 channel_wq; /* ignores wq (3 lsbits) */ - struct { + __rte_packed_begin struct { #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ u16 id:13; /* qm_channel */ u16 __reserved:3; @@ -1012,13 +1017,13 @@ struct qm_mcr_querywq { u16 __reserved:3; u16 id:13; /* qm_channel */ #endif - } __packed channel; + } __rte_packed_end channel; }; u8 __reserved[28]; u32 wq_len[8]; -} __packed; +} __rte_packed_end; -struct qm_mc_result { +__rte_packed_begin struct qm_mc_result { u8 verb; u8 result; union { @@ -1032,7 +1037,7 @@ struct qm_mc_result { struct qm_mcr_querycongestion querycongestion; struct qm_mcr_querywq querywq; }; -} __packed; +} __rte_packed_end; #define QM_MCR_VERB_RRID 0x80 #define QM_MCR_VERB_MASK QM_MCC_VERB_MASK diff --git a/drivers/bus/ifpga/bus_ifpga_driver.h b/drivers/bus/ifpga/bus_ifpga_driver.h index af151ffd4b..c292144130 100644 --- a/drivers/bus/ifpga/bus_ifpga_driver.h +++ b/drivers/bus/ifpga/bus_ifpga_driver.h @@ -29,10 +29,10 @@ struct rte_afu_driver; #define IFPGA_BUS_BITSTREAM_PATH_MAX_LEN 256 -struct rte_afu_uuid { +__rte_packed_begin struct rte_afu_uuid { uint64_t uuid_low; uint64_t uuid_high; -} __rte_packed; +} __rte_packed_end; #define IFPGA_BUS_DEV_PORT_MAX 4 @@ -40,10 +40,10 @@ struct rte_afu_uuid { * A structure describing an ID for a AFU driver. Each driver provides a * table of these IDs for each device that it supports. */ -struct rte_afu_id { +__rte_packed_begin struct rte_afu_id { struct rte_afu_uuid uuid; int port; /**< port number */ -} __rte_packed; +} __rte_packed_end; /** * A structure PR (Partial Reconfiguration) configuration AFU driver. 
diff --git a/drivers/bus/vmbus/rte_vmbus_reg.h b/drivers/bus/vmbus/rte_vmbus_reg.h index e3299aa871..239ddbe5f0 100644 --- a/drivers/bus/vmbus/rte_vmbus_reg.h +++ b/drivers/bus/vmbus/rte_vmbus_reg.h @@ -12,14 +12,14 @@ #define VMBUS_MSG_DSIZE_MAX 240 #define VMBUS_MSG_SIZE 256 -struct vmbus_message { +__rte_packed_begin struct vmbus_message { uint32_t type; /* HYPERV_MSGTYPE_ */ uint8_t dsize; /* data size */ uint8_t flags; /* VMBUS_MSGFLAG_ */ uint16_t rsvd; uint64_t id; uint8_t data[VMBUS_MSG_DSIZE_MAX]; -} __rte_packed; +} __rte_packed_end; #define VMBUS_MSGFLAG_PENDING 0x01 @@ -27,10 +27,10 @@ struct vmbus_message { * Hyper-V Monitor Notification Facility */ -struct vmbus_mon_trig { +__rte_packed_begin struct vmbus_mon_trig { RTE_ATOMIC(uint32_t) pending; uint32_t armed; -} __rte_packed; +} __rte_packed_end; #define VMBUS_MONTRIGS_MAX 4 #define VMBUS_MONTRIG_LEN 32 @@ -38,13 +38,13 @@ struct vmbus_mon_trig { /* * Hyper-V Monitor Notification Facility */ -struct hyperv_mon_param { +__rte_packed_begin struct hyperv_mon_param { uint32_t connid; uint16_t evtflag_ofs; uint16_t rsvd; -} __rte_packed; +} __rte_packed_end; -struct vmbus_mon_page { +__rte_packed_begin struct vmbus_mon_page { uint32_t state; uint32_t rsvd1; @@ -57,13 +57,13 @@ struct vmbus_mon_page { struct hyperv_mon_param param[VMBUS_MONTRIGS_MAX][VMBUS_MONTRIG_LEN]; uint8_t rsvd4[1984]; -} __rte_packed; +} __rte_packed_end; /* * Buffer ring */ -struct vmbus_bufring { +__rte_packed_begin struct vmbus_bufring { volatile uint32_t windex; volatile uint32_t rindex; @@ -108,7 +108,7 @@ struct vmbus_bufring { * !!! DO NOT place any fields below this !!! */ uint8_t data[]; -} __rte_packed; +} __rte_packed_end; /* * Channel packets @@ -137,26 +137,26 @@ vmbus_chanpkt_getlen(uint16_t pktlen) /* * GPA stuffs. 
*/ -struct vmbus_gpa_range { +__rte_packed_begin struct vmbus_gpa_range { uint32_t len; uint32_t ofs; uint64_t page[]; -} __rte_packed; +} __rte_packed_end; /* This is actually vmbus_gpa_range.gpa_page[1] */ -struct vmbus_gpa { +__rte_packed_begin struct vmbus_gpa { uint32_t len; uint32_t ofs; uint64_t page; -} __rte_packed; +} __rte_packed_end; -struct vmbus_chanpkt_hdr { +__rte_packed_begin struct vmbus_chanpkt_hdr { uint16_t type; /* VMBUS_CHANPKT_TYPE_ */ uint16_t hlen; /* header len, in 8 bytes */ uint16_t tlen; /* total len, in 8 bytes */ uint16_t flags; /* VMBUS_CHANPKT_FLAG_ */ uint64_t xactid; -} __rte_packed; +} __rte_packed_end; static inline uint32_t vmbus_chanpkt_datalen(const struct vmbus_chanpkt_hdr *pkt) @@ -165,29 +165,29 @@ vmbus_chanpkt_datalen(const struct vmbus_chanpkt_hdr *pkt) - vmbus_chanpkt_getlen(pkt->hlen); } -struct vmbus_chanpkt { +__rte_packed_begin struct vmbus_chanpkt { struct vmbus_chanpkt_hdr hdr; -} __rte_packed; +} __rte_packed_end; -struct vmbus_rxbuf_desc { +__rte_packed_begin struct vmbus_rxbuf_desc { uint32_t len; uint32_t ofs; -} __rte_packed; +} __rte_packed_end; -struct vmbus_chanpkt_rxbuf { +__rte_packed_begin struct vmbus_chanpkt_rxbuf { struct vmbus_chanpkt_hdr hdr; uint16_t rxbuf_id; uint16_t rsvd; uint32_t rxbuf_cnt; struct vmbus_rxbuf_desc rxbuf[]; -} __rte_packed; +} __rte_packed_end; -struct vmbus_chanpkt_sglist { +__rte_packed_begin struct vmbus_chanpkt_sglist { struct vmbus_chanpkt_hdr hdr; uint32_t rsvd; uint32_t gpa_cnt; struct vmbus_gpa gpa[]; -} __rte_packed; +} __rte_packed_end; /* * Channel messages @@ -213,39 +213,39 @@ struct vmbus_chanpkt_sglist { #define VMBUS_CHANMSG_TYPE_DISCONNECT 16 /* REQ */ #define VMBUS_CHANMSG_TYPE_MAX 22 -struct vmbus_chanmsg_hdr { +__rte_packed_begin struct vmbus_chanmsg_hdr { uint32_t type; /* VMBUS_CHANMSG_TYPE_ */ uint32_t rsvd; -} __rte_packed; +} __rte_packed_end; /* VMBUS_CHANMSG_TYPE_CONNECT */ -struct vmbus_chanmsg_connect { +__rte_packed_begin struct vmbus_chanmsg_connect { struct vmbus_chanmsg_hdr hdr; uint32_t ver; uint32_t rsvd; uint64_t evtflags; uint64_t mnf1; uint64_t mnf2; -} __rte_packed; +} __rte_packed_end; /* VMBUS_CHANMSG_TYPE_CONNECT_RESP */ -struct vmbus_chanmsg_connect_resp { +__rte_packed_begin struct vmbus_chanmsg_connect_resp { struct vmbus_chanmsg_hdr hdr; uint8_t done; -} __rte_packed; +} __rte_packed_end; /* VMBUS_CHANMSG_TYPE_CHREQUEST */ -struct vmbus_chanmsg_chrequest { +__rte_packed_begin struct vmbus_chanmsg_chrequest { struct vmbus_chanmsg_hdr hdr; -} __rte_packed; +} __rte_packed_end; /* VMBUS_CHANMSG_TYPE_DISCONNECT */ -struct vmbus_chanmsg_disconnect { +__rte_packed_begin struct vmbus_chanmsg_disconnect { struct vmbus_chanmsg_hdr hdr; -} __rte_packed; +} __rte_packed_end; /* VMBUS_CHANMSG_TYPE_CHOPEN */ -struct vmbus_chanmsg_chopen { +__rte_packed_begin struct vmbus_chanmsg_chopen { struct vmbus_chanmsg_hdr hdr; uint32_t chanid; uint32_t openid; @@ -254,73 +254,73 @@ struct vmbus_chanmsg_chopen { uint32_t txbr_pgcnt; #define VMBUS_CHANMSG_CHOPEN_UDATA_SIZE 120 uint8_t udata[VMBUS_CHANMSG_CHOPEN_UDATA_SIZE]; -} __rte_packed; +} __rte_packed_end; /* VMBUS_CHANMSG_TYPE_CHOPEN_RESP */ -struct vmbus_chanmsg_chopen_resp { +__rte_packed_begin struct vmbus_chanmsg_chopen_resp { struct vmbus_chanmsg_hdr hdr; uint32_t chanid; uint32_t openid; uint32_t status; -} __rte_packed; +} __rte_packed_end; /* VMBUS_CHANMSG_TYPE_GPADL_CONN */ -struct vmbus_chanmsg_gpadl_conn { +__rte_packed_begin struct vmbus_chanmsg_gpadl_conn { struct vmbus_chanmsg_hdr hdr; uint32_t chanid; 
uint32_t gpadl; uint16_t range_len; uint16_t range_cnt; struct vmbus_gpa_range range; -} __rte_packed; +} __rte_packed_end; #define VMBUS_CHANMSG_GPADL_CONN_PGMAX 26 /* VMBUS_CHANMSG_TYPE_GPADL_SUBCONN */ -struct vmbus_chanmsg_gpadl_subconn { +__rte_packed_begin struct vmbus_chanmsg_gpadl_subconn { struct vmbus_chanmsg_hdr hdr; uint32_t msgno; uint32_t gpadl; uint64_t gpa_page[]; -} __rte_packed; +} __rte_packed_end; #define VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX 28 /* VMBUS_CHANMSG_TYPE_GPADL_CONNRESP */ -struct vmbus_chanmsg_gpadl_connresp { +__rte_packed_begin struct vmbus_chanmsg_gpadl_connresp { struct vmbus_chanmsg_hdr hdr; uint32_t chanid; uint32_t gpadl; uint32_t status; -} __rte_packed; +} __rte_packed_end; /* VMBUS_CHANMSG_TYPE_CHCLOSE */ -struct vmbus_chanmsg_chclose { +__rte_packed_begin struct vmbus_chanmsg_chclose { struct vmbus_chanmsg_hdr hdr; uint32_t chanid; -} __rte_packed; +} __rte_packed_end; /* VMBUS_CHANMSG_TYPE_GPADL_DISCONN */ -struct vmbus_chanmsg_gpadl_disconn { +__rte_packed_begin struct vmbus_chanmsg_gpadl_disconn { struct vmbus_chanmsg_hdr hdr; uint32_t chanid; uint32_t gpadl; -} __rte_packed; +} __rte_packed_end; /* VMBUS_CHANMSG_TYPE_CHFREE */ -struct vmbus_chanmsg_chfree { +__rte_packed_begin struct vmbus_chanmsg_chfree { struct vmbus_chanmsg_hdr hdr; uint32_t chanid; -} __rte_packed; +} __rte_packed_end; /* VMBUS_CHANMSG_TYPE_CHRESCIND */ -struct vmbus_chanmsg_chrescind { +__rte_packed_begin struct vmbus_chanmsg_chrescind { struct vmbus_chanmsg_hdr hdr; uint32_t chanid; -} __rte_packed; +} __rte_packed_end; /* VMBUS_CHANMSG_TYPE_CHOFFER */ -struct vmbus_chanmsg_choffer { +__rte_packed_begin struct vmbus_chanmsg_choffer { struct vmbus_chanmsg_hdr hdr; rte_uuid_t chtype; rte_uuid_t chinst; @@ -337,7 +337,7 @@ struct vmbus_chanmsg_choffer { uint8_t flags1; /* VMBUS_CHOFFER_FLAG1_ */ uint16_t flags2; uint32_t connid; -} __rte_packed; +} __rte_packed_end; #define VMBUS_CHOFFER_FLAG1_HASMNF 0x01 -- 2.34.1