DPDK patches and discussions
From: Soumyadeep Hore <soumyadeep.hore@intel.com>
To: bruce.richardson@intel.com, anatoly.burakov@intel.com
Cc: dev@dpdk.org
Subject: [PATCH v2 17/21] drivers: add flex array support and fix issues
Date: Tue,  4 Jun 2024 08:06:07 +0000	[thread overview]
Message-ID: <20240604080611.2197835-18-soumyadeep.hore@intel.com> (raw)
In-Reply-To: <20240604080611.2197835-1-soumyadeep.hore@intel.com>

Based on internal feedback received during Linux upstreaming of the
IDPF driver, as well as guidance available online, 1-sized array
fields in structures are discouraged, especially in new Linux drivers
that are going to be upstreamed. Flexible array fields are
recommended instead for dynamically sized structures.

Some fixes are introduced on top of these code changes so that DPDK
compiles.
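
As an illustration only (hypothetical structures, not code from this
patch or from virtchnl2.h), the two patterns differ in how the total
message size is computed. Note that in this tree the trailing arrays
keep a length of 1 via the new STRUCT_VAR_LEN define, so callers still
subtract one element, as idpf_vc_queue_grps_add() does below.

    /*
     * Sketch under the assumptions above: hypothetical names only.
     * A trailing 1-sized array already counts one element in sizeof(),
     * so callers subtract it; a true flexible array member does not.
     */
    #include <stddef.h>
    #include <stdint.h>

    struct msg_one_sized {                  /* old style / STRUCT_VAR_LEN */
            uint16_t num_elems;
            uint8_t  pad[6];
            uint32_t elems[1];              /* really num_elems entries */
    };

    struct msg_flex {                       /* Linux-preferred flex array */
            uint16_t num_elems;
            uint8_t  pad[6];
            uint32_t elems[];               /* num_elems entries follow */
    };

    static inline size_t msg_one_sized_len(uint16_t n)
    {
            return sizeof(struct msg_one_sized) + (n - 1) * sizeof(uint32_t);
    }

    static inline size_t msg_flex_len(uint16_t n)
    {
            return sizeof(struct msg_flex) + n * sizeof(uint32_t);
    }

Both forms yield the same total for n >= 1; the flexible array form
simply removes the implicit first element from sizeof().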

Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
 drivers/common/idpf/base/virtchnl2.h       | 466 ++++-----------------
 drivers/common/idpf/idpf_common_virtchnl.c |   2 +-
 drivers/net/cpfl/cpfl_ethdev.c             |  28 +-
 3 files changed, 86 insertions(+), 410 deletions(-)

diff --git a/drivers/common/idpf/base/virtchnl2.h b/drivers/common/idpf/base/virtchnl2.h
index 468b8f355d..fb017b1306 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -63,6 +63,10 @@ enum virtchnl2_status {
 #define VIRTCHNL2_CHECK_STRUCT_LEN(n, X)	\
 	static_assert((n) == sizeof(struct X),	\
 		      "Structure length does not match with the expected value")
+#define VIRTCHNL2_CHECK_STRUCT_VAR_LEN(n, X, T)		\
+	VIRTCHNL2_CHECK_STRUCT_LEN(n, X)
+
+#define STRUCT_VAR_LEN		1
 
 /**
  * New major set of opcodes introduced and so leaving room for
@@ -696,10 +700,9 @@ VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
 struct virtchnl2_queue_reg_chunks {
 	__le16 num_chunks;
 	u8 pad[6];
-	struct virtchnl2_queue_reg_chunk chunks[1];
+	struct virtchnl2_queue_reg_chunk chunks[STRUCT_VAR_LEN];
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_queue_reg_chunks);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(40, virtchnl2_queue_reg_chunks, chunks);
 
 /**
  * enum virtchnl2_vport_flags - Vport flags
@@ -773,7 +776,7 @@ struct virtchnl2_create_vport {
 	u8 pad[20];
 	struct virtchnl2_queue_reg_chunks chunks;
 };
-VIRTCHNL2_CHECK_STRUCT_LEN(192, virtchnl2_create_vport);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(192, virtchnl2_create_vport, chunks.chunks);
 
 /**
  * struct virtchnl2_vport - Vport identifier information
@@ -859,10 +862,9 @@ struct virtchnl2_config_tx_queues {
 	__le32 vport_id;
 	__le16 num_qinfo;
 	u8 pad[10];
-	struct virtchnl2_txq_info qinfo[1];
+	struct virtchnl2_txq_info qinfo[STRUCT_VAR_LEN];
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(72, virtchnl2_config_tx_queues);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(72, virtchnl2_config_tx_queues, qinfo);
 
 /**
  * struct virtchnl2_rxq_info - Receive queue config info
@@ -940,10 +942,9 @@ struct virtchnl2_config_rx_queues {
 	__le32 vport_id;
 	__le16 num_qinfo;
 	u8 pad[18];
-	struct virtchnl2_rxq_info qinfo[1];
+	struct virtchnl2_rxq_info qinfo[STRUCT_VAR_LEN];
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(112, virtchnl2_config_rx_queues);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(112, virtchnl2_config_rx_queues, qinfo);
 
 /**
  * struct virtchnl2_add_queues - Data for VIRTCHNL2_OP_ADD_QUEUES
@@ -973,16 +974,15 @@ struct virtchnl2_add_queues {
 
 	struct virtchnl2_queue_reg_chunks chunks;
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_add_queues);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(56, virtchnl2_add_queues, chunks.chunks);
 
 /* Queue Groups Extension */
 /**
  * struct virtchnl2_rx_queue_group_info - RX queue group info
- * @rss_lut_size: IN/OUT, user can ask to update rss_lut size originally
- *		  allocated by CreateVport command. New size will be returned
- *		  if allocation succeeded, otherwise original rss_size from
- *		  CreateVport will be returned.
+ * @rss_lut_size: User can ask to update rss_lut size originally allocated by
+ *		  CreateVport command. New size will be returned if allocation
+ *		  succeeded, otherwise original rss_size from CreateVport
+ *		  will be returned.
  * @pad: Padding for future extensions
  */
 struct virtchnl2_rx_queue_group_info {
@@ -1010,7 +1010,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rx_queue_group_info);
  * @cir_pad: Future extension purpose for CIR only
  * @pad2: Padding for future extensions
  */
-struct virtchnl2_tx_queue_group_info { /* IN */
+struct virtchnl2_tx_queue_group_info {
 	u8 tx_tc;
 	u8 priority;
 	u8 is_sp;
@@ -1043,19 +1043,17 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_group_id);
 /**
  * struct virtchnl2_queue_group_info - Queue group info
  * @qg_id: Queue group ID
- * @num_tx_q: Number of TX queues
- * @num_tx_complq: Number of completion queues
- * @num_rx_q: Number of RX queues
- * @num_rx_bufq: Number of RX buffer queues
+ * @num_tx_q: Number of TX queues requested
+ * @num_tx_complq: Number of completion queues requested
+ * @num_rx_q: Number of RX queues requested
+ * @num_rx_bufq: Number of RX buffer queues requested
  * @tx_q_grp_info: TX queue group info
  * @rx_q_grp_info: RX queue group info
  * @pad: Padding for future extensions
- * @chunks: Queue register chunks
+ * @chunks: Queue register chunks from CP
  */
 struct virtchnl2_queue_group_info {
-	/* IN */
 	struct virtchnl2_queue_group_id qg_id;
-	/* IN, Number of queue of different types in the group. */
 	__le16 num_tx_q;
 	__le16 num_tx_complq;
 	__le16 num_rx_q;
@@ -1064,56 +1062,52 @@ struct virtchnl2_queue_group_info {
 	struct virtchnl2_tx_queue_group_info tx_q_grp_info;
 	struct virtchnl2_rx_queue_group_info rx_q_grp_info;
 	u8 pad[40];
-	struct virtchnl2_queue_reg_chunks chunks; /* OUT */
-};
-
-VIRTCHNL2_CHECK_STRUCT_LEN(120, virtchnl2_queue_group_info);
-
-/**
- * struct virtchnl2_queue_groups - Queue groups list
- * @num_queue_groups: Total number of queue groups
- * @pad: Padding for future extensions
- * @groups: Array of queue group info
- */
-struct virtchnl2_queue_groups {
-	__le16 num_queue_groups;
-	u8 pad[6];
-	struct virtchnl2_queue_group_info groups[1];
+	struct virtchnl2_queue_reg_chunks chunks;
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_queue_groups);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(120, virtchnl2_queue_group_info, chunks.chunks);
 
 /**
  * struct virtchnl2_add_queue_groups - Add queue groups
- * @vport_id: IN, vport_id to add queue group to, same as allocated by
+ * @vport_id: Vport_id to add queue group to, same as allocated by
  *	      CreateVport. NA for mailbox and other types not assigned to vport.
+ * @num_queue_groups: Total number of queue groups
  * @pad: Padding for future extensions
- * @qg_info: IN/OUT. List of all the queue groups
+#ifndef FLEX_ARRAY_SUPPORT
+ * @groups: List of all the queue group info structures
+#endif
  *
  * PF sends this message to request additional transmit/receive queue groups
  * beyond the ones that were assigned via CREATE_VPORT request.
  * virtchnl2_add_queue_groups structure is used to specify the number of each
  * type of queues. CP responds with the same structure with the actual number of
- * groups and queues assigned followed by num_queue_groups and num_chunks of
- * virtchnl2_queue_groups and virtchnl2_queue_chunk structures.
+ * groups and queues assigned followed by num_queue_groups and groups of
+ * virtchnl2_queue_group_info and virtchnl2_queue_chunk structures.
+#ifdef FLEX_ARRAY_SUPPORT
+ * (Note: There is no specific field for the queue group info but are added at
+ * the end of the add queue groups message. Receiver of this message is expected
+ * to extract the queue group info accordingly. Reason for doing this is because
+ * compiler doesn't allow nested flexible array fields).
+#endif
  *
  * Associated with VIRTCHNL2_OP_ADD_QUEUE_GROUPS.
  */
 struct virtchnl2_add_queue_groups {
 	__le32 vport_id;
-	u8 pad[4];
-	struct virtchnl2_queue_groups qg_info;
+	__le16 num_queue_groups;
+	u8 pad[10];
+	struct virtchnl2_queue_group_info groups[STRUCT_VAR_LEN];
+
 };
 
 VIRTCHNL2_CHECK_STRUCT_LEN(136, virtchnl2_add_queue_groups);
 
 /**
  * struct virtchnl2_delete_queue_groups - Delete queue groups
- * @vport_id: IN, vport_id to delete queue group from, same as allocated by
+ * @vport_id: Vport ID to delete queue group from, same as allocated by
  *	      CreateVport.
- * @num_queue_groups: IN/OUT, Defines number of groups provided
+ * @num_queue_groups: Defines number of groups provided
  * @pad: Padding
- * @qg_ids: IN, IDs & types of Queue Groups to delete
+ * @qg_ids: IDs & types of Queue Groups to delete
  *
  * PF sends this message to delete queue groups.
  * PF sends virtchnl2_delete_queue_groups struct to specify the queue groups
@@ -1127,10 +1121,9 @@ struct virtchnl2_delete_queue_groups {
 	__le16 num_queue_groups;
 	u8 pad[2];
 
-	struct virtchnl2_queue_group_id qg_ids[1];
+	struct virtchnl2_queue_group_id qg_ids[STRUCT_VAR_LEN];
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_delete_queue_groups);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(16, virtchnl2_delete_queue_groups, qg_ids);
 
 /**
  * struct virtchnl2_vector_chunk - Structure to specify a chunk of contiguous
@@ -1188,10 +1181,9 @@ struct virtchnl2_vector_chunks {
 	__le16 num_vchunks;
 	u8 pad[14];
 
-	struct virtchnl2_vector_chunk vchunks[1];
+	struct virtchnl2_vector_chunk vchunks[STRUCT_VAR_LEN];
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(48, virtchnl2_vector_chunks);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(48, virtchnl2_vector_chunks, vchunks);
 
 /**
  * struct virtchnl2_alloc_vectors - Vector allocation info
@@ -1213,8 +1205,7 @@ struct virtchnl2_alloc_vectors {
 
 	struct virtchnl2_vector_chunks vchunks;
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(64, virtchnl2_alloc_vectors);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(64, virtchnl2_alloc_vectors, vchunks.vchunks);
 
 /**
  * struct virtchnl2_rss_lut - RSS LUT info
@@ -1235,10 +1226,9 @@ struct virtchnl2_rss_lut {
 	__le16 lut_entries_start;
 	__le16 lut_entries;
 	u8 pad[4];
-	__le32 lut[1];
+	__le32 lut[STRUCT_VAR_LEN];
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_lut);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(16, virtchnl2_rss_lut, lut);
 
 /**
  * struct virtchnl2_rss_hash - RSS hash info
@@ -1387,10 +1377,9 @@ struct virtchnl2_ptype {
 	u8 ptype_id_8;
 	u8 proto_id_count;
 	__le16 pad;
-	__le16 proto_id[1];
+	__le16 proto_id[STRUCT_VAR_LEN];
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptype);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(8, virtchnl2_ptype, proto_id);
 
 /**
  * struct virtchnl2_get_ptype_info - Packet type info
@@ -1426,7 +1415,7 @@ struct virtchnl2_get_ptype_info {
 	__le16 start_ptype_id;
 	__le16 num_ptypes;
 	__le32 pad;
-	struct virtchnl2_ptype ptype[1];
+	struct virtchnl2_ptype ptype[STRUCT_VAR_LEN];
 };
 
 VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_get_ptype_info);
@@ -1627,10 +1616,9 @@ VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
 struct virtchnl2_queue_chunks {
 	__le16 num_chunks;
 	u8 pad[6];
-	struct virtchnl2_queue_chunk chunks[1];
+	struct virtchnl2_queue_chunk chunks[STRUCT_VAR_LEN];
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_chunks);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(24, virtchnl2_queue_chunks, chunks);
 
 /**
  * struct virtchnl2_del_ena_dis_queues - Enable/disable queues info
@@ -1652,8 +1640,7 @@ struct virtchnl2_del_ena_dis_queues {
 
 	struct virtchnl2_queue_chunks chunks;
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_del_ena_dis_queues);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(32, virtchnl2_del_ena_dis_queues, chunks.chunks);
 
 /**
  * struct virtchnl2_queue_vector - Queue to vector mapping
@@ -1697,10 +1684,10 @@ struct virtchnl2_queue_vector_maps {
 	__le32 vport_id;
 	__le16 num_qv_maps;
 	u8 pad[10];
-	struct virtchnl2_queue_vector qv_maps[1];
-};
 
-VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_queue_vector_maps);
+	struct virtchnl2_queue_vector qv_maps[STRUCT_VAR_LEN];
+};
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(40, virtchnl2_queue_vector_maps, qv_maps);
 
 /**
  * struct virtchnl2_loopback - Loopback info
@@ -1752,10 +1739,10 @@ struct virtchnl2_mac_addr_list {
 	__le32 vport_id;
 	__le16 num_mac_addr;
 	u8 pad[2];
-	struct virtchnl2_mac_addr mac_addr_list[1];
-};
 
-VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mac_addr_list);
+	struct virtchnl2_mac_addr mac_addr_list[STRUCT_VAR_LEN];
+};
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(16, virtchnl2_mac_addr_list, mac_addr_list);
 
 /**
  * struct virtchnl2_promisc_info - Promiscuous type information
@@ -1854,10 +1841,10 @@ struct virtchnl2_ptp_tx_tstamp {
 	__le16 num_latches;
 	__le16 latch_size;
 	u8 pad[4];
-	struct virtchnl2_ptp_tx_tstamp_entry ptp_tx_tstamp_entries[1];
+	struct virtchnl2_ptp_tx_tstamp_entry ptp_tx_tstamp_entries[STRUCT_VAR_LEN];
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_tx_tstamp);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(24, virtchnl2_ptp_tx_tstamp,
+			       ptp_tx_tstamp_entries);
 
 /**
  * struct virtchnl2_get_ptp_caps - Get PTP capabilities
@@ -1882,8 +1869,8 @@ struct virtchnl2_get_ptp_caps {
 	struct virtchnl2_ptp_device_clock_control device_clock_control;
 	struct virtchnl2_ptp_tx_tstamp tx_tstamp;
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_get_ptp_caps);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(88, virtchnl2_get_ptp_caps,
+			       tx_tstamp.ptp_tx_tstamp_entries);
 
 /**
  * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes tx tstamp
@@ -1918,13 +1905,12 @@ VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
  */
 struct virtchnl2_ptp_tx_tstamp_latches {
 	__le16 num_latches;
-	/* latch size expressed in bits */
 	__le16 latch_size;
 	u8 pad[4];
-	struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[1];
+	struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[STRUCT_VAR_LEN];
 };
-
-VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_tx_tstamp_latches);
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(24, virtchnl2_ptp_tx_tstamp_latches,
+			       tstamp_latches);
 
 static inline const char *virtchnl2_op_str(__le32 v_opcode)
 {
@@ -2002,314 +1988,4 @@ static inline const char *virtchnl2_op_str(__le32 v_opcode)
 	}
 }
 
-/**
- * virtchnl2_vc_validate_vf_msg
- * @ver: Virtchnl2 version info
- * @v_opcode: Opcode for the message
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * Validate msg format against struct for each opcode.
- */
-static inline int
-virtchnl2_vc_validate_vf_msg(__rte_unused struct virtchnl2_version_info *ver, u32 v_opcode,
-			     u8 *msg, __le16 msglen)
-{
-	bool err_msg_format = false;
-	__le32 valid_len = 0;
-
-	/* Validate message length */
-	switch (v_opcode) {
-	case VIRTCHNL2_OP_VERSION:
-		valid_len = sizeof(struct virtchnl2_version_info);
-		break;
-	case VIRTCHNL2_OP_GET_CAPS:
-		valid_len = sizeof(struct virtchnl2_get_capabilities);
-		break;
-	case VIRTCHNL2_OP_CREATE_VPORT:
-		valid_len = sizeof(struct virtchnl2_create_vport);
-		if (msglen >= valid_len) {
-			struct virtchnl2_create_vport *cvport =
-				(struct virtchnl2_create_vport *)msg;
-
-			if (cvport->chunks.num_chunks == 0) {
-				/* Zero chunks is allowed as input */
-				break;
-			}
-
-			valid_len += (cvport->chunks.num_chunks - 1) *
-				      sizeof(struct virtchnl2_queue_reg_chunk);
-		}
-		break;
-	case VIRTCHNL2_OP_NON_FLEX_CREATE_ADI:
-		valid_len = sizeof(struct virtchnl2_non_flex_create_adi);
-		if (msglen >= valid_len) {
-			struct virtchnl2_non_flex_create_adi *cadi =
-				(struct virtchnl2_non_flex_create_adi *)msg;
-
-			if (cadi->chunks.num_chunks == 0) {
-				/* Zero chunks is allowed as input */
-				break;
-			}
-
-			if (cadi->vchunks.num_vchunks == 0) {
-				err_msg_format = true;
-				break;
-			}
-			valid_len += (cadi->chunks.num_chunks - 1) *
-				      sizeof(struct virtchnl2_queue_reg_chunk);
-			valid_len += (cadi->vchunks.num_vchunks - 1) *
-				      sizeof(struct virtchnl2_vector_chunk);
-		}
-		break;
-	case VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI:
-		valid_len = sizeof(struct virtchnl2_non_flex_destroy_adi);
-		break;
-	case VIRTCHNL2_OP_DESTROY_VPORT:
-	case VIRTCHNL2_OP_ENABLE_VPORT:
-	case VIRTCHNL2_OP_DISABLE_VPORT:
-		valid_len = sizeof(struct virtchnl2_vport);
-		break;
-	case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
-		valid_len = sizeof(struct virtchnl2_config_tx_queues);
-		if (msglen >= valid_len) {
-			struct virtchnl2_config_tx_queues *ctq =
-				(struct virtchnl2_config_tx_queues *)msg;
-			if (ctq->num_qinfo == 0) {
-				err_msg_format = true;
-				break;
-			}
-			valid_len += (ctq->num_qinfo - 1) *
-				     sizeof(struct virtchnl2_txq_info);
-		}
-		break;
-	case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
-		valid_len = sizeof(struct virtchnl2_config_rx_queues);
-		if (msglen >= valid_len) {
-			struct virtchnl2_config_rx_queues *crq =
-				(struct virtchnl2_config_rx_queues *)msg;
-			if (crq->num_qinfo == 0) {
-				err_msg_format = true;
-				break;
-			}
-			valid_len += (crq->num_qinfo - 1) *
-				     sizeof(struct virtchnl2_rxq_info);
-		}
-		break;
-	case VIRTCHNL2_OP_ADD_QUEUES:
-		valid_len = sizeof(struct virtchnl2_add_queues);
-		if (msglen >= valid_len) {
-			struct virtchnl2_add_queues *add_q =
-				(struct virtchnl2_add_queues *)msg;
-
-			if (add_q->chunks.num_chunks == 0) {
-				/* Zero chunks is allowed as input */
-				break;
-			}
-
-			valid_len += (add_q->chunks.num_chunks - 1) *
-				      sizeof(struct virtchnl2_queue_reg_chunk);
-		}
-		break;
-	case VIRTCHNL2_OP_ENABLE_QUEUES:
-	case VIRTCHNL2_OP_DISABLE_QUEUES:
-	case VIRTCHNL2_OP_DEL_QUEUES:
-		valid_len = sizeof(struct virtchnl2_del_ena_dis_queues);
-		if (msglen >= valid_len) {
-			struct virtchnl2_del_ena_dis_queues *qs =
-				(struct virtchnl2_del_ena_dis_queues *)msg;
-			if (qs->chunks.num_chunks == 0 ||
-			    qs->chunks.num_chunks > VIRTCHNL2_OP_DEL_ENABLE_DISABLE_QUEUES_MAX) {
-				err_msg_format = true;
-				break;
-			}
-			valid_len += (qs->chunks.num_chunks - 1) *
-				      sizeof(struct virtchnl2_queue_chunk);
-		}
-		break;
-	case VIRTCHNL2_OP_ADD_QUEUE_GROUPS:
-		valid_len = sizeof(struct virtchnl2_add_queue_groups);
-		if (msglen != valid_len) {
-			__le64 offset;
-			__le32 i;
-			struct virtchnl2_add_queue_groups *add_queue_grp =
-				(struct virtchnl2_add_queue_groups *)msg;
-			struct virtchnl2_queue_groups *groups = &(add_queue_grp->qg_info);
-			struct virtchnl2_queue_group_info *grp_info;
-			__le32 chunk_size = sizeof(struct virtchnl2_queue_reg_chunk);
-			__le32 group_size = sizeof(struct virtchnl2_queue_group_info);
-			__le32 total_chunks_size;
-
-			if (groups->num_queue_groups == 0) {
-				err_msg_format = true;
-				break;
-			}
-			valid_len += (groups->num_queue_groups - 1) *
-				      sizeof(struct virtchnl2_queue_group_info);
-			offset = (u8 *)(&groups->groups[0]) - (u8 *)groups;
-
-			for (i = 0; i < groups->num_queue_groups; i++) {
-				grp_info = (struct virtchnl2_queue_group_info *)
-						   ((u8 *)groups + offset);
-				if (grp_info->chunks.num_chunks == 0) {
-					offset += group_size;
-					continue;
-				}
-				total_chunks_size = (grp_info->chunks.num_chunks - 1) * chunk_size;
-				offset += group_size + total_chunks_size;
-				valid_len += total_chunks_size;
-			}
-		}
-		break;
-	case VIRTCHNL2_OP_DEL_QUEUE_GROUPS:
-		valid_len = sizeof(struct virtchnl2_delete_queue_groups);
-		if (msglen != valid_len) {
-			struct virtchnl2_delete_queue_groups *del_queue_grp =
-				(struct virtchnl2_delete_queue_groups *)msg;
-
-			if (del_queue_grp->num_queue_groups == 0) {
-				err_msg_format = true;
-				break;
-			}
-
-			valid_len += (del_queue_grp->num_queue_groups - 1) *
-				      sizeof(struct virtchnl2_queue_group_id);
-		}
-		break;
-	case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
-	case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
-		valid_len = sizeof(struct virtchnl2_queue_vector_maps);
-		if (msglen >= valid_len) {
-			struct virtchnl2_queue_vector_maps *v_qp =
-				(struct virtchnl2_queue_vector_maps *)msg;
-			if (v_qp->num_qv_maps == 0 ||
-			    v_qp->num_qv_maps > VIRTCHNL2_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) {
-				err_msg_format = true;
-				break;
-			}
-			valid_len += (v_qp->num_qv_maps - 1) *
-				      sizeof(struct virtchnl2_queue_vector);
-		}
-		break;
-	case VIRTCHNL2_OP_ALLOC_VECTORS:
-		valid_len = sizeof(struct virtchnl2_alloc_vectors);
-		if (msglen >= valid_len) {
-			struct virtchnl2_alloc_vectors *v_av =
-				(struct virtchnl2_alloc_vectors *)msg;
-
-			if (v_av->vchunks.num_vchunks == 0) {
-				/* Zero chunks is allowed as input */
-				break;
-			}
-
-			valid_len += (v_av->vchunks.num_vchunks - 1) *
-				      sizeof(struct virtchnl2_vector_chunk);
-		}
-		break;
-	case VIRTCHNL2_OP_DEALLOC_VECTORS:
-		valid_len = sizeof(struct virtchnl2_vector_chunks);
-		if (msglen >= valid_len) {
-			struct virtchnl2_vector_chunks *v_chunks =
-				(struct virtchnl2_vector_chunks *)msg;
-			if (v_chunks->num_vchunks == 0) {
-				err_msg_format = true;
-				break;
-			}
-			valid_len += (v_chunks->num_vchunks - 1) *
-				      sizeof(struct virtchnl2_vector_chunk);
-		}
-		break;
-	case VIRTCHNL2_OP_GET_RSS_KEY:
-	case VIRTCHNL2_OP_SET_RSS_KEY:
-		valid_len = sizeof(struct virtchnl2_rss_key);
-		if (msglen >= valid_len) {
-			struct virtchnl2_rss_key *vrk =
-				(struct virtchnl2_rss_key *)msg;
-
-			if (vrk->key_len == 0) {
-				/* Zero length is allowed as input */
-				break;
-			}
-
-			valid_len += vrk->key_len - 1;
-		}
-		break;
-	case VIRTCHNL2_OP_GET_RSS_LUT:
-	case VIRTCHNL2_OP_SET_RSS_LUT:
-		valid_len = sizeof(struct virtchnl2_rss_lut);
-		if (msglen >= valid_len) {
-			struct virtchnl2_rss_lut *vrl =
-				(struct virtchnl2_rss_lut *)msg;
-
-			if (vrl->lut_entries == 0) {
-				/* Zero entries is allowed as input */
-				break;
-			}
-
-			valid_len += (vrl->lut_entries - 1) * sizeof(vrl->lut);
-		}
-		break;
-	case VIRTCHNL2_OP_GET_RSS_HASH:
-	case VIRTCHNL2_OP_SET_RSS_HASH:
-		valid_len = sizeof(struct virtchnl2_rss_hash);
-		break;
-	case VIRTCHNL2_OP_SET_SRIOV_VFS:
-		valid_len = sizeof(struct virtchnl2_sriov_vfs_info);
-		break;
-	case VIRTCHNL2_OP_GET_PTYPE_INFO:
-		valid_len = sizeof(struct virtchnl2_get_ptype_info);
-		break;
-	case VIRTCHNL2_OP_GET_STATS:
-		valid_len = sizeof(struct virtchnl2_vport_stats);
-		break;
-	case VIRTCHNL2_OP_GET_PORT_STATS:
-		valid_len = sizeof(struct virtchnl2_port_stats);
-		break;
-	case VIRTCHNL2_OP_RESET_VF:
-		break;
-	case VIRTCHNL2_OP_GET_PTP_CAPS:
-		valid_len = sizeof(struct virtchnl2_get_ptp_caps);
-
-		if (msglen > valid_len) {
-			struct virtchnl2_get_ptp_caps *ptp_caps =
-			(struct virtchnl2_get_ptp_caps *)msg;
-
-			if (ptp_caps->tx_tstamp.num_latches == 0) {
-				err_msg_format = true;
-				break;
-			}
-
-			valid_len += ((ptp_caps->tx_tstamp.num_latches - 1) *
-				      sizeof(struct virtchnl2_ptp_tx_tstamp_entry));
-		}
-		break;
-	case VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES:
-		valid_len = sizeof(struct virtchnl2_ptp_tx_tstamp_latches);
-
-		if (msglen > valid_len) {
-			struct virtchnl2_ptp_tx_tstamp_latches *tx_tstamp_latches =
-			(struct virtchnl2_ptp_tx_tstamp_latches *)msg;
-
-			if (tx_tstamp_latches->num_latches == 0) {
-				err_msg_format = true;
-				break;
-			}
-
-			valid_len += ((tx_tstamp_latches->num_latches - 1) *
-				      sizeof(struct virtchnl2_ptp_tx_tstamp_latch));
-		}
-		break;
-	/* These are always errors coming from the VF */
-	case VIRTCHNL2_OP_EVENT:
-	case VIRTCHNL2_OP_UNKNOWN:
-	default:
-		return VIRTCHNL2_STATUS_ERR_ESRCH;
-	}
-	/* Few more checks */
-	if (err_msg_format || valid_len != msglen)
-		return VIRTCHNL2_STATUS_ERR_EINVAL;
-
-	return 0;
-}
-
 #endif /* _VIRTCHNL_2_H_ */
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index c46ed50eb5..f00202f43c 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -366,7 +366,7 @@ idpf_vc_queue_grps_add(struct idpf_vport *vport,
 	int err = -1;
 
 	size = sizeof(*p2p_queue_grps_info) +
-	       (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
+	       (p2p_queue_grps_info->num_queue_groups - 1) *
 		   sizeof(struct virtchnl2_queue_group_info);
 
 	memset(&args, 0, sizeof(args));
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7e718e9e19..e707043bf7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -2393,18 +2393,18 @@ cpfl_p2p_q_grps_add(struct idpf_vport *vport,
 	int ret;
 
 	p2p_queue_grps_info->vport_id = vport->vport_id;
-	p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
-	p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES;
-	p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ;
-	p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES;
-	p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ;
-	p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
-	p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
-	p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
-	p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
-	p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
-	p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
-	p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
+	p2p_queue_grps_info->num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
+	p2p_queue_grps_info->groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES;
+	p2p_queue_grps_info->groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ;
+	p2p_queue_grps_info->groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES;
+	p2p_queue_grps_info->groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ;
+	p2p_queue_grps_info->groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+	p2p_queue_grps_info->groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+	p2p_queue_grps_info->groups[0].rx_q_grp_info.rss_lut_size = 0;
+	p2p_queue_grps_info->groups[0].tx_q_grp_info.tx_tc = 0;
+	p2p_queue_grps_info->groups[0].tx_q_grp_info.priority = 0;
+	p2p_queue_grps_info->groups[0].tx_q_grp_info.is_sp = 0;
+	p2p_queue_grps_info->groups[0].tx_q_grp_info.pir_weight = 0;
 
 	ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info);
 	if (ret != 0) {
@@ -2423,13 +2423,13 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
 	struct virtchnl2_queue_reg_chunks *vc_chunks_out;
 	int i, type;
 
-	if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
+	if (p2p_q_vc_out_info->groups[0].qg_id.queue_group_type !=
 	    VIRTCHNL2_QUEUE_GROUP_P2P) {
 		PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
 		return -EINVAL;
 	}
 
-	vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
+	vc_chunks_out = &p2p_q_vc_out_info->groups[0].chunks;
 
 	for (i = 0; i < vc_chunks_out->num_chunks; i++) {
 		type = vc_chunks_out->chunks[i].type;
-- 
2.43.0



Thread overview: 125+ messages
2024-05-28  7:28 [PATCH 00/25] Update IDPF Base Driver Soumyadeep Hore
2024-05-28  7:28 ` [PATCH 01/25] common/idpf: added NVME CPF specific code with defines Soumyadeep Hore
2024-05-29 12:32   ` Bruce Richardson
2024-05-28  7:28 ` [PATCH 02/25] common/idpf: updated IDPF VF device ID Soumyadeep Hore
2024-05-28  7:28 ` [PATCH 03/25] common/idpf: update ADD QUEUE GROUPS offset Soumyadeep Hore
2024-05-29 12:38   ` Bruce Richardson
2024-05-28  7:28 ` [PATCH 04/25] common/idpf: update in PTP message validation Soumyadeep Hore
2024-05-29 13:03   ` Bruce Richardson
2024-05-28  7:28 ` [PATCH 05/25] common/idpf: added FLOW STEER capability and a vport flag Soumyadeep Hore
2024-05-28  7:28 ` [PATCH 06/25] common/idpf: moved the IDPF HW into API header file Soumyadeep Hore
2024-05-28  7:28 ` [PATCH 07/25] common/idpf: avoid defensive programming Soumyadeep Hore
2024-05-28  7:28 ` [PATCH 08/25] common/idpf: move related defines into enums Soumyadeep Hore
2024-05-28  7:28 ` [PATCH 09/25] common/idpf: add flex array support to virtchnl2 structures Soumyadeep Hore
2024-06-04  8:05 ` [PATCH v2 00/21] Update MEV TS Base Driver Soumyadeep Hore
2024-06-04  8:05   ` [PATCH v2 01/21] common/idpf: added NVME CPF specific code with defines Soumyadeep Hore
2024-06-04  8:05   ` [PATCH v2 02/21] common/idpf: updated IDPF VF device ID Soumyadeep Hore
2024-06-04  8:05   ` [PATCH v2 03/21] common/idpf: added new virtchnl2 capability and vport flag Soumyadeep Hore
2024-06-04  8:05   ` [PATCH v2 04/21] common/idpf: moved the idpf HW into API header file Soumyadeep Hore
2024-06-04  8:05   ` [PATCH v2 05/21] common/idpf: avoid defensive programming Soumyadeep Hore
2024-06-04  8:05   ` [PATCH v2 06/21] common/idpf: use BIT ULL for large bitmaps Soumyadeep Hore
2024-06-04  8:05   ` [PATCH v2 07/21] common/idpf: convert data type to 'le' Soumyadeep Hore
2024-06-04  8:05   ` [PATCH v2 08/21] common/idpf: compress RXDID mask definitions Soumyadeep Hore
2024-06-04  8:05   ` [PATCH v2 09/21] common/idpf: refactor size check macro Soumyadeep Hore
2024-06-04  8:06   ` [PATCH v2 10/21] common/idpf: update mask of Rx FLEX DESC ADV FF1 M Soumyadeep Hore
2024-06-04  8:06   ` [PATCH v2 11/21] common/idpf: use 'pad' and 'reserved' fields appropriately Soumyadeep Hore
2024-06-04  8:06   ` [PATCH v2 12/21] common/idpf: move related defines into enums Soumyadeep Hore
2024-06-04  8:06   ` [PATCH v2 13/21] common/idpf: avoid variable 0-init Soumyadeep Hore
2024-06-04  8:06   ` [PATCH v2 14/21] common/idpf: update in PTP message validation Soumyadeep Hore
2024-06-04  8:06   ` [PATCH v2 15/21] common/idpf: rename INLINE FLOW STEER to FLOW STEER Soumyadeep Hore
2024-06-04  8:06   ` [PATCH v2 16/21] common/idpf: add wmb before tail Soumyadeep Hore
2024-06-04  8:06   ` Soumyadeep Hore [this message]
2024-06-04  8:06   ` [PATCH v2 18/21] common/idpf: enable flow steer capability for vports Soumyadeep Hore
2024-06-04  8:06   ` [PATCH v2 19/21] common/idpf: add a new Tx context descriptor structure Soumyadeep Hore
2024-06-04  8:06   ` [PATCH v2 20/21] common/idpf: remove idpf common file Soumyadeep Hore
2024-06-04  8:06   ` [PATCH v2 21/21] drivers: adding type to idpf vc queue switch Soumyadeep Hore
2024-06-12  3:52   ` [PATCH v3 00/22] Update MEV TS Base Driver Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 01/22] common/idpf: added NVME CPF specific code with defines Soumyadeep Hore
2024-06-14 10:33       ` Burakov, Anatoly
2024-06-12  3:52     ` [PATCH v3 02/22] common/idpf: updated IDPF VF device ID Soumyadeep Hore
2024-06-14 10:36       ` Burakov, Anatoly
2024-06-12  3:52     ` [PATCH v3 03/22] common/idpf: added new virtchnl2 capability and vport flag Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 04/22] common/idpf: moved the idpf HW into API header file Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 05/22] common/idpf: avoid defensive programming Soumyadeep Hore
2024-06-14 12:16       ` Burakov, Anatoly
2024-06-12  3:52     ` [PATCH v3 06/22] common/idpf: use BIT ULL for large bitmaps Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 07/22] common/idpf: convert data type to 'le' Soumyadeep Hore
2024-06-14 12:19       ` Burakov, Anatoly
2024-06-12  3:52     ` [PATCH v3 08/22] common/idpf: compress RXDID mask definitions Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 09/22] common/idpf: refactor size check macro Soumyadeep Hore
2024-06-14 12:21       ` Burakov, Anatoly
2024-06-12  3:52     ` [PATCH v3 10/22] common/idpf: update mask of Rx FLEX DESC ADV FF1 M Soumyadeep Hore
2024-06-18 10:57       ` [PATCH v4 00/21] Update MEV TS Base Driver Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 01/21] common/idpf: updated IDPF VF device ID Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 02/21] common/idpf: added new virtchnl2 capability and vport flag Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 03/21] common/idpf: moved the idpf HW into API header file Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 04/21] common/idpf: avoid defensive programming Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 05/21] common/idpf: use BIT ULL for large bitmaps Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 06/21] common/idpf: convert data type to 'le' Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 07/21] common/idpf: compress RXDID mask definitions Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 08/21] common/idpf: refactor size check macro Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 09/21] common/idpf: update mask of Rx FLEX DESC ADV FF1 M Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 10/21] common/idpf: use 'pad' and 'reserved' fields appropriately Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 11/21] common/idpf: move related defines into enums Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 12/21] common/idpf: avoid variable 0-init Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 13/21] common/idpf: update in PTP message validation Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 14/21] common/idpf: rename INLINE FLOW STEER to FLOW STEER Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 15/21] common/idpf: add wmb before tail Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 16/21] drivers: add flex array support and fix issues Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 17/21] common/idpf: enable flow steer capability for vports Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 18/21] common/idpf: add a new Tx context descriptor structure Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 19/21] common/idpf: remove idpf common file Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 20/21] drivers: adding type to idpf vc queue switch Soumyadeep Hore
2024-06-18 10:57         ` [PATCH v4 21/21] doc: updated the documentation for cpfl PMD Soumyadeep Hore
2024-06-24  9:16           ` [PATCH v5 00/21] Update MEV TS Base Driver Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 01/21] common/idpf: updated IDPF VF device ID Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 02/21] common/idpf: added new virtchnl2 capability and vport flag Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 03/21] common/idpf: moved the idpf HW into API header file Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 04/21] common/idpf: avoid defensive programming Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 05/21] common/idpf: use BIT ULL for large bitmaps Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 06/21] common/idpf: convert data type to 'le' Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 07/21] common/idpf: compress RXDID mask definitions Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 08/21] common/idpf: refactor size check macro Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 09/21] common/idpf: update mask of Rx FLEX DESC ADV FF1 M Soumyadeep Hore
2024-06-28 14:16               ` Bruce Richardson
2024-06-24  9:16             ` [PATCH v5 10/21] common/idpf: use 'pad' and 'reserved' fields appropriately Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 11/21] common/idpf: move related defines into enums Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 12/21] common/idpf: avoid variable 0-init Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 13/21] common/idpf: update in PTP message validation Soumyadeep Hore
2024-06-28 14:33               ` Bruce Richardson
2024-06-24  9:16             ` [PATCH v5 14/21] common/idpf: rename INLINE FLOW STEER to FLOW STEER Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 15/21] common/idpf: add wmb before tail Soumyadeep Hore
2024-06-28 14:45               ` Bruce Richardson
2024-07-01 10:05                 ` Hore, Soumyadeep
2024-07-01  9:13               ` [PATCH v6 0/7] Update MEV TS Base Driver Soumyadeep Hore
2024-07-01  9:13                 ` [PATCH v6 1/7] common/idpf: add wmb before tail Soumyadeep Hore
2024-07-01  9:13                 ` [PATCH v6 2/7] drivers: adding macros for dynamic data structures Soumyadeep Hore
2024-07-01  9:13                 ` [PATCH v6 3/7] common/idpf: enable flow steer capability for vports Soumyadeep Hore
2024-07-01  9:13                 ` [PATCH v6 4/7] common/idpf: add a new Tx context descriptor structure Soumyadeep Hore
2024-07-01  9:13                 ` [PATCH v6 5/7] common/idpf: remove idpf common file Soumyadeep Hore
2024-07-01  9:13                 ` [PATCH v6 6/7] drivers: adding config queue types for virtchnl2 message Soumyadeep Hore
2024-07-01  9:13                 ` [PATCH v6 7/7] doc: updated the documentation for cpfl PMD Soumyadeep Hore
2024-07-01 11:23                 ` [PATCH v6 0/7] Update MEV TS Base Driver Bruce Richardson
2024-06-24  9:16             ` [PATCH v5 16/21] drivers: add flex array support and fix issues Soumyadeep Hore
2024-06-28 14:50               ` Bruce Richardson
2024-07-01 10:09                 ` Hore, Soumyadeep
2024-06-24  9:16             ` [PATCH v5 17/21] common/idpf: enable flow steer capability for vports Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 18/21] common/idpf: add a new Tx context descriptor structure Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 19/21] common/idpf: remove idpf common file Soumyadeep Hore
2024-06-24  9:16             ` [PATCH v5 20/21] drivers: adding type to idpf vc queue switch Soumyadeep Hore
2024-06-28 14:53               ` Bruce Richardson
2024-06-24  9:16             ` [PATCH v5 21/21] doc: updated the documentation for cpfl PMD Soumyadeep Hore
2024-06-28 14:58             ` [PATCH v5 00/21] Update MEV TS Base Driver Bruce Richardson
2024-06-12  3:52     ` [PATCH v3 11/22] common/idpf: use 'pad' and 'reserved' fields appropriately Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 12/22] common/idpf: move related defines into enums Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 13/22] common/idpf: avoid variable 0-init Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 14/22] common/idpf: update in PTP message validation Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 15/22] common/idpf: rename INLINE FLOW STEER to FLOW STEER Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 16/22] common/idpf: add wmb before tail Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 17/22] drivers: add flex array support and fix issues Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 18/22] common/idpf: enable flow steer capability for vports Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 19/22] common/idpf: add a new Tx context descriptor structure Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 20/22] common/idpf: remove idpf common file Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 21/22] drivers: adding type to idpf vc queue switch Soumyadeep Hore
2024-06-12  3:52     ` [PATCH v3 22/22] doc: updated the documentation for cpfl PMD Soumyadeep Hore
2024-06-14 12:48     ` [PATCH v3 00/22] Update MEV TS Base Driver Burakov, Anatoly
