From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 6ED58A04B5; Mon, 2 Dec 2019 09:06:49 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 651D01C11F; Mon, 2 Dec 2019 08:59:21 +0100 (CET) Received: from mga02.intel.com (mga02.intel.com [134.134.136.20]) by dpdk.org (Postfix) with ESMTP id C66211C0D1 for ; Mon, 2 Dec 2019 08:59:09 +0100 (CET) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga008.fm.intel.com ([10.253.24.58]) by orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Dec 2019 23:59:09 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.69,268,1571727600"; d="scan'208";a="207993700" Received: from dpdk_yexl_af_xdp.sh.intel.com ([10.67.119.186]) by fmsmga008.fm.intel.com with ESMTP; 01 Dec 2019 23:59:07 -0800 From: Xiaolong Ye To: Beilei Xing , Qi Zhang Cc: dev@dpdk.org, Xiaolong Ye , Phani R Burra Date: Mon, 2 Dec 2019 15:49:23 +0800 Message-Id: <20191202074935.97629-58-xiaolong.ye@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20191202074935.97629-1-xiaolong.ye@intel.com> References: <20191202074935.97629-1-xiaolong.ye@intel.com> Subject: [dpdk-dev] [PATCH 57/69] net/i40e/base: update virtchnl header with advanced features X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Updating the virtchnl header file with new interfaces. The new interfaces will support the split queue and advanced features. These new features are initially targeted at the APF and CPF driver model. AVF will also enable the new features in the future and use the advanced interfaces. 
Signed-off-by: Phani R Burra Reviewed-by: Aleksandr Loktionov Reviewed-by: Kirsher Jeffrey T Signed-off-by: Xiaolong Ye --- drivers/net/i40e/base/virtchnl.h | 511 ++++++++++++++++++++++++++++++- 1 file changed, 509 insertions(+), 2 deletions(-) diff --git a/drivers/net/i40e/base/virtchnl.h b/drivers/net/i40e/base/virtchnl.h index 0ea03d06b..c3495ab39 100644 --- a/drivers/net/i40e/base/virtchnl.h +++ b/drivers/net/i40e/base/virtchnl.h @@ -134,7 +134,34 @@ enum virtchnl_ops { VIRTCHNL_OP_DISABLE_CHANNELS = 31, VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, - +#ifdef VIRTCHNL_EXT_FEATURES + /* New major set of opcodes introduced and so leaving room for + * old misc opcodes to be added in future. Also these opcodes may only + * be used if both the PF and VF have successfully negotiated the + * VIRTCHNL_VF_CAP_EXT_FEATURES capability during initial capabilities + * exchange. + */ + VIRTCHNL_OP_GET_CAPS = 100, + VIRTCHNL_OP_CREATE_VPORT = 101, + VIRTCHNL_OP_DESTROY_VPORT = 102, + VIRTCHNL_OP_ENABLE_VPORT = 103, + VIRTCHNL_OP_DISABLE_VPORT = 104, + VIRTCHNL_OP_CONFIG_TX_QUEUES = 105, + VIRTCHNL_OP_CONFIG_RX_QUEUES = 106, + VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107, + VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108, + VIRTCHNL_OP_ADD_QUEUES = 109, + VIRTCHNL_OP_DEL_QUEUES = 110, + VIRTCHNL_OP_MAP_VECTOR_QUEUE = 111, + VIRTCHNL_OP_UNMAP_VECTOR_QUEUE = 112, + VIRTCHNL_OP_MAP_VECTOR_ITR = 113, + VIRTCHNL_OP_GET_RSS_KEY = 114, + VIRTCHNL_OP_GET_RSS_LUT = 115, + VIRTCHNL_OP_GET_RSS_HASH = 116, + VIRTCHNL_OP_SET_RSS_HASH = 117, + VIRTCHNL_OP_CREATE_VFS = 118, + VIRTCHNL_OP_DESTROY_VFS = 119, +#endif /* VIRTCHNL_EXT_FEATURES */ }; /* These macros are used to generate compilation errors if a structure/union @@ -248,8 +275,10 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 #define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 /* Define below the capability flags that are not offloads */ +#ifdef VIRTCHNL_EXT_FEATURES +#define 
VIRTCHNL_VF_CAP_EXT_FEATURES 0x01000000 +#endif /* VIRTCHNL_EXT_FEATURES */ #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080 - #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ VIRTCHNL_VF_OFFLOAD_VLAN | \ VIRTCHNL_VF_OFFLOAD_RSS_PF) @@ -707,6 +736,362 @@ enum virtchnl_vfr_states { VIRTCHNL_VFR_VFACTIVE, }; +#ifdef VIRTCHNL_EXT_FEATURES +/* PF capability flags + * VIRTCHNL_CAP_STATELESS_OFFLOADS flag indicates stateless offloads + * such as TX/RX Checksum offloading and TSO for non-tunneled packets. Please + * note that old and new capabilities are exclusive and not supposed to be + * mixed + */ +#define VIRTCHNL_CAP_STATELESS_OFFLOADS BIT(1) +#define VIRTCHNL_CAP_UDP_SEG_OFFLOAD BIT(2) +#define VIRTCHNL_CAP_RSS BIT(3) +#define VIRTCHNL_CAP_TCP_RSC BIT(4) +#define VIRTCHNL_CAP_HEADER_SPLIT BIT(5) +#define VIRTCHNL_CAP_RDMA BIT(6) +#define VIRTCHNL_CAP_SRIOV BIT(7) +/* Earliest Departure Time capability used for Timing Wheel */ +#define VIRTCHNL_CAP_EDT BIT(8) + +/* Type of virtual port */ +enum virtchnl_vport_type { + VIRTCHNL_VPORT_TYPE_DEFAULT = 0, +}; + +/* Type of queue model */ +enum virtchnl_queue_model { + VIRTCHNL_QUEUE_MODEL_SINGLE = 0, + VIRTCHNL_QUEUE_MODEL_SPLIT = 1, +}; + +/* TX and RX queue types are valid in legacy as well as split queue models. + * With Split Queue model, 2 additional types are introduced - TX_COMPLETION + * and RX_BUFFER. In split queue model, RX corresponds to the queue where HW + * posts completions. 
+ */ +enum virtchnl_queue_type { + VIRTCHNL_QUEUE_TYPE_TX = 0, + VIRTCHNL_QUEUE_TYPE_RX = 1, + VIRTCHNL_QUEUE_TYPE_TX_COMPLETION = 2, + VIRTCHNL_QUEUE_TYPE_RX_BUFFER = 3, +}; + +/* RX Queue Feature bits */ +#define VIRTCHNL_RXQ_RSC BIT(1) +#define VIRTCHNL_RXQ_HDR_SPLIT BIT(2) +#define VIRTCHNL_RXQ_IMMEDIATE_WRITE_BACK BIT(4) + +/* RX Queue Descriptor Types */ +enum virtchnl_rxq_desc_size { + VIRTCHNL_RXQ_DESC_SIZE_16BYTE = 0, + VIRTCHNL_RXQ_DESC_SIZE_32BYTE = 1, +}; + +/* TX Queue Scheduling Modes Queue mode is the legacy type i.e. inorder + * and Flow mode is out of order packet processing + */ +enum virtchnl_txq_sched_mode { + VIRTCHNL_TXQ_SCHED_MODE_QUEUE = 0, + VIRTCHNL_TXQ_SCHED_MODE_FLOW = 1, +}; + +/* Queue Descriptor Profiles Base mode is the legacy and Native is the + * flex descriptors + */ +enum virtchnl_desc_profile { + VIRTCHNL_TXQ_DESC_PROFILE_BASE = 0, + VIRTCHNL_TXQ_DESC_PROFILE_NATIVE = 1, +}; + +/* Type of RSS algorithm */ +enum virtchnl_rss_algorithm { + VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0, + VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1, + VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2, + VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3, +}; + +/* VIRTCHNL_OP_GET_CAPS + * PF sends this message to CP to negotiate capabilities by filling + * in the u64 bitmap of its desired capabilities. + * CP responds with an updated virtchnl_get_capabilities structure + * with allowed capabilities and possible max number of vfs it can create. 
+ */ +struct virtchnl_get_capabilities { + u64 cap_flags; + u16 max_num_vfs; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_get_capabilities); + +/* structure to specify a chunk of contiguous queues */ +struct virtchnl_queue_chunk { + enum virtchnl_queue_type type; + u16 start_queue_id; + u16 num_queues; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk); + +/* structure to specify several chunks of contiguous queues */ +struct virtchnl_queue_chunks { + u16 num_chunks; + struct virtchnl_queue_chunk chunks[]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_queue_chunks); + +/* VIRTCHNL_OP_CREATE_VPORT + * PF sends this message to CP to create a vport by filling in the first 8 + * fields of virtchnl_create_vport structure (vport type, tx, rx queue models + * and desired number of queues and vectors). CP responds with the updated + * virtchnl_create_vport structure containing the number of assigned queues, + * vectors, vport id, max mtu, default mac addr followed by chunks which in turn + * will have an array of num_chunks entries of virtchnl_queue_chunk structures. + */ +struct virtchnl_create_vport { + enum virtchnl_vport_type vport_type; + /* single or split */ + enum virtchnl_queue_model txq_model; + /* single or split */ + enum virtchnl_queue_model rxq_model; + u16 num_tx_q; + /* valid only if txq_model is split Q */ + u16 num_tx_complq; + u16 num_rx_q; + /* valid only if rxq_model is split Q */ + u16 num_rx_bufq; + u16 num_vectors; + u16 vport_id; + u16 max_mtu; + u8 default_mac_addr[ETH_ALEN]; + enum virtchnl_rss_algorithm rss_algorithm; + u16 rss_key_size; + u16 rss_lut_size; + u16 qset_handle; + struct virtchnl_queue_chunks chunks; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_create_vport); + +/* VIRTCHNL_OP_DESTROY_VPORT + * VIRTCHNL_OP_ENABLE_VPORT + * VIRTCHNL_OP_DISABLE_VPORT + * PF sends this message to CP to destroy, enable or disable a vport by filling + * in the vport_id in virtchnl_vport structure. 
+ * CP responds with the status of the requested operation. + */ +struct virtchnl_vport { + u16 vport_id; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_vport); + +/* Tx queue config info */ +struct virtchnl_txq_info_v2 { + u16 queue_id; + /* single or split */ + enum virtchnl_queue_model model; + /* tx or tx_completion */ + enum virtchnl_queue_type type; + /* queue or flow based */ + enum virtchnl_txq_sched_mode sched_mode; + /* base or native */ + enum virtchnl_desc_profile desc_profile; + u16 ring_len; + u64 dma_ring_addr; + /* valid only if queue model is split and type is tx */ + u16 tx_compl_queue_id; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_txq_info_v2); + +/* VIRTCHNL_OP_CONFIG_TX_QUEUES + * PF sends this message to set up parameters for one or more TX queues. + * This message contains an array of num_qinfo instances of virtchnl_txq_info_v2 + * structures. CP configures requested queues and returns a status code. If + * num_qinfo specified is greater than the number of queues associated with the + * vport, an error is returned and no queues are configured. 
+ */ +struct virtchnl_config_tx_queues { + u16 vport_id; + u16 num_qinfo; + struct virtchnl_txq_info_v2 txq_info[]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_config_tx_queues); + +/* Rx queue config info */ +struct virtchnl_rxq_info_v2 { + u16 queue_id; + /* single or split */ + enum virtchnl_queue_model model; + /* rx or rx buffer */ + enum virtchnl_queue_type type; + /* base or native */ + enum virtchnl_desc_profile desc_profile; + /* rsc, header-split, immediate write back */ + u16 queue_flags; + /* 16 or 32 byte */ + enum virtchnl_rxq_desc_size desc_size; + u16 ring_len; + u16 hdr_buffer_size; + u32 data_buffer_size; + u32 max_pkt_size; + u64 dma_ring_addr; + u64 dma_head_wb_addr; + u16 rsc_low_watermark; + u8 buffer_notif_stride; + enum virtchnl_rx_hsplit rx_split_pos; + /* valid only if queue model is split and type is rx buffer*/ + u16 rx_bufq1_id; + /* valid only if queue model is split and type is rx buffer*/ + u16 rx_bufq2_id; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_rxq_info_v2); + +/* VIRTCHNL_OP_CONFIG_RX_QUEUES + * PF sends this message to set up parameters for one or more RX queues. + * This message contains an array of num_qinfo instances of virtchnl_rxq_info_v2 + * structures. CP configures requested queues and returns a status code. + * If the number of queues specified is greater than the number of queues + * associated with the vport, an error is returned and no queues are configured. + */ +struct virtchnl_config_rx_queues { + u16 vport_id; + u16 num_qinfo; + struct virtchnl_rxq_info_v2 rxq_info[]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_config_rx_queues); + +/* VIRTCHNL_OP_ADD_QUEUES + * PF sends this message to request additional TX/RX queues beyond the ones + * that were assigned via CREATE_VPORT request. virtchnl_add_queues structure is + * used to specify the number of each type of queues. 
+ * CP responds with the same structure with the actual number of queues assigned + * followed by num_chunks of virtchnl_queue_chunk structures. + */ +struct virtchnl_add_queues { + u16 vport_id; + u16 num_tx_q; + u16 num_tx_complq; + u16 num_rx_q; + u16 num_rx_bufq; + struct virtchnl_queue_chunks chunks; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_add_queues); + +/* VIRTCHNL_OP_ENABLE_QUEUES + * VIRTCHNL_OP_DISABLE_QUEUES + * VIRTCHNL_OP_DEL_QUEUES + * PF sends these messages to enable, disable or delete queues specified in + * chunks. PF sends virtchnl_del_ena_dis_queues struct to specify the queues + * to be enabled/disabled/deleted. Also applicable to single queue RX or + * TX. CP performs requested action and returns status. + */ +struct virtchnl_del_ena_dis_queues { + u16 vport_id; + struct virtchnl_queue_chunks chunks; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_del_ena_dis_queues); + +/* Vector to Queue mapping */ +struct virtchnl_vector_queue { + u16 vector_id; + u16 queue_id; + enum virtchnl_queue_type queue_type; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vector_queue); + +/* VIRTCHNL_OP_MAP_VECTOR_QUEUE + * VIRTCHNL_OP_UNMAP_VECTOR_QUEUE + * PF sends this message to map or unmap vectors to queues. + * This message contains an array of num_vector_queue_pairs instances of + * virtchnl_vector_queue structures. CP configures interrupt mapping and returns + * a status code. If the number of vectors specified is greater than the number + * of vectors associated with the vport, an error is returned and no vectors are + * mapped. 
+ */ +struct virtchnl_vector_queue_pairs { + u16 vport_id; + u16 num_vector_queue_pairs; + struct virtchnl_vector_queue vq[]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vector_queue_pairs); + +/* Vector to ITR index registers mapping */ +struct virtchnl_vector_itr { + u16 vector_id; + u16 rxitr_idx; + u16 txitr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vector_itr); + +/* VIRTCHNL_OP_MAP_VECTOR_ITR + * PF sends this message to map vectors to RX and TX ITR index registers. + * This message contains an array of num_vector_itr_pairs instances of + * virtchnl_vector_itr structures. CP configures requested queues and returns a + * status code. If the number of vectors specified is greater than the number of + * vectors associated with the VSI, an error is returned and no vectors are + * mapped. + */ +struct virtchnl_vector_itr_pairs { + u16 vport_id; + u16 num_vector_itr_pairs; + struct virtchnl_vector_itr vitr[]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vector_itr_pairs); + +/* VIRTCHNL_OP_GET_RSS_LUT + * PF sends this message to get RSS lookup table. Only supported if + * both PF and CP drivers set the VIRTCHNL_CAP_RSS bit during configuration + * negotiation. Uses the virtchnl_rss_lut structure + */ + +/* VIRTCHNL_OP_GET_RSS_KEY + * PF sends this message to get RSS key. Only supported if + * both PF and CP drivers set the VIRTCHNL_CAP_RSS bit during configuration + * negotiation. Used the virtchnl_rss_key structure + */ + +/* VIRTCHNL_OP_GET_RSS_HASH + * VIRTCHNL_OP_SET_RSS_HASH + * PF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the CP sets these to all possible traffic types that the + * hardware supports. The PF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + * Only supported if both PF and CP drivers set the VIRTCHNL_CAP_RSS bit + * during configuration negotiation. 
+ */ +struct virtchnl_rss_hash { + u16 vport_id; + u64 hash; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_rss_hash); + +/* VIRTCHNL_OP_CREATE_SRIOV_VFS + * VIRTCHNL_OP_DESTROY_SRIOV_VFS + * This message is used to let the CP know how many SRIOV VFs need to be + * created. The actual allocation of resources for the VFs in terms of VSI, + * Queues and Interrupts is done by CP. When this call completes, the APF driver + * calls pci_enable_sriov to let the OS instantiate the SRIOV PCIE devices. + */ +struct virtchnl_sriov_vfs_info { + u16 num_vfs; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_sriov_vfs_info); + +#endif /* VIRTCHNL_EXT_FEATURES */ /** * virtchnl_vc_validate_vf_msg * @ver: Virtchnl version info @@ -871,6 +1256,128 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, case VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG: break; #endif +#ifdef VIRTCHNL_EXT_FEATURES + case VIRTCHNL_OP_GET_CAPS: + valid_len = sizeof(struct virtchnl_get_capabilities); + break; + case VIRTCHNL_OP_CREATE_VPORT: + valid_len = sizeof(struct virtchnl_create_vport); + if (msglen >= valid_len) { + struct virtchnl_create_vport *cvport = + (struct virtchnl_create_vport *)msg; + + valid_len += cvport->chunks.num_chunks * + sizeof(struct virtchnl_queue_chunk); + } + break; + case VIRTCHNL_OP_DESTROY_VPORT: + case VIRTCHNL_OP_ENABLE_VPORT: + case VIRTCHNL_OP_DISABLE_VPORT: + valid_len = sizeof(struct virtchnl_vport); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUES: + valid_len = sizeof(struct virtchnl_config_tx_queues); + if (msglen >= valid_len) { + struct virtchnl_config_tx_queues *ctq = + (struct virtchnl_config_tx_queues *)msg; + if (ctq->num_qinfo == 0) { + err_msg_format = true; + break; + } + valid_len += ctq->num_qinfo * + sizeof(struct virtchnl_txq_info_v2); + } + break; + case VIRTCHNL_OP_CONFIG_RX_QUEUES: + valid_len = sizeof(struct virtchnl_config_rx_queues); + if (msglen >= valid_len) { + struct virtchnl_config_rx_queues *crq = + (struct virtchnl_config_rx_queues 
*)msg; + if (crq->num_qinfo == 0) { + err_msg_format = true; + break; + } + valid_len += crq->num_qinfo * + sizeof(struct virtchnl_rxq_info_v2); + } + break; + case VIRTCHNL_OP_ADD_QUEUES: + valid_len = sizeof(struct virtchnl_add_queues); + if (msglen >= valid_len) { + struct virtchnl_add_queues *add_q = + (struct virtchnl_add_queues *)msg; + + valid_len += add_q->chunks.num_chunks * + sizeof(struct virtchnl_queue_chunk); + } + break; + case VIRTCHNL_OP_ENABLE_QUEUES_V2: + case VIRTCHNL_OP_DISABLE_QUEUES_V2: + case VIRTCHNL_OP_DEL_QUEUES: + valid_len = sizeof(struct virtchnl_del_ena_dis_queues); + if (msglen >= valid_len) { + struct virtchnl_del_ena_dis_queues *qs = + (struct virtchnl_del_ena_dis_queues *)msg; + if (qs->chunks.num_chunks == 0) { + err_msg_format = true; + break; + } + valid_len += qs->chunks.num_chunks * + sizeof(struct virtchnl_queue_chunk); + } + break; + case VIRTCHNL_OP_MAP_VECTOR_QUEUE: + case VIRTCHNL_OP_UNMAP_VECTOR_QUEUE: + valid_len = sizeof(struct virtchnl_vector_queue_pairs); + if (msglen >= valid_len) { + struct virtchnl_vector_queue_pairs *v_qp = + (struct virtchnl_vector_queue_pairs *)msg; + if (v_qp->num_vector_queue_pairs == 0) { + err_msg_format = true; + break; + } + valid_len += v_qp->num_vector_queue_pairs * + sizeof(struct virtchnl_vector_queue); + } + break; + case VIRTCHNL_OP_MAP_VECTOR_ITR: + valid_len = sizeof(struct virtchnl_vector_itr_pairs); + if (msglen >= valid_len) { + struct virtchnl_vector_itr_pairs *v_itrp = + (struct virtchnl_vector_itr_pairs *)msg; + if (v_itrp->num_vector_itr_pairs == 0) { + err_msg_format = true; + break; + } + valid_len += v_itrp->num_vector_itr_pairs * + sizeof(struct virtchnl_vector_itr); + } + break; + case VIRTCHNL_OP_GET_RSS_KEY: + valid_len = sizeof(struct virtchnl_rss_key); + if (msglen >= valid_len) { + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + valid_len += vrk->key_len - 1; + } + break; + case VIRTCHNL_OP_GET_RSS_LUT: + valid_len = sizeof(struct 
virtchnl_rss_lut); + if (msglen >= valid_len) { + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; + valid_len += vrl->lut_entries - 1; + } + break; + case VIRTCHNL_OP_GET_RSS_HASH: + case VIRTCHNL_OP_SET_RSS_HASH: + valid_len = sizeof(struct virtchnl_rss_hash); + break; + case VIRTCHNL_OP_CREATE_VFS: + case VIRTCHNL_OP_DESTROY_VFS: + valid_len = sizeof(struct virtchnl_sriov_vfs_info); + break; +#endif /* VIRTCHNL_EXT_FEATURES */ /* These are always errors coming from the VF. */ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: -- 2.17.1