From: Ting Xu
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com, qiming.yang@intel.com
Date: Thu, 17 Jun 2021 18:17:04 +0800
Message-Id: <20210617101708.113951-2-ting.xu@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20210617101708.113951-1-ting.xu@intel.com>
References: <20210601014034.36100-1-ting.xu@intel.com> <20210617101708.113951-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v2 1/5] common/iavf: support ETS-based QoS offload configuration

This patch adds new virtchnl opcodes and structures for QoS
configuration, which includes:

1. VIRTCHNL_VF_OFFLOAD_TC, to negotiate support for QoS configuration.
If both the VF and the PF have this flag set, the ETS-based QoS offload
function is supported.

2. VIRTCHNL_OP_DCF_CONFIG_BW, with which the DCF configures the minimum
and maximum bandwidth for each VF per enabled TC (an illustrative sketch
follows after this list). To make the VSI node bandwidth configuration
work, the DCF also needs to configure the TC node bandwidth directly.

3. VIRTCHNL_OP_GET_QOS_CAPS, with which a VF queries the current QoS
configuration, such as the enabled TCs, arbiter type, up2tc mapping and
VSI node bandwidth. The configuration was previously set by DCB and the
DCF, and now represents the potential QoS capability of the VF. The VF
can take it as a reference when configuring its queue-to-TC mapping.

4. VIRTCHNL_OP_CONFIG_TC_MAP, which sets the VF queue-to-TC mapping for
all Tx and Rx queues. Queues mapped to one TC must be contiguous, and
all allocated queues must be mapped.
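For illustration only, and not part of the patch: a minimal sketch of how a
DCF application might build a VIRTCHNL_OP_DCF_CONFIG_BW message for one VF
with two enabled TCs, using the structures added below. The
dcf_send_virtchnl_msg() helper, the bandwidth values and their unit are
assumptions; only the virtchnl structures, opcodes and the length rule (one
virtchnl_dcf_bw_cfg is embedded in the list, so num_elem - 1 extra elements
are appended) come from the patch.

/* Assumed environment: virtchnl.h (with this patch applied) provides the
 * structures and opcodes; u8/u16/u32 come from the iavf type headers. */
#include <stdlib.h>

#include "virtchnl.h"

/* Hypothetical helper wrapping however the DCF posts a virtchnl message
 * to the PF; not part of the patch. */
int dcf_send_virtchnl_msg(u32 v_opcode, u8 *msg, u16 msglen);

static int
dcf_config_vf_bw_example(u16 vf_id)
{
	struct virtchnl_dcf_bw_cfg_list *bw_list;
	u16 num_tc = 2;		/* assumption: two TCs enabled for this VF */
	u16 len;
	int ret;

	/* One virtchnl_dcf_bw_cfg is embedded in the list, so append
	 * num_tc - 1 extra elements; this mirrors the valid_len check
	 * added for VIRTCHNL_OP_DCF_CONFIG_BW. */
	len = sizeof(*bw_list) +
	      (num_tc - 1) * sizeof(struct virtchnl_dcf_bw_cfg);
	bw_list = calloc(1, len);
	if (!bw_list)
		return -1;

	/* Target a VF node; vf_id = VIRTCHNL_DCF_TC_LEVEL would configure
	 * TC node bandwidth directly instead. */
	bw_list->vf_id = vf_id;
	bw_list->num_elem = num_tc;

	/* TC 0: committed and peak rates (values and unit are made up). */
	bw_list->cfg[0].tc_id = 0;
	bw_list->cfg[0].type = VIRTCHNL_BW_SHAPER;
	bw_list->cfg[0].shaper.committed = 100;
	bw_list->cfg[0].shaper.peak = 200;

	/* TC 1: peak-limited only, no committed bandwidth. */
	bw_list->cfg[1].tc_id = 1;
	bw_list->cfg[1].type = VIRTCHNL_BW_SHAPER;
	bw_list->cfg[1].shaper.peak = 400;

	ret = dcf_send_virtchnl_msg(VIRTCHNL_OP_DCF_CONFIG_BW,
				    (u8 *)bw_list, len);
	free(bw_list);
	return ret;
}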
Signed-off-by: Ting Xu
---
 drivers/common/iavf/iavf_type.h |   2 +
 drivers/common/iavf/virtchnl.h  | 125 ++++++++++++++++++++++++++++++++
 2 files changed, 127 insertions(+)

diff --git a/drivers/common/iavf/iavf_type.h b/drivers/common/iavf/iavf_type.h
index f3815d523b..73dfb47e70 100644
--- a/drivers/common/iavf/iavf_type.h
+++ b/drivers/common/iavf/iavf_type.h
@@ -141,6 +141,8 @@ enum iavf_debug_mask {
 #define IAVF_PHY_LED_MODE_MASK		0xFFFF
 #define IAVF_PHY_LED_MODE_ORIG		0x80000000
 
+#define IAVF_MAX_TRAFFIC_CLASS	8
+
 /* Memory types */
 enum iavf_memset_type {
 	IAVF_NONDMA_MEM = 0,
diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index 3a60faff93..a56f8b4589 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -85,6 +85,10 @@ enum virtchnl_rx_hsplit {
 	VIRTCHNL_RX_HSPLIT_SPLIT_SCTP    = 8,
 };
 
+enum virtchnl_bw_limit_type {
+	VIRTCHNL_BW_SHAPER = 0,
+};
+
 #define VIRTCHNL_ETH_LENGTH_OF_ADDRESS	6
 
 /* END GENERIC DEFINES */
@@ -130,6 +134,7 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
 	VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
 	/* opcodes 34, 35, 36, and 37 are reserved */
+	VIRTCHNL_OP_DCF_CONFIG_BW = 37,
 	VIRTCHNL_OP_DCF_VLAN_OFFLOAD = 38,
 	VIRTCHNL_OP_DCF_CMD_DESC = 39,
 	VIRTCHNL_OP_DCF_CMD_BUFF = 40,
@@ -152,6 +157,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
 	VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,
 	VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,
+	VIRTCHNL_OP_GET_QOS_CAPS = 66,
+	VIRTCHNL_OP_CONFIG_TC_MAP = 67,
 	VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
 	VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
@@ -398,6 +405,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC	BIT(26)
 #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF		BIT(27)
 #define VIRTCHNL_VF_OFFLOAD_FDIR_PF		BIT(28)
+#define VIRTCHNL_VF_OFFLOAD_TC			BIT(29)
 #define VIRTCHNL_VF_CAP_DCF			BIT(30)
 	/* BIT(31) is reserved */
 
@@ -1285,6 +1293,13 @@ struct virtchnl_filter {
 
 VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
 
+struct virtchnl_shaper_bw {
+	u32 committed;
+	u32 peak;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
+
 /* VIRTCHNL_OP_DCF_GET_VSI_MAP
  * VF sends this message to get VSI mapping table.
  * PF responds with an indirect message containing VF's
@@ -1357,6 +1372,32 @@ struct virtchnl_dcf_vlan_offload {
 
 VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_dcf_vlan_offload);
 
+/* VIRTCHNL_OP_DCF_CONFIG_BW
+ * VF sends this message to set the bandwidth configuration of each
+ * TC for a specific VF ID. If the VF ID is 0xffff, the message is used
+ * to configure TC node bandwidth directly.
+ */
+struct virtchnl_dcf_bw_cfg {
+	u8 tc_id;
+	u8 pad[3];
+	enum virtchnl_bw_limit_type type;
+	union {
+		struct virtchnl_shaper_bw shaper;
+		u8 pad2[32];
+	};
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_dcf_bw_cfg);
+
+struct virtchnl_dcf_bw_cfg_list {
+#define VIRTCHNL_DCF_TC_LEVEL 0xffff
+	u16 vf_id;
+	u16 num_elem;
+	struct virtchnl_dcf_bw_cfg cfg[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_dcf_bw_cfg_list);
+
 struct virtchnl_supported_rxdids {
 	/* see enum virtchnl_rx_desc_id_bitmasks */
 	u64 supported_rxdids;
@@ -1767,6 +1808,62 @@ struct virtchnl_fdir_del {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
 
+/* VIRTCHNL_OP_GET_QOS_CAPS
+ * VF sends this message to get its QoS capabilities, such as
+ * TC number, arbiter and bandwidth.
+ */
+struct virtchnl_qos_cap_elem {
+	u8 tc_id;
+	u8 prio_of_tc;
+#define VIRTCHNL_ABITER_STRICT      0
+#define VIRTCHNL_ABITER_ETS         2
+	u8 arbiter;
+#define VIRTCHNL_STRICT_WEIGHT      1
+	u8 weight;
+	enum virtchnl_bw_limit_type type;
+	union {
+		struct virtchnl_shaper_bw shaper;
+		u8 pad2[32];
+	};
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
+
+struct virtchnl_qos_cap_list {
+	u16 vsi_id;
+	u16 num_elem;
+	struct virtchnl_qos_cap_elem cap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_qos_cap_list);
+
+/* VIRTCHNL_OP_CONFIG_TC_MAP
+ * VF sends a virtchnl_queue_tc_mapping message to set the queue-to-TC
+ * mapping for all the Tx and Rx queues of a specified VSI, and gets a
+ * response containing the bitmap of valid user priorities
+ * associated with the queues.
+ */
+struct virtchnl_queue_tc_mapping {
+	u16 vsi_id;
+	u16 num_tc;
+	u16 num_queue_pairs;
+	u8 pad[2];
+	union {
+		struct {
+			u16 start_queue_id;
+			u16 queue_count;
+		} req;
+		struct {
+#define VIRTCHNL_USER_PRIO_TYPE_UP	0
+#define VIRTCHNL_USER_PRIO_TYPE_DSCP	1
+			u16 prio_type;
+			u16 valid_prio_bitmap;
+		} resp;
+	} tc[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
+
 /* VIRTCHNL_OP_QUERY_FDIR_FILTER
  * VF sends this request to PF by filling out vsi_id,
  * flow_id and reset_counter. PF will return query_info
@@ -2117,6 +2214,19 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DCF_GET_VSI_MAP:
 	case VIRTCHNL_OP_DCF_GET_PKG_INFO:
 		break;
+	case VIRTCHNL_OP_DCF_CONFIG_BW:
+		valid_len = sizeof(struct virtchnl_dcf_bw_cfg_list);
+		if (msglen >= valid_len) {
+			struct virtchnl_dcf_bw_cfg_list *cfg_list =
+				(struct virtchnl_dcf_bw_cfg_list *)msg;
+			if (cfg_list->num_elem == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (cfg_list->num_elem - 1) *
+					 sizeof(struct virtchnl_dcf_bw_cfg);
+		}
+		break;
 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
 		break;
 	case VIRTCHNL_OP_ADD_RSS_CFG:
@@ -2132,6 +2242,21 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_QUERY_FDIR_FILTER:
 		valid_len = sizeof(struct virtchnl_fdir_query);
 		break;
+	case VIRTCHNL_OP_GET_QOS_CAPS:
+		break;
+	case VIRTCHNL_OP_CONFIG_TC_MAP:
+		valid_len = sizeof(struct virtchnl_queue_tc_mapping);
+		if (msglen >= valid_len) {
+			struct virtchnl_queue_tc_mapping *q_tc =
+				(struct virtchnl_queue_tc_mapping *)msg;
+			if (q_tc->num_tc == 0) {
+				err_msg_format = true;
+				break;
+			}
+			valid_len += (q_tc->num_tc - 1) *
+					 sizeof(q_tc->tc[0]);
+		}
+		break;
 	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
 		break;
 	case VIRTCHNL_OP_ADD_VLAN_V2:
-- 
2.25.1
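For illustration only, and not part of the patch: a sketch of how a VF might
walk the virtchnl_qos_cap_list it receives in response to
VIRTCHNL_OP_GET_QOS_CAPS. The printf-based reporting and the caller that
supplies the response buffer are assumptions; the structures, the arbiter
macros and VIRTCHNL_BW_SHAPER come from the patch above.

/* Assumed environment: virtchnl.h (with this patch applied) and the iavf
 * type headers; `buf` holds the PF's reply to VIRTCHNL_OP_GET_QOS_CAPS. */
#include <stdio.h>

#include "virtchnl.h"

static void
vf_dump_qos_caps_example(const u8 *buf)
{
	const struct virtchnl_qos_cap_list *caps =
		(const struct virtchnl_qos_cap_list *)buf;
	u16 i;

	printf("VSI %d: %d QoS capability element(s)\n",
	       caps->vsi_id, caps->num_elem);

	for (i = 0; i < caps->num_elem; i++) {
		/* cap[] is a variable-length array; num_elem entries follow. */
		const struct virtchnl_qos_cap_elem *e = &caps->cap[i];

		printf("TC %d: prio %d, %s arbiter, weight %d\n",
		       e->tc_id, e->prio_of_tc,
		       e->arbiter == VIRTCHNL_ABITER_ETS ? "ETS" : "strict",
		       e->weight);
		if (e->type == VIRTCHNL_BW_SHAPER)
			printf("      shaper committed %u, peak %u\n",
			       e->shaper.committed, e->shaper.peak);
	}
}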