From: Jingjing Wu <jingjing.wu@intel.com>
To: dev@dpdk.org
Date: Thu, 24 Sep 2015 14:03:04 +0800
Message-Id: <1443074591-19803-2-git-send-email-jingjing.wu@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1443074591-19803-1-git-send-email-jingjing.wu@intel.com>
References: <1443074591-19803-1-git-send-email-jingjing.wu@intel.com>
Cc: yulong.pei@intel.com
Subject: [dpdk-dev] [PATCH 1/8] ethdev: rename dcb_queue to dcb_tc in dcb config struct

Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
---
 app/test-pmd/testpmd.c         |  8 ++++----
 drivers/net/ixgbe/ixgbe_rxtx.c | 10 +++++-----
 examples/vmdq_dcb/main.c       |  4 ++--
 lib/librte_ether/rte_ethdev.h  | 14 +++++++-------
 4 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 386bf84..c8ae909 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -1866,8 +1866,8 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
 			vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
 		}
 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-			vmdq_rx_conf.dcb_queue[i] = i;
-			vmdq_tx_conf.dcb_queue[i] = i;
+			vmdq_rx_conf.dcb_tc[i] = i;
+			vmdq_tx_conf.dcb_tc[i] = i;
 		}

 		/*set DCB mode of RX and TX of multiple queues*/
@@ -1897,8 +1897,8 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
 		tx_conf.nb_tcs = dcb_conf->num_tcs;

 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
-			rx_conf.dcb_queue[i] = i;
-			tx_conf.dcb_queue[i] = i;
+			rx_conf.dcb_tc[i] = i;
+			tx_conf.dcb_tc[i] = i;
 		}
 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index a598a72..d331ef5 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2903,7 +2903,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
 		 * mapping is done with 3 bits per priority,
 		 * so shift by i*3 each time
 		 */
-		queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));
+		queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));

 	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);

@@ -3038,7 +3038,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
 	}
 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = vmdq_rx_conf->dcb_queue[i];
+		j = vmdq_rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -3066,7 +3066,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,

 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = vmdq_tx_conf->dcb_queue[i];
+		j = vmdq_tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -3088,7 +3088,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,

 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = rx_conf->dcb_queue[i];
+		j = rx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
@@ -3109,7 +3109,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,

 	/* User Priority to Traffic Class mapping */
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-		j = tx_conf->dcb_queue[i];
+		j = tx_conf->dcb_tc[i];
 		tc = &dcb_config->tc_config[j];
 		tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
 						(uint8_t)(1 << j);
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index c31c2ce..b90ac28 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -107,7 +107,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
 			.default_pool = 0,
 			.nb_pool_maps = 0,
 			.pool_map = {{0, 0},},
-			.dcb_queue = {0},
+			.dcb_tc = {0},
 		},
 	},
 };
@@ -144,7 +144,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf, enum rte_eth_nb_pools num_pools)
 		conf.pool_map[i].pools = 1 << (i % num_pools);
 	}
 	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
-		conf.dcb_queue[i] = (uint8_t)(i % (NUM_QUEUES/num_pools));
+		conf.dcb_tc[i] = (uint8_t)(i % (NUM_QUEUES/num_pools));
 	}
 	(void)(rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)));
 	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index fa06554..0aa00a6 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -543,20 +543,20 @@ enum rte_eth_nb_pools {
 /* This structure may be extended in future. */
 struct rte_eth_dcb_rx_conf {
 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs */
-	uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
-	/**< Possible DCB queue,4 or 8. */
+	/** Traffic class each UP mapped to. */
+	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
 };

 struct rte_eth_vmdq_dcb_tx_conf {
 	enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools. */
-	uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
-	/**< Possible DCB queue,4 or 8. */
+	/** Traffic class each UP mapped to. */
+	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
 };

 struct rte_eth_dcb_tx_conf {
 	enum rte_eth_nb_tcs nb_tcs; /**< Possible DCB TCs, 4 or 8 TCs. */
-	uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
-	/**< Possible DCB queue,4 or 8. */
+	/** Traffic class each UP mapped to. */
+	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
 };

 struct rte_eth_vmdq_tx_conf {
@@ -583,7 +583,7 @@ struct rte_eth_vmdq_dcb_conf {
 		uint16_t vlan_id; /**< The vlan id of the received frame */
 		uint64_t pools; /**< Bitmask of pools for packet rx */
 	} pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
-	uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
+	uint8_t dcb_tc[ETH_DCB_NUM_USER_PRIORITIES];
 	/**< Selects a queue in a pool */
 };
-- 
2.4.0
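
For reference, a minimal sketch (not part of the patch) of how an application fills the
renamed field after this change, following the pattern of get_eth_dcb_conf() in testpmd
above. The rx_adv_conf.dcb_rx_conf / tx_adv_conf.dcb_tx_conf member names and ETH_4_TCS
are assumed from the rte_eth_conf layout of this DPDK release:

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: configure plain DCB (no VMDq) with 4 TCs and map each of the 8
 * user priorities to a traffic class via the renamed dcb_tc[] arrays
 * (formerly dcb_queue[]). */
static void
setup_dcb_conf(struct rte_eth_conf *eth_conf)
{
	uint8_t i;

	memset(eth_conf, 0, sizeof(*eth_conf));
	eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
	eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	eth_conf->rx_adv_conf.dcb_rx_conf.nb_tcs = ETH_4_TCS; /* assumed member names */
	eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs = ETH_4_TCS;

	/* User priority i -> traffic class (i % 4). */
	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
		eth_conf->rx_adv_conf.dcb_rx_conf.dcb_tc[i] = i % ETH_4_TCS;
		eth_conf->tx_adv_conf.dcb_tx_conf.dcb_tc[i] = i % ETH_4_TCS;
	}
}

The resulting rte_eth_conf would then be passed to rte_eth_dev_configure() as usual; the
rename only changes the field name, not how the UP-to-TC mapping is programmed.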