From: Jiawen Wu <jiawenwu@trustnetic.com>
To: dev@dpdk.org
Cc: Jiawen Wu <jiawenwu@trustnetic.com>
Date: Mon, 19 Oct 2020 16:53:58 +0800
Message-Id: <20201019085415.82207-42-jiawenwu@trustnetic.com>
X-Mailer: git-send-email 2.18.4
In-Reply-To: <20201019085415.82207-1-jiawenwu@trustnetic.com>
References: <20201019085415.82207-1-jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v4 41/58] net/txgbe: add VMDq configure

Add multiple queue setting with VMDq: configure the Rx/Tx multi-queue
modes, enable per-pool Rx/Tx and VLAN filtering, map VLANs to pools,
and optionally enable VMDq loopback.
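
For reference, a minimal sketch of how an application would select this
path through the standard ethdev API (the pool count, VLAN id, pool mask
and helper name below are hypothetical example values, not part of this
patch):

    #include <string.h>
    #include <rte_ethdev.h>

    /* Hypothetical helper: configure a port for VMDq-only Rx/Tx with
     * 64 pools and steer VLAN 100 to pool 0.  Values are examples only.
     */
    static int
    configure_vmdq_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
        conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
        conf.rx_adv_conf.vmdq_rx_conf.nb_queue_pools = ETH_64_POOLS;
        conf.rx_adv_conf.vmdq_rx_conf.enable_default_pool = 0;
        conf.rx_adv_conf.vmdq_rx_conf.nb_pool_maps = 1;
        conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].vlan_id = 100;
        conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].pools = 1ULL << 0;
        conf.rx_adv_conf.vmdq_rx_conf.rx_mode =
                ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST;

        /* nb_rxq/nb_txq must cover the configured pools */
        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }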
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
 doc/guides/nics/features/txgbe.ini |   1 +
 drivers/net/txgbe/txgbe_ethdev.c   |  35 ++++
 drivers/net/txgbe/txgbe_ethdev.h   |   2 +
 drivers/net/txgbe/txgbe_rxtx.c     | 260 +++++++++++++++++++++++++++++
 4 files changed, 298 insertions(+)

diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 022e56d45..578ec05b0 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -15,6 +15,7 @@ LRO                  = Y
 TSO                  = Y
 Unicast MAC filter   = Y
 Multicast MAC filter = Y
+VMDq                 = Y
 SR-IOV               = Y
 VLAN filter          = Y
 Rate limitation      = Y
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index c60df26c8..e14980a2e 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -943,6 +943,17 @@ txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	return 0;
 }
 
+static void
+txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	/* VLANCTL: enable vlan filtering and allow all vlan tags through */
+	uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
+
+	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+	wr32(hw, TXGBE_VLANCTL, vlanctrl);
+}
+
 static int
 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 {
@@ -1330,6 +1341,11 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
+	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+		/* Enable vlan filtering for VMDq */
+		txgbe_vmdq_vlan_hw_filter_enable(dev);
+	}
+
 	/* Restore vf rate limit */
 	if (vfinfo != NULL) {
 		for (vf = 0; vf < pci_dev->max_vfs; vf++)
@@ -2765,6 +2781,25 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 	return 0;
 }
 
+uint32_t
+txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+	uint32_t new_val = orig_val;
+
+	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+		new_val |= TXGBE_POOLETHCTL_UTA;
+	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+		new_val |= TXGBE_POOLETHCTL_MCHA;
+	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+		new_val |= TXGBE_POOLETHCTL_UCHA;
+	if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+		new_val |= TXGBE_POOLETHCTL_BCA;
+	if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+		new_val |= TXGBE_POOLETHCTL_MCP;
+
+	return new_val;
+}
+
 static int
 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index ba9cab718..bf13e0378 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -256,6 +256,8 @@ void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
 
 int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
 
+uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
+
 int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 			uint16_t tx_rate, uint64_t q_msk);
 int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index fd6a3f436..c8ca9e2d0 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -2555,6 +2555,145 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)
 	dev->data->nb_tx_queues = 0;
 }
 
+static void
+txgbe_rss_disable(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw;
+
+	hw = TXGBE_DEV_HW(dev);
+
+	wr32m(hw, TXGBE_RACTL, TXGBE_RACTL_RSSENA, 0);
+}
+
+#define NUM_VFTA_REGISTERS 128
+
+/*
+ * VMDq is only supported on 10 GbE NICs.
+ */
+static void
+txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
+{
+	struct rte_eth_vmdq_rx_conf *cfg;
+	struct txgbe_hw *hw;
+	enum rte_eth_nb_pools num_pools;
+	uint32_t mrqc, vt_ctl, vlanctrl;
+	uint32_t vmolr = 0;
+	int i;
+
+	PMD_INIT_FUNC_TRACE();
+	hw = TXGBE_DEV_HW(dev);
+	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+	num_pools = cfg->nb_queue_pools;
+
+	txgbe_rss_disable(dev);
+
+	/* enable vmdq */
+	mrqc = TXGBE_PORTCTL_NUMVT_64;
+	wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mrqc);
+
+	/* turn on virtualisation and set the default pool */
+	vt_ctl = TXGBE_POOLCTL_RPLEN;
+	if (cfg->enable_default_pool)
+		vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
+	else
+		vt_ctl |= TXGBE_POOLCTL_DEFDSA;
+
+	wr32(hw, TXGBE_POOLCTL, vt_ctl);
+
+	for (i = 0; i < (int)num_pools; i++) {
+		vmolr = txgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
+		wr32(hw, TXGBE_POOLETHCTL(i), vmolr);
+	}
+
+	/* enable vlan filtering and allow all vlan tags through */
+	vlanctrl = rd32(hw, TXGBE_VLANCTL);
+	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+	wr32(hw, TXGBE_VLANCTL, vlanctrl);
+
+	/* enable all vlan filters */
+	for (i = 0; i < NUM_VFTA_REGISTERS; i++)
+		wr32(hw, TXGBE_VLANTBL(i), UINT32_MAX);
+
+	/* pool enabling for receive - 64 */
+	wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
+	if (num_pools == ETH_64_POOLS)
+		wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
+
+	/*
+	 * allow pools to read specific mac addresses
+	 * In this case, all pools should be able to read from mac addr 0
+	 */
+	wr32(hw, TXGBE_ETHADDRIDX, 0);
+	wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
+	wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
+
+	/* set up filters for vlan tags as configured */
+	for (i = 0; i < cfg->nb_pool_maps; i++) {
+		/* set vlan id in VF register and set the valid bit */
+		wr32(hw, TXGBE_PSRVLANIDX, i);
+		wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
+				TXGBE_PSRVLAN_VID(cfg->pool_map[i].vlan_id)));
+		/*
+		 * Put the allowed pools in VFB reg. As we only have 16 or 64
+		 * pools, we only need to use the first half of the register,
+		 * i.e. bits 0-31.
+		 */
+		if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
+			wr32(hw, TXGBE_PSRVLANPLM(0),
+				(cfg->pool_map[i].pools & UINT32_MAX));
+		else
+			wr32(hw, TXGBE_PSRVLANPLM(1),
+				((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
+	}
+
+	/* Tx General Switch Control Enables VMDQ loopback */
+	if (cfg->enable_loop_back) {
+		wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);
+		for (i = 0; i < 64; i++)
+			wr32m(hw, TXGBE_POOLETHCTL(i),
+				TXGBE_POOLETHCTL_LLB, TXGBE_POOLETHCTL_LLB);
+	}
+
+	txgbe_flush(hw);
+}
+
+/*
+ * txgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
+ * @hw: pointer to hardware structure
+ */
+static void
+txgbe_vmdq_tx_hw_configure(struct txgbe_hw *hw)
+{
+	uint32_t reg;
+	uint32_t q;
+
+	PMD_INIT_FUNC_TRACE();
+	/* PF VF Transmit Enable */
+	wr32(hw, TXGBE_POOLTXENA(0), UINT32_MAX);
+	wr32(hw, TXGBE_POOLTXENA(1), UINT32_MAX);
+
+	/* Disable the Tx desc arbiter */
+	reg = rd32(hw, TXGBE_ARBTXCTL);
+	reg |= TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, reg);
+
+	wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK,
+	      TXGBE_PORTCTL_NUMVT_64);
+
+	/* Disable drop for all queues */
+	for (q = 0; q < 128; q++) {
+		u32 val = 1 << (q % 32);
+		wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
+	}
+
+	/* Enable the Tx desc arbiter */
+	reg = rd32(hw, TXGBE_ARBTXCTL);
+	reg &= ~TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, reg);
+
+	txgbe_flush(hw);
+}
+
 static int __rte_cold
 txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
 {
@@ -2587,6 +2726,119 @@ txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
 	return 0;
 }
 
+static int
+txgbe_config_vf_default(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t mrqc;
+
+	mrqc = rd32(hw, TXGBE_PORTCTL);
+	mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
+	switch (RTE_ETH_DEV_SRIOV(dev).active) {
+	case ETH_64_POOLS:
+		mrqc |= TXGBE_PORTCTL_NUMVT_64;
+		break;
+
+	case ETH_32_POOLS:
+		mrqc |= TXGBE_PORTCTL_NUMVT_32;
+		break;
+
+	case ETH_16_POOLS:
+		mrqc |= TXGBE_PORTCTL_NUMVT_16;
+		break;
+	default:
+		PMD_INIT_LOG(ERR,
+			"invalid pool number in IOV mode");
+		return 0;
+	}
+
+	wr32(hw, TXGBE_PORTCTL, mrqc);
+
+	return 0;
+}
+
+static int
+txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
+{
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		/*
+		 * SRIOV inactive scheme
+		 * VMDq multi-queue setting
+		 */
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		case ETH_MQ_RX_VMDQ_ONLY:
+			txgbe_vmdq_rx_hw_configure(dev);
+			break;
+
+		case ETH_MQ_RX_NONE:
+		default:
+			/* if mq_mode is none, disable rss mode. */
+			txgbe_rss_disable(dev);
+			break;
+		}
+	} else {
+		/* SRIOV active scheme */
+		switch (dev->data->dev_conf.rxmode.mq_mode) {
+		default:
+			txgbe_config_vf_default(dev);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int
+txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
+{
+	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+	uint32_t mtqc;
+	uint32_t rttdcs;
+
+	/* disable arbiter */
+	rttdcs = rd32(hw, TXGBE_ARBTXCTL);
+	rttdcs |= TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, rttdcs);
+
+	if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+		/*
+		 * SRIOV inactive scheme
+		 * any DCB w/o VMDq multi-queue setting
+		 */
+		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+			txgbe_vmdq_tx_hw_configure(hw);
+		else
+			wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
+	} else {
+		switch (RTE_ETH_DEV_SRIOV(dev).active) {
+		/*
+		 * SRIOV active scheme
+		 * FIXME if support DCB together with VMDq & SRIOV
+		 */
+		case ETH_64_POOLS:
+			mtqc = TXGBE_PORTCTL_NUMVT_64;
+			break;
+		case ETH_32_POOLS:
+			mtqc = TXGBE_PORTCTL_NUMVT_32;
+			break;
+		case ETH_16_POOLS:
+			mtqc = TXGBE_PORTCTL_NUMVT_16;
+			break;
+		default:
+			mtqc = 0;
+			PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
+		}
+		wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mtqc);
+	}
+
+	/* re-enable arbiter */
+	rttdcs &= ~TXGBE_ARBTXCTL_DIA;
+	wr32(hw, TXGBE_ARBTXCTL, rttdcs);
+
+	return 0;
+}
+
 /**
  * txgbe_get_rscctl_maxdesc
  *
@@ -2919,6 +3171,11 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
 	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
 		dev->data->scattered_rx = 1;
 
+	/*
+	 * Device configured with multiple RX queues.
+	 */
+	txgbe_dev_mq_rx_configure(dev);
+
 	/*
 	 * Setup the Checksum Register.
 	 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
@@ -2980,6 +3237,9 @@ txgbe_dev_tx_init(struct rte_eth_dev *dev)
 		wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
 		wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
 	}
+
+	/* Device configured with multiple TX queues. */
+	txgbe_dev_mq_tx_configure(dev);
 }
 
 /*
-- 
2.18.4