From mboxrd@z Thu Jan 1 00:00:00 1970
From: Jiawen Wu
To: dev@dpdk.org
Cc: Jiawen Wu
Date: Mon, 19 Oct 2020 16:53:31 +0800
Message-Id: <20201019085415.82207-15-jiawenwu@trustnetic.com>
X-Mailer: git-send-email 2.18.4
In-Reply-To: <20201019085415.82207-1-jiawenwu@trustnetic.com>
References: <20201019085415.82207-1-jiawenwu@trustnetic.com>
Subject: [dpdk-dev] [PATCH v4 14/58] net/txgbe: add device configure operation

Add device configure operation. The new .dev_configure callback checks
the requested Rx/Tx multi-queue mode (RSS, VMDq and DCB, with and
without SR-IOV) and the queue counts before the port can be started.

Signed-off-by: Jiawen Wu
---
 doc/guides/nics/txgbe.rst        |   1 +
 drivers/net/txgbe/txgbe_ethdev.c | 195 +++++++++++++++++++++++++++++++
 drivers/net/txgbe/txgbe_ethdev.h |   7 ++
 3 files changed, 203 insertions(+)

diff --git a/doc/guides/nics/txgbe.rst b/doc/guides/nics/txgbe.rst
index 994ea0583..78cb611c2 100644
--- a/doc/guides/nics/txgbe.rst
+++ b/doc/guides/nics/txgbe.rst
@@ -10,6 +10,7 @@ for Wangxun 10 Gigabit Ethernet NICs.
 Features
 --------
 
+- Multiple queues for TX and RX
 - Link state information
 
 Prerequisites
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 2c4f17cce..5e866461f 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -299,6 +299,200 @@ static struct rte_pci_driver rte_txgbe_pmd = {
 	.remove = eth_txgbe_pci_remove,
 };
 
+static int
+txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+	switch (nb_rx_q) {
+	case 1:
+	case 2:
+		RTE_ETH_DEV_SRIOV(dev).active = ETH_64_POOLS;
+		break;
+	case 4:
+		RTE_ETH_DEV_SRIOV(dev).active = ETH_32_POOLS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+		TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	return 0;
+}
+
+static int
+txgbe_check_mq_mode(struct rte_eth_dev *dev)
+{
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+	uint16_t nb_rx_q = dev->data->nb_rx_queues;
+	uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+		/* check multi-queue mode */
+		switch (dev_conf->rxmode.mq_mode) {
+		case ETH_MQ_RX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+			break;
+		case ETH_MQ_RX_VMDQ_DCB_RSS:
+			/* DCB/RSS VMDQ in SRIOV mode, not implemented yet */
+			PMD_INIT_LOG(ERR, "SRIOV active,"
+					" unsupported mq_mode rx %d.",
+					dev_conf->rxmode.mq_mode);
+			return -EINVAL;
+		case ETH_MQ_RX_RSS:
+		case ETH_MQ_RX_VMDQ_RSS:
+			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+			if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)
+				if (txgbe_check_vf_rss_rxq_num(dev, nb_rx_q)) {
+					PMD_INIT_LOG(ERR, "SRIOV is active,"
+						" invalid queue number"
+						" for VMDQ RSS, allowed"
+						" values are 1, 2 or 4.");
+					return -EINVAL;
+				}
+			break;
+		case ETH_MQ_RX_VMDQ_ONLY:
+		case ETH_MQ_RX_NONE:
+			/* if no mq mode is configured, use the default scheme */
+			dev->data->dev_conf.rxmode.mq_mode =
+				ETH_MQ_RX_VMDQ_ONLY;
+			break;
+		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB */
+			/* SRIOV only works in VMDq enable mode */
+			PMD_INIT_LOG(ERR, "SRIOV is active,"
+					" wrong mq_mode rx %d.",
+					dev_conf->rxmode.mq_mode);
+			return -EINVAL;
+		}
+
+		switch (dev_conf->txmode.mq_mode) {
+		case ETH_MQ_TX_VMDQ_DCB:
+			PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+			break;
+		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+			dev->data->dev_conf.txmode.mq_mode =
+				ETH_MQ_TX_VMDQ_ONLY;
+			break;
+		}
+
+		/* check valid queue number */
+		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+			PMD_INIT_LOG(ERR, "SRIOV is active,"
+					" nb_rx_q=%d nb_tx_q=%d queue number"
+					" must be less than or equal to %d.",
+					nb_rx_q, nb_tx_q,
+					RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+			return -EINVAL;
+		}
+	} else {
+		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB_RSS) {
+			PMD_INIT_LOG(ERR, "VMDQ+DCB+RSS mq_mode is"
+					  " not supported.");
+			return -EINVAL;
+		}
+		/* check configuration for VMDq+DCB mode */
+		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+			const struct rte_eth_vmdq_dcb_conf *conf;
+
+			if (nb_rx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
+				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_rx_q != %d.",
+						TXGBE_VMDQ_DCB_NB_QUEUES);
+				return -EINVAL;
+			}
+			conf = &dev_conf->rx_adv_conf.vmdq_dcb_conf;
+			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+			      conf->nb_queue_pools == ETH_32_POOLS)) {
+				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+						" nb_queue_pools must be %d or %d.",
+						ETH_16_POOLS, ETH_32_POOLS);
+				return -EINVAL;
+			}
+		}
+		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+			const struct rte_eth_vmdq_dcb_tx_conf *conf;
+
+			if (nb_tx_q != TXGBE_VMDQ_DCB_NB_QUEUES) {
+				PMD_INIT_LOG(ERR, "VMDQ+DCB, nb_tx_q != %d",
+						TXGBE_VMDQ_DCB_NB_QUEUES);
+				return -EINVAL;
+			}
+			conf = &dev_conf->tx_adv_conf.vmdq_dcb_tx_conf;
+			if (!(conf->nb_queue_pools == ETH_16_POOLS ||
+			      conf->nb_queue_pools == ETH_32_POOLS)) {
+				PMD_INIT_LOG(ERR, "VMDQ+DCB selected,"
+						" nb_queue_pools != %d and"
+						" nb_queue_pools != %d.",
+						ETH_16_POOLS, ETH_32_POOLS);
+				return -EINVAL;
+			}
+		}
+
+		/* For DCB mode check our configuration before we go further */
+		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+			const struct rte_eth_dcb_rx_conf *conf;
+
+			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
+			if (!(conf->nb_tcs == ETH_4_TCS ||
+			      conf->nb_tcs == ETH_8_TCS)) {
+				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+						" and nb_tcs != %d.",
+						ETH_4_TCS, ETH_8_TCS);
+				return -EINVAL;
+			}
+		}
+
+		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+			const struct rte_eth_dcb_tx_conf *conf;
+
+			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
+			if (!(conf->nb_tcs == ETH_4_TCS ||
+			      conf->nb_tcs == ETH_8_TCS)) {
+				PMD_INIT_LOG(ERR, "DCB selected, nb_tcs != %d"
+						" and nb_tcs != %d.",
+						ETH_4_TCS, ETH_8_TCS);
+				return -EINVAL;
+			}
+		}
+	}
+	return 0;
+}
+
+static int
+txgbe_dev_configure(struct rte_eth_dev *dev)
+{
+	struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
+	/* multiple queue mode checking */
+	ret = txgbe_check_mq_mode(dev);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "txgbe_check_mq_mode fails with %d.",
+			    ret);
+		return ret;
+	}
+
+	/* set flag to update link status after init */
+	intr->flags |= TXGBE_FLAG_NEED_LINK_UPDATE;
+
+	/*
+	 * Initialize to TRUE. If any Rx queue doesn't meet the bulk
+	 * allocation preconditions, we will reset it.
+	 */
+	adapter->rx_bulk_alloc_allowed = true;
+
+	return 0;
+}
 
 static void
 txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
@@ -808,6 +1002,7 @@ txgbe_configure_msix(struct rte_eth_dev *dev)
 }
 
 static const struct eth_dev_ops txgbe_eth_dev_ops = {
+	.dev_configure = txgbe_dev_configure,
 	.dev_infos_get = txgbe_dev_info_get,
 };
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 2c13da38f..8dd6c36c2 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -19,6 +19,9 @@
  * FreeBSD driver.
  */
 #define TXGBE_HKEY_MAX_INDEX 10
+/* Default value of Max Rx Queue */
+#define TXGBE_MAX_RX_QUEUE_NUM 128
+#define TXGBE_VMDQ_DCB_NB_QUEUES TXGBE_MAX_RX_QUEUE_NUM
 
 #define TXGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */
 
@@ -51,8 +54,12 @@ struct txgbe_interrupt {
 struct txgbe_adapter {
 	struct txgbe_hw hw;
 	struct txgbe_interrupt intr;
+	bool rx_bulk_alloc_allowed;
 };
 
+#define TXGBE_DEV_ADAPTER(dev) \
+	((struct txgbe_adapter *)(dev)->data->dev_private)
+
 #define TXGBE_DEV_HW(dev) \
 	(&((struct txgbe_adapter *)(dev)->data->dev_private)->hw)
-- 
2.18.4
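
For reviewers who want to exercise the new callback, here is a minimal
application-side sketch (not part of this patch; the helper name
configure_port, the port id and the queue counts are illustrative) that
reaches txgbe_dev_configure() and txgbe_check_mq_mode() through the
generic rte_eth_dev_configure() API:

	#include <rte_ethdev.h>

	/* Configure a port with 4 Rx / 4 Tx queues in RSS mode.
	 * rte_eth_dev_configure() invokes the PMD's .dev_configure hook,
	 * i.e. txgbe_dev_configure(), which runs txgbe_check_mq_mode()
	 * on these settings before any queue can be set up.
	 */
	static int
	configure_port(uint16_t port_id)
	{
		struct rte_eth_conf port_conf = {
			.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
			.txmode = { .mq_mode = ETH_MQ_TX_NONE },
		};

		/* 4 Rx and 4 Tx queues are illustrative values */
		return rte_eth_dev_configure(port_id, 4, 4, &port_conf);
	}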