From: Pawel Wodkowski
To: dev@dpdk.org
Date: Mon, 19 Jan 2015 14:02:30 +0100
Message-Id: <1421672551-11652-4-git-send-email-pawelx.wodkowski@intel.com>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1421672551-11652-1-git-send-email-pawelx.wodkowski@intel.com>
References: <1421077843-8492-1-git-send-email-michalx.k.jastrzebski@intel.com>
 <1421672551-11652-1-git-send-email-pawelx.wodkowski@intel.com>
Subject: [dpdk-dev] [PATCH v2 3/4] pmd: add support for DCB in SRIOV mode for ixgbe driver.

Add support for DCB in SRIOV mode. When PFC is not enabled, this feature
can be used to give a VF multiple queues: up to 8 queues per VF when the
number of VFs is less than or equal to 16, or 4 queues per VF when the
number of VFs is less than or equal to 32. The PF must initialize Rx in
ETH_MQ_RX_VMDQ_DCB mode and Tx in ETH_MQ_TX_VMDQ_DCB mode. A VF should
initialize Rx in ETH_MQ_RX_DCB mode and Tx in ETH_MQ_TX_DCB mode to use
multiple queues and/or DCB.

Signed-off-by: Pawel Wodkowski
---
 lib/librte_ether/rte_ethdev.c     | 32 ++++++++++++++++------------
 lib/librte_ether/rte_ethdev.h     |  2 +-
 lib/librte_pmd_ixgbe/ixgbe_pf.c   | 42 +++++++++++++++++++++++++++----------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c |  7 +++----
 4 files changed, 54 insertions(+), 29 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 85385f8..115465e 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -532,6 +532,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		      const struct rte_eth_conf *dev_conf)
 {
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	struct rte_eth_dev_info dev_info;
 
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
@@ -553,8 +554,9 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 
 		switch (dev_conf->rxmode.mq_mode) {
 		case ETH_MQ_RX_VMDQ_DCB:
+			break;
 		case ETH_MQ_RX_VMDQ_DCB_RSS:
-			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
+			/* DCB+RSS VMDQ in SRIOV mode, not implemented yet */
 			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
 					" SRIOV active, "
 					"unsupported VMDQ mq_mode rx %u\n",
@@ -589,13 +591,8 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			/* DCB VMDQ in SRIOV mode, not implement yet */
-			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
-					" SRIOV active, "
-					"unsupported VMDQ mq_mode tx %u\n",
-					port_id, dev_conf->txmode.mq_mode);
-			return (-EINVAL);
+		case ETH_MQ_TX_VMDQ_DCB: /* DCB VMDQ in SRIOV mode */
+			break;
 		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
@@ -612,7 +609,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 			return (-EINVAL);
 		}
 	} else {
-		/* For vmdb+dcb mode check our configuration before we go further */
+		/* For vmdq+dcb mode check our configuration before we go further */
 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
@@ -651,11 +648,20 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 			}
 		}
 
-		/* For DCB mode check our configuration before we go further */
+		/* For DCB we need to obtain the maximum number of queues dynamically,
+		 * as it depends on the number of VFs exported by the PF */
+		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
+			(dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
+
+			FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+			(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+		}
+
+		/* For DCB mode check our configuration before we go further */
 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
-			if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
+			if (nb_rx_q != dev_info.max_rx_queues) {
 				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
 						"!= %d\n",
 						port_id, ETH_DCB_NUM_QUEUES);
@@ -675,7 +681,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
-			if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
+			if (nb_tx_q != dev_info.max_tx_queues) {
 				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
 						"!= %d\n",
 						port_id, ETH_DCB_NUM_QUEUES);
@@ -802,7 +808,7 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
-	/* multipe queue mode checking */
+	/* multiple queue mode checking */
 	diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
 	if (diag != 0) {
 		PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index ce0528f..6df3f29 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -299,7 +299,7 @@ enum rte_eth_rx_mq_mode {
 enum rte_eth_tx_mq_mode {
 	ETH_MQ_TX_NONE = 0,  /**< It is in neither DCB nor VT mode. */
 	ETH_MQ_TX_DCB,       /**< For TX side,only DCB is on. */
-	ETH_MQ_TX_VMDQ_DCB,  /**< For TX side,both DCB and VT is on. */
+	ETH_MQ_TX_VMDQ_DCB,  /**< For TX side,both DCB and VT is on. */
 	ETH_MQ_TX_VMDQ_ONLY, /**< Only VT on, no DCB */
 };
diff --git a/lib/librte_pmd_ixgbe/ixgbe_pf.c b/lib/librte_pmd_ixgbe/ixgbe_pf.c
index 93f6e43..b5f570d 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_pf.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_pf.c
@@ -231,19 +231,19 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
-        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 
-        /*
+	/*
 	 * enable vlan filtering and allow all vlan tags through
 	 */
-        vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-        vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
-        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
 
-        /* VFTA - enable all vlan filters */
-        for (i = 0; i < IXGBE_MAX_VFTA; i++) {
-                IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
-        }
+	/* VFTA - enable all vlan filters */
+	for (i = 0; i < IXGBE_MAX_VFTA; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+	}
 
 	/* Enable MAC Anti-Spoofing */
 	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
@@ -513,6 +513,7 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	struct ixgbe_vf_info *vfinfo =
 		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
 	uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	uint8_t pools;
 
 	/* Verify if the PF supports the mbox APIs version or not */
 	switch (vfinfo[vf].api_version) {
@@ -524,8 +525,27 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	}
 
 	/* Notify VF of Rx and Tx queue number */
-	msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
-	msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+		pools = dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf.nb_queue_pools;
+		if (pools <= 16)
+			msgbuf[IXGBE_VF_RX_QUEUES] = 8;
+		else if (pools <= 32)
+			msgbuf[IXGBE_VF_RX_QUEUES] = 4;
+		else
+			msgbuf[IXGBE_VF_RX_QUEUES] = 1;
+	} else
+		msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+
+	if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+		pools = dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools;
+		if (pools <= 16)
+			msgbuf[IXGBE_VF_TX_QUEUES] = 8;
+		else if (pools <= 32)
+			msgbuf[IXGBE_VF_TX_QUEUES] = 4;
+		else
+			msgbuf[IXGBE_VF_TX_QUEUES] = 1;
+	} else
+		msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
 
 	/* Notify VF of default queue */
 	msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 840bc07..eaed280 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -3166,10 +3166,9 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 
 	/* check support mq_mode for DCB */
 	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
-		return;
-
-	if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
+	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
+	    (dev_conf->txmode.mq_mode != ETH_MQ_TX_VMDQ_DCB) &&
+	    (dev_conf->txmode.mq_mode != ETH_MQ_TX_DCB))
 		return;
 
 	/** Configure DCB hardware **/
-- 
1.7.9.5
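
As an illustration of the PF/VF usage described in the commit message, below is a
minimal sketch (not part of the patch) of how an application might select these
modes through the standard ethdev API. The port ids, queue counts and the 16-pool
setting are assumptions chosen for the example only.

#include <rte_ethdev.h>

/* PF side: request VMDQ+DCB so that queue pools are carved out for the VFs.
 * With up to 16 pools each VF gets up to 8 queues, with up to 32 pools it
 * gets 4 (see ixgbe_get_vf_queues() in this patch). */
static int
pf_configure_vmdq_dcb(uint8_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_DCB },
		.txmode = { .mq_mode = ETH_MQ_TX_VMDQ_DCB },
	};

	conf.rx_adv_conf.vmdq_dcb_conf.nb_queue_pools = ETH_16_POOLS;
	conf.tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools = ETH_16_POOLS;

	/* the PF keeps one rx/tx queue pair for itself in this example */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}

/* VF side: request plain DCB; nb_q should match the queue count the PF
 * exports for the VF (as reported via rte_eth_dev_info_get()), e.g. 8. */
static int
vf_configure_dcb(uint8_t port_id, uint16_t nb_q)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_DCB },
		.txmode = { .mq_mode = ETH_MQ_TX_DCB },
	};

	return rte_eth_dev_configure(port_id, nb_q, nb_q, &conf);
}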