From: Pawel Wodkowski <pawelx.wodkowski@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v4 4/7] move rte_eth_dev_check_mq_mode() logic to driver
Date: Thu, 19 Feb 2015 16:54:46 +0100 [thread overview]
Message-ID: <1424361289-30718-5-git-send-email-pawelx.wodkowski@intel.com> (raw)
In-Reply-To: <1424361289-30718-1-git-send-email-pawelx.wodkowski@intel.com>
Function rte_eth_dev_check_mq_mode() is driver specific. It should be
done in the PF configuration phase. This patch moves the igb/ixgbe
driver-specific mq check and SRIOV configuration code to the driver
part. Log messages are also rewritten to be shorter and more descriptive.
Signed-off-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
---
lib/librte_ether/rte_ethdev.c | 197 -----------------------------------
lib/librte_pmd_e1000/igb_ethdev.c | 43 ++++++++
lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 105 ++++++++++++++++++-
lib/librte_pmd_ixgbe/ixgbe_ethdev.h | 5 +-
lib/librte_pmd_ixgbe/ixgbe_pf.c | 202 +++++++++++++++++++++++++++++++-----
5 files changed, 327 insertions(+), 225 deletions(-)
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 4007054..aa27e39 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -502,195 +502,6 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
return (0);
}
-static int
-rte_eth_dev_check_vf_rss_rxq_num(uint8_t port_id, uint16_t nb_rx_q)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- switch (nb_rx_q) {
- case 1:
- case 2:
- RTE_ETH_DEV_SRIOV(dev).active =
- ETH_64_POOLS;
- break;
- case 4:
- RTE_ETH_DEV_SRIOV(dev).active =
- ETH_32_POOLS;
- break;
- default:
- return -EINVAL;
- }
-
- RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool = nb_rx_q;
- RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
- dev->pci_dev->max_vfs * nb_rx_q;
-
- return 0;
-}
-
-static int
-rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
- const struct rte_eth_conf *dev_conf)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-
- if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
- /* check multi-queue mode */
- if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
- (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
- (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
- /* SRIOV only works in VMDq enable mode */
- PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
- " SRIOV active, "
- "wrong VMDQ mq_mode rx %u tx %u\n",
- port_id,
- dev_conf->rxmode.mq_mode,
- dev_conf->txmode.mq_mode);
- return (-EINVAL);
- }
-
- switch (dev_conf->rxmode.mq_mode) {
- case ETH_MQ_RX_VMDQ_DCB:
- case ETH_MQ_RX_VMDQ_DCB_RSS:
- /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
- PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
- " SRIOV active, "
- "unsupported VMDQ mq_mode rx %u\n",
- port_id, dev_conf->rxmode.mq_mode);
- return (-EINVAL);
- case ETH_MQ_RX_RSS:
- PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
- " SRIOV active, "
- "Rx mq mode is changed from:"
- "mq_mode %u into VMDQ mq_mode %u\n",
- port_id,
- dev_conf->rxmode.mq_mode,
- dev->data->dev_conf.rxmode.mq_mode);
- case ETH_MQ_RX_VMDQ_RSS:
- dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
- if (nb_rx_q <= RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool)
- if (rte_eth_dev_check_vf_rss_rxq_num(port_id, nb_rx_q) != 0) {
- PMD_DEBUG_TRACE("ethdev port_id=%d"
- " SRIOV active, invalid queue"
- " number for VMDQ RSS, allowed"
- " value are 1, 2 or 4\n",
- port_id);
- return -EINVAL;
- }
- break;
- default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
- /* if nothing mq mode configure, use default scheme */
- dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
- if (RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool > 1)
- RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool = 1;
- break;
- }
-
- switch (dev_conf->txmode.mq_mode) {
- case ETH_MQ_TX_VMDQ_DCB:
- /* DCB VMDQ in SRIOV mode, not implement yet */
- PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
- " SRIOV active, "
- "unsupported VMDQ mq_mode tx %u\n",
- port_id, dev_conf->txmode.mq_mode);
- return (-EINVAL);
- default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
- /* if nothing mq mode configure, use default scheme */
- dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
- break;
- }
-
- /* check valid queue number */
- if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool) ||
- (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
- "queue number must less equal to %d\n",
- port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
- return (-EINVAL);
- }
- } else {
- /* For vmdb+dcb mode check our configuration before we go further */
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
- const struct rte_eth_vmdq_dcb_conf *conf;
-
- if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
- PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
- "!= %d\n",
- port_id, ETH_VMDQ_DCB_NUM_QUEUES);
- return (-EINVAL);
- }
- conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
- if (! (conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
- "nb_queue_pools must be %d or %d\n",
- port_id, ETH_16_POOLS, ETH_32_POOLS);
- return (-EINVAL);
- }
- }
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
- const struct rte_eth_vmdq_dcb_tx_conf *conf;
-
- if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
- PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
- "!= %d\n",
- port_id, ETH_VMDQ_DCB_NUM_QUEUES);
- return (-EINVAL);
- }
- conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
- if (! (conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
- "nb_queue_pools != %d or nb_queue_pools "
- "!= %d\n",
- port_id, ETH_16_POOLS, ETH_32_POOLS);
- return (-EINVAL);
- }
- }
-
- /* For DCB mode check our configuration before we go further */
- if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
- const struct rte_eth_dcb_rx_conf *conf;
-
- if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
- PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
- "!= %d\n",
- port_id, ETH_DCB_NUM_QUEUES);
- return (-EINVAL);
- }
- conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
- if (! (conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
- "nb_tcs != %d or nb_tcs "
- "!= %d\n",
- port_id, ETH_4_TCS, ETH_8_TCS);
- return (-EINVAL);
- }
- }
-
- if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
- const struct rte_eth_dcb_tx_conf *conf;
-
- if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
- PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
- "!= %d\n",
- port_id, ETH_DCB_NUM_QUEUES);
- return (-EINVAL);
- }
- conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
- if (! (conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
- "nb_tcs != %d or nb_tcs "
- "!= %d\n",
- port_id, ETH_4_TCS, ETH_8_TCS);
- return (-EINVAL);
- }
- }
- }
- return 0;
-}
-
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
@@ -798,14 +609,6 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
ETHER_MAX_LEN;
}
- /* multipe queue mode checking */
- diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
- if (diag != 0) {
- PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
- port_id, diag);
- return diag;
- }
-
/*
* Setup new number of RX/TX queues and reconfigure device.
*/
diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c
index d451086..5c922df 100644
--- a/lib/librte_pmd_e1000/igb_ethdev.c
+++ b/lib/librte_pmd_e1000/igb_ethdev.c
@@ -742,6 +742,49 @@ eth_igb_configure(struct rte_eth_dev *dev)
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+ enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
+
+ if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+ /* Check multi-queue mode.
+ * To avoid breaking software we accept ETH_MQ_RX_NONE as this might be
+ * used to turn off VLAN filter.
+ *
+ * FIXME if RSS together with VMDq & SRIOV is supported
+ */
+ if (rx_mq_mode != ETH_MQ_RX_NONE &&
+ (rx_mq_mode & ETH_MQ_RX_VMDQ_ONLY) == 0) {
+ PMD_INIT_LOG(WARNING, " SRIOV active, RX mode %d is not supported. "
+ "Driver will behave as in %d mode as fallback.",
+ rx_mq_mode, ETH_MQ_RX_NONE);
+ }
+
+ /* TX mode is not used in this driver so the mode might be ignored. */
+ if (tx_mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+ /* SRIOV only works in VMDq enable mode */
+ PMD_INIT_LOG(WARNING, "TX mode %d is not supported in SRIOV. "
+ "Driver will behave as in %d mode as fallback.",
+ tx_mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+ }
+ } else {
+ /*
+ * To avoid breaking software that sets an invalid mode, only display
+ * a warning if an invalid mode is used.
+ */
+ if ((rx_mq_mode & (ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG))
+ != rx_mq_mode) {
+ PMD_INIT_LOG(WARNING, "RX mode %d is not supported. Driver will "
+ "behave as in %d mode as fallback.", rx_mq_mode,
+ rx_mq_mode & (ETH_MQ_RX_RSS_FLAG | ETH_MQ_RX_VMDQ_FLAG));
+ }
+
+ if (tx_mq_mode != ETH_MQ_TX_NONE) {
+ PMD_INIT_LOG(WARNING, "TX mode %d is not supported. "
+ "Driver will behave as in %d mode as fallback.",
+ tx_mq_mode, ETH_MQ_TX_NONE);
+ }
+ }
+
PMD_INIT_FUNC_TRACE();
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
PMD_INIT_FUNC_TRACE();
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 02b9cda..8e9da3b 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -863,7 +863,8 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
"Failed to allocate %u bytes needed to store "
"MAC addresses",
ETHER_ADDR_LEN * hw->mac.num_rar_entries);
- return -ENOMEM;
+ diag = -ENOMEM;
+ goto error;
}
/* Copy the permanent MAC address */
ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
@@ -876,7 +877,8 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
PMD_INIT_LOG(ERR,
"Failed to allocate %d bytes needed to store MAC addresses",
ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
- return -ENOMEM;
+ diag = -ENOMEM;
+ goto error;
}
/* initialize the vfta */
@@ -886,7 +888,13 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
memset(hwstrip, 0, sizeof(*hwstrip));
/* initialize PF if max_vfs not zero */
- ixgbe_pf_host_init(eth_dev);
+ diag = ixgbe_pf_host_init(eth_dev);
+ if (diag < 0) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+ goto error;
+ }
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
/* let hardware know driver is loaded */
@@ -918,6 +926,11 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
ixgbe_enable_intr(eth_dev);
return 0;
+
+error:
+ rte_free(eth_dev->data->hash_mac_addrs);
+ rte_free(eth_dev->data->mac_addrs);
+ return diag;
}
@@ -1434,7 +1447,93 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct rte_eth_dev_info dev_info;
+ int retval;
+
PMD_INIT_FUNC_TRACE();
+ retval = ixgbe_pf_configure_mq_sriov(dev);
+ if (retval <= 0)
+ return retval;
+
+ uint16_t nb_rx_q = dev->data->nb_rx_queues;
+ uint16_t nb_tx_q = dev->data->nb_tx_queues;
+
+ /* For DCB we need to obtain the maximum number of queues dynamically,
+ * as this depends on the max VF exported in PF. */
+ if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
+ (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
+ /* Use dev_infos_get field as this might be pointer to PF or VF. */
+ (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+ }
+
+ /* For vmdq+dcb mode check our configuration before we go further */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_conf *conf;
+
+ if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
+ PMD_INIT_LOG(ERR, " VMDQ+DCB, nb_rx_q != %d\n",
+ ETH_VMDQ_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
+ if (conf->nb_queue_pools != ETH_16_POOLS &&
+ conf->nb_queue_pools != ETH_32_POOLS) {
+ PMD_INIT_LOG(ERR, " VMDQ+DCB selected, "
+ "number of RX queue pools must be %d or %d\n",
+ ETH_16_POOLS, ETH_32_POOLS);
+ return (-EINVAL);
+ }
+ } else if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+ /* For DCB mode check out configuration before we go further */
+ const struct rte_eth_dcb_rx_conf *conf;
+
+ if (nb_rx_q != dev_info.max_rx_queues) {
+ PMD_INIT_LOG(ERR, " DCB, number of RX queues != %d\n",
+ dev_info.max_rx_queues);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
+ if (conf->nb_tcs != ETH_4_TCS &&
+ conf->nb_tcs != ETH_8_TCS) {
+ PMD_INIT_LOG(ERR, " DCB, number of RX TC must be %d or %d\n",
+ ETH_4_TCS, ETH_8_TCS);
+ return (-EINVAL);
+ }
+ }
+
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_tx_conf *conf;
+
+ if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
+ PMD_INIT_LOG(ERR, " VMDQ+DCB, number of TX queues != %d\n",
+ ETH_VMDQ_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
+ if (conf->nb_queue_pools != ETH_16_POOLS &&
+ conf->nb_queue_pools != ETH_32_POOLS) {
+ PMD_INIT_LOG(ERR, " VMDQ+DCB selected, "
+ "number of TX queue pools must be %d or %d\n",
+ ETH_16_POOLS, ETH_32_POOLS);
+ return (-EINVAL);
+ }
+ } else if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ const struct rte_eth_dcb_tx_conf *conf;
+
+ if (nb_tx_q != dev_info.max_tx_queues) {
+ PMD_INIT_LOG(ERR, " DCB, number of TX queues must be %d\n",
+ dev_info.max_tx_queues);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
+ if (conf->nb_tcs != ETH_4_TCS &&
+ conf->nb_tcs != ETH_8_TCS) {
+ PMD_INIT_LOG(ERR, " DCB, number of TX TC must be %d or %d\n",
+ ETH_4_TCS, ETH_8_TCS);
+ return (-EINVAL);
+ }
+ }
/* set flag to update link status after init */
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
index 1383194..e70a6e8 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -348,11 +348,14 @@ void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
-void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
+int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
+int ixgbe_pf_configure_mq_sriov(struct rte_eth_dev *dev);
+
int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
+
#endif /* _IXGBE_ETHDEV_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe_pf.c b/lib/librte_pmd_ixgbe/ixgbe_pf.c
index 4103e97..a7b9333 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_pf.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_pf.c
@@ -91,7 +91,7 @@ ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
return 0;
}
-void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
+int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
struct ixgbe_vf_info **vfinfo =
IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
@@ -101,39 +101,31 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int retval;
uint16_t vf_num;
- uint8_t nb_queue;
PMD_INIT_FUNC_TRACE();
- RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
- if (0 == (vf_num = dev_num_vf(eth_dev)))
- return;
+ /* Fill sriov structure using default configuration. */
+ retval = ixgbe_pf_configure_mq_sriov(eth_dev);
+ if (retval != 0) {
+ if (retval < 0)
+ PMD_INIT_LOG(ERR, " Setting up SRIOV with default device "
+ "configuration should not fail. This is a BUG.");
+ return 0;
+ }
+ vf_num = dev_num_vf(eth_dev);
*vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
- if (*vfinfo == NULL)
- rte_panic("Cannot allocate memory for private VF data\n");
+ if (*vfinfo == NULL) {
+ PMD_INIT_LOG(ERR, "Cannot allocate memory for private VF data.");
+ return (-ENOMEM);
+ }
memset(mirror_info,0,sizeof(struct ixgbe_mirror_info));
memset(uta_info,0,sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
- if (vf_num >= ETH_32_POOLS) {
- nb_queue = 2;
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
- } else if (vf_num >= ETH_16_POOLS) {
- nb_queue = 4;
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
- } else {
- nb_queue = 8;
- RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
- }
-
- RTE_ETH_DEV_SRIOV(eth_dev).nb_rx_q_per_pool = nb_queue;
- RTE_ETH_DEV_SRIOV(eth_dev).nb_tx_q_per_pool = nb_queue;
- RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
- RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
-
ixgbe_vf_perm_addr_gen(eth_dev, vf_num);
/* init_mailbox_params */
@@ -142,7 +134,169 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
/* set mb interrupt mask */
ixgbe_mb_intr_setup(eth_dev);
- return;
+ return 0;
+}
+
+
+/*
+ * Function that make SRIOV configuration, based on device configuration,
+ * number of requested queues and number of VF created.
+ * Function returns:
+ * 1 - SRIOV is not enabled (no VF created)
+ * 0 - proper SRIOV configuration found.
+ * -EINVAL - no suitable SRIOV configuration found.
+ */
+int
+ixgbe_pf_configure_mq_sriov(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
+ uint16_t vf_num;
+
+ vf_num = dev_num_vf(dev);
+ if (vf_num == 0) {
+ memset(sriov, 0, sizeof(*sriov));
+ return 1;
+ }
+
+ /* Check multi-queue mode. */
+ if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
+ (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
+ (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
+ /* SRIOV only works in VMDq enable mode */
+ PMD_INIT_LOG(ERR, " SRIOV active, "
+ "invalid VMDQ rx mode (%u) or tx (%u) mode.",
+ dev_conf->rxmode.mq_mode, dev_conf->txmode.mq_mode);
+ return (-EINVAL);
+ }
+
+ switch (dev_conf->rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_DCB:
+ if (vf_num <= ETH_16_POOLS)
+ sriov->nb_rx_q_per_pool = 8;
+ else if (vf_num <= ETH_32_POOLS)
+ sriov->nb_rx_q_per_pool = 4;
+ else {
+ PMD_INIT_LOG(ERR,
+ "DCB (SRIOV active) - VF count (%d) must be less or equal 32.",
+ vf_num);
+ return (-EINVAL);
+ }
+
+ if (dev->data->nb_rx_queues < sriov->nb_rx_q_per_pool) {
+ PMD_INIT_LOG(WARNING,
+ "DCB (SRIOV active) rx queues (%d) count is not equal %d.",
+ dev->data->nb_rx_queues,
+ sriov->nb_rx_q_per_pool);
+ }
+ break;
+ case ETH_MQ_RX_RSS:
+ PMD_INIT_LOG(INFO, "RSS (SRIOV active), "
+ "rx mq mode is changed from: mq_mode %u into VMDQ mq_mode %u.",
+ dev_conf->rxmode.mq_mode, dev->data->dev_conf.rxmode.mq_mode);
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
+ /* fallthrough */
+ case ETH_MQ_RX_VMDQ_RSS:
+ if (vf_num >= ETH_64_POOLS) {
+ /* FIXME: Is vf_num > 64 really supported by hardware? */
+ PMD_INIT_LOG(ERR, "RSS (SRIOV active), "
+ "VFs num must be less or equal 64.");
+ return (-EINVAL);
+ } else if (vf_num >= ETH_32_POOLS) {
+ if (dev->data->nb_rx_queues != 1 && dev->data->nb_rx_queues != 2) {
+ PMD_INIT_LOG(ERR, "RSS (SRIOV active, VF count >= 32),"
+ "invalid rx queues count %d. It must be 1 or 2.",
+ dev->data->nb_rx_queues);
+ return (-EINVAL);
+ }
+
+ sriov->nb_rx_q_per_pool = dev->data->nb_rx_queues;
+ } else {
+ /* FIXME: is VT(16) + RSS really supported? */
+ if (dev->data->nb_rx_queues != 4) {
+ PMD_INIT_LOG(ERR, "RSS (SRIOV active, VFs count < 32), "
+ "invalid rx queues count %d. It must be 4.",
+ dev->data->nb_rx_queues);
+ return (-EINVAL);
+ }
+
+ sriov->nb_rx_q_per_pool = 4;
+ }
+ break;
+ default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
+ /* if nothing mq mode configure, use default scheme */
+ if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY) {
+ PMD_INIT_LOG(INFO, "Rx mq mode changed from %u into VMDQ %u.",
+ dev->data->dev_conf.rxmode.mq_mode, ETH_MQ_RX_VMDQ_ONLY);
+
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ }
+
+ /* queue 0 of each pool is used. */
+ sriov->nb_rx_q_per_pool = 1;
+ break;
+ }
+
+ switch (dev_conf->txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB: /* DCB VMDQ in SRIOV mode*/
+ if (vf_num <= ETH_16_POOLS)
+ sriov->nb_tx_q_per_pool = 8;
+ else if (vf_num <= ETH_32_POOLS)
+ sriov->nb_tx_q_per_pool = 4;
+ else if (vf_num <= ETH_64_POOLS)
+ sriov->nb_tx_q_per_pool = 1;
+ else {
+ PMD_INIT_LOG(ERR, "DCB (SRIOV active), "
+ "VF count (%d) must be less or equal 64.",
+ vf_num);
+ return (-EINVAL);
+ }
+ break;
+ default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+ /* if nothing mq mode configure, use default scheme */
+ if (dev->data->dev_conf.txmode.mq_mode != ETH_MQ_TX_VMDQ_ONLY) {
+ PMD_INIT_LOG(INFO, "Tx mq mode is changed from %u into VMDQ %u.",
+ dev->data->dev_conf.txmode.mq_mode, ETH_MQ_TX_VMDQ_ONLY);
+
+ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+ }
+
+ /* queue 0 of each pool is used. */
+ sriov->nb_tx_q_per_pool = 1;
+ break;
+ }
+
+ sriov->def_vmdq_idx = vf_num;
+
+ /*
+ * Pools starts at 2xN, 4xN or 8xN
+ */
+ if (vf_num >= ETH_32_POOLS) {
+ /* This must be vf_num <= ETH_64_POOLS */
+ sriov->active = ETH_64_POOLS;
+ sriov->def_pool_q_idx = vf_num * 2;
+ } else if (vf_num >= ETH_16_POOLS) {
+ sriov->active = ETH_32_POOLS;
+ sriov->def_pool_q_idx = vf_num * 4;
+ } else {
+ sriov->active = ETH_16_POOLS;
+ sriov->def_pool_q_idx = vf_num * 8;
+ }
+
+ /* Check that the available queue count is not less than the allocated one. */
+ if (dev->data->nb_rx_queues > sriov->nb_rx_q_per_pool) {
+ PMD_INIT_LOG(ERR, "SRIOV active, rx queue count must be less or equal %d.",
+ sriov->nb_rx_q_per_pool);
+ return (-EINVAL);
+ }
+
+ if (dev->data->nb_tx_queues > sriov->nb_tx_q_per_pool) {
+ PMD_INIT_LOG(ERR, "SRIOV active, tx queue count must be less or equal %d.",
+ sriov->nb_tx_q_per_pool);
+ return (-EINVAL);
+ }
+
+ return 0;
}
int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
--
1.9.1
next prev parent reply other threads:[~2015-02-19 16:13 UTC|newest]
Thread overview: 35+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-01-12 15:50 [dpdk-dev] [PATCH 0/2] Enable DCB in SRIOV mode for ixgbe driver Michal Jastrzebski
2015-01-12 15:50 ` [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe Michal Jastrzebski
2015-01-13 10:14 ` Vlad Zolotarov
2015-01-13 11:00 ` Wodkowski, PawelX
2015-01-14 1:00 ` Ouyang, Changchun
2015-01-12 15:50 ` [dpdk-dev] [PATCH 2/2] testpmd: fix dcb in vt mode Michal Jastrzebski
2015-01-13 10:15 ` Vlad Zolotarov
2015-01-13 11:08 ` Wodkowski, PawelX
2015-01-13 9:50 ` [dpdk-dev] [PATCH 0/2] Enable DCB in SRIOV mode for ixgbe driver Wodkowski, PawelX
2015-01-13 10:11 ` Vlad Zolotarov
2015-01-19 13:02 ` [dpdk-dev] [PATCH v2 0/4] " Pawel Wodkowski
2015-01-19 13:02 ` [dpdk-dev] [PATCH v2 1/4] ethdev: Allow zero rx/tx queues in SRIOV mode Pawel Wodkowski
2015-01-19 13:02 ` [dpdk-dev] [PATCH v2 2/4] ethdev: prevent changing of nb_q_per_pool in rte_eth_dev_check_mq_mode() Pawel Wodkowski
2015-01-20 1:32 ` Ouyang, Changchun
2015-01-20 9:09 ` Wodkowski, PawelX
2015-01-19 13:02 ` [dpdk-dev] [PATCH v2 3/4] pmd: add support for DCB in SRIOV mode for ixgbe driver Pawel Wodkowski
2015-01-20 1:56 ` Ouyang, Changchun
2015-01-20 6:52 ` Thomas Monjalon
2015-01-19 13:02 ` [dpdk-dev] [PATCH v2 4/4] testpmd: fix dcb in vt mode Pawel Wodkowski
2015-02-19 15:54 ` [dpdk-dev] [PATCH v4 0/7] Enable DCB in SRIOV mode for ixgbe driver Pawel Wodkowski
2015-02-19 15:54 ` [dpdk-dev] [PATCH v4 1/7] ethdev: Allow zero rx/tx queues in SRIOV mode Pawel Wodkowski
2015-02-19 15:54 ` [dpdk-dev] [PATCH v4 2/7] pmd igb: fix VMDQ mode checking Pawel Wodkowski
2015-02-19 15:54 ` [dpdk-dev] [PATCH v4 3/7] pmd: igb/ixgbe split nb_q_per_pool to rx and tx nb_q_per_pool Pawel Wodkowski
2015-02-25 3:24 ` Ouyang, Changchun
2015-02-25 7:47 ` Pawel Wodkowski
2015-02-19 15:54 ` Pawel Wodkowski [this message]
2015-02-25 6:14 ` [dpdk-dev] [PATCH v4 4/7] move rte_eth_dev_check_mq_mode() logic to driver Ouyang, Changchun
2015-02-25 9:57 ` Pawel Wodkowski
2015-06-09 4:06 ` Wu, Jingjing
2015-02-19 15:54 ` [dpdk-dev] [PATCH v4 5/7] pmd ixgbe: enable DCB in SRIOV Pawel Wodkowski
2015-02-25 3:36 ` Ouyang, Changchun
2015-02-25 11:29 ` Pawel Wodkowski
2015-02-19 15:54 ` [dpdk-dev] [PATCH v4 6/7] tespmd: fix DCB in SRIOV mode support Pawel Wodkowski
2015-02-19 15:54 ` [dpdk-dev] [PATCH v4 7/7] pmd ixgbe: fix vlan setting in in PF Pawel Wodkowski
2015-06-08 3:00 ` [dpdk-dev] [PATCH v4 0/7] Enable DCB in SRIOV mode for ixgbe driver Zhang, Helin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1424361289-30718-5-git-send-email-pawelx.wodkowski@intel.com \
--to=pawelx.wodkowski@intel.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).