DPDK patches and discussions
From: "Chen Jing D(Mark)" <jing.d.chen@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 6/6] i40e: Add full VMDQ pools support
Date: Tue, 23 Sep 2014 21:14:07 +0800
Message-ID: <1411478047-1251-7-git-send-email-jing.d.chen@intel.com>
In-Reply-To: <1411478047-1251-1-git-send-email-jing.d.chen@intel.com>

From: "Chen Jing D(Mark)" <jing.d.chen@intel.com>

1. Rename functions i40e_vsi_* to i40e_dev_*, since the PF can contain
   more than one VSI once VMDQ is enabled.
2. Change i40e_dev_rx/tx_queue_setup so that they can set up queues
   that belong to VMDQ pools.
3. Add queue mapping, which converts between the queue index the
   application uses and the real NIC queue index (a standalone sketch
   of this mapping follows the "---" marker below).
4. Change i40e_dev_start/stop so that they can switch VMDQ queues.
5. Change i40e_pf_config_rss to calculate the actual number of main VSI
   queues after VMDQ pools are introduced.

Signed-off-by: Chen Jing D(Mark) <jing.d.chen@intel.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
Acked-by: Jijiang Liu <jijiang.liu@intel.com>
Acked-by: Huawei Xie <huawei.xie@intel.com>
---
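As an aside (not part of the patch itself), here is a minimal standalone
sketch of the queue mapping described in item 3 above. The structures and
names it uses (pool_ctx, vsi_ctx, get_vsi_by_qindex,
get_queue_offset_by_qindex) are simplified stand-ins rather than the real
i40e_pf/i40e_vsi definitions; it only shows how an application-visible
queue index is split into a VSI plus an offset, and how the hardware
register index (what the patch stores in rxq->reg_idx/txq->reg_idx) is
derived from vsi->base_queue.

#include <stdint.h>
#include <stdio.h>

struct vsi_ctx {
	uint16_t base_queue;   /* first hardware queue owned by this VSI */
	uint16_t nb_qps;       /* number of queue pairs in this VSI */
};

struct pool_ctx {
	struct vsi_ctx main_vsi;     /* main (LAN) VSI */
	struct vsi_ctx vmdq_vsi[2];  /* configured VMDQ VSIs */
	uint16_t nb_cfg_vmdq_vsi;
	uint16_t vmdq_nb_qps;        /* queue pairs per VMDQ VSI */
};

/* Map an application queue index to the VSI it belongs to. */
static const struct vsi_ctx *
get_vsi_by_qindex(const struct pool_ctx *pc, uint16_t queue_idx)
{
	if (queue_idx < pc->main_vsi.nb_qps)
		return &pc->main_vsi;

	queue_idx -= pc->main_vsi.nb_qps;
	if (queue_idx >= pc->nb_cfg_vmdq_vsi * pc->vmdq_nb_qps)
		return NULL;	/* beyond the configured VMDQ range */

	return &pc->vmdq_vsi[queue_idx / pc->vmdq_nb_qps];
}

/* Offset of an application queue index inside its VSI. */
static uint16_t
get_queue_offset_by_qindex(const struct pool_ctx *pc, uint16_t queue_idx)
{
	if (queue_idx < pc->main_vsi.nb_qps)
		return queue_idx;

	return (uint16_t)((queue_idx - pc->main_vsi.nb_qps) % pc->vmdq_nb_qps);
}

int main(void)
{
	/* Example layout: 4 main VSI queues, then 2 VMDQ VSIs of 4 queues each. */
	struct pool_ctx pc = {
		.main_vsi = { .base_queue = 1,  .nb_qps = 4 },
		.vmdq_vsi = {
			{ .base_queue = 33, .nb_qps = 4 },
			{ .base_queue = 37, .nb_qps = 4 },
		},
		.nb_cfg_vmdq_vsi = 2,
		.vmdq_nb_qps = 4,
	};
	uint16_t q;

	for (q = 0; q < 12; q++) {
		const struct vsi_ctx *vsi = get_vsi_by_qindex(&pc, q);
		uint16_t off = get_queue_offset_by_qindex(&pc, q);

		if (vsi == NULL)
			break;

		/* This sum is what the patch stores in rxq->reg_idx / txq->reg_idx */
		printf("app queue %2u -> hw queue %2u\n",
		       (unsigned)q, (unsigned)(vsi->base_queue + off));
	}
	return 0;
}

With this example layout, application queues 0-3 map to hardware queues
1-4 (main VSI) and 4-11 map to 33-40 (the two VMDQ VSIs), mirroring how
the patch fills reg_idx via i40e_pf_get_vsi_by_qindex() and
i40e_get_queue_offset_by_qindex().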
 lib/librte_pmd_i40e/i40e_ethdev.c |  183 +++++++++++++++++++++++++------------
 lib/librte_pmd_i40e/i40e_ethdev.h |    4 +-
 lib/librte_pmd_i40e/i40e_rxtx.c   |  125 +++++++++++++++++++++-----
 3 files changed, 231 insertions(+), 81 deletions(-)

diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index 3185654..9009bd4 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -167,7 +167,7 @@ static int i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
 static int i40e_get_cap(struct i40e_hw *hw);
 static int i40e_pf_parameter_init(struct rte_eth_dev *dev);
 static int i40e_pf_setup(struct i40e_pf *pf);
-static int i40e_vsi_init(struct i40e_vsi *vsi);
+static int i40e_dev_rxtx_init(struct i40e_pf *pf);
 static int i40e_vmdq_setup(struct rte_eth_dev *dev);
 static void i40e_stat_update_32(struct i40e_hw *hw, uint32_t reg,
 		bool offset_loaded, uint64_t *offset, uint64_t *stat);
@@ -770,8 +770,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct i40e_vsi *vsi = pf->main_vsi;
-	int ret;
+	struct i40e_vsi *main_vsi = pf->main_vsi;
+	int ret, i;
 
 	if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
 		(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
@@ -782,41 +782,53 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	}
 
 	/* Initialize VSI */
-	ret = i40e_vsi_init(vsi);
+	ret = i40e_dev_rxtx_init(pf);
 	if (ret != I40E_SUCCESS) {
-		PMD_DRV_LOG(ERR, "Failed to init VSI");
+		PMD_DRV_LOG(ERR, "Failed to init rx/tx queues\n");
 		goto err_up;
 	}
 
 	/* Map queues with MSIX interrupt */
-	i40e_vsi_queues_bind_intr(vsi);
-	i40e_vsi_enable_queues_intr(vsi);
+	i40e_vsi_queues_bind_intr(main_vsi);
+	i40e_vsi_enable_queues_intr(main_vsi);
+
+	/* Map VMDQ VSI queues with MSIX interrupt */
+	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+		i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi);
+		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
+	}
 
 	/* Enable all queues which have been configured */
-	ret = i40e_vsi_switch_queues(vsi, TRUE);
+	ret = i40e_dev_switch_queues(pf, TRUE);
 	if (ret != I40E_SUCCESS) {
 		PMD_DRV_LOG(ERR, "Failed to enable VSI");
 		goto err_up;
 	}
 
 	/* Enable receiving broadcast packets */
-	if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
-		ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+	ret = i40e_aq_set_vsi_broadcast(hw, main_vsi->seid, true, NULL);
+	if (ret != I40E_SUCCESS)
+		PMD_DRV_LOG(INFO, "fail to set vsi broadcast\n");
+
+	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+		ret = i40e_aq_set_vsi_broadcast(hw, pf->vmdq[i].vsi->seid,
+						true, NULL);
 		if (ret != I40E_SUCCESS)
-			PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
+			PMD_DRV_LOG(INFO, "fail to set vsi broadcast\n");
 	}
 
 	/* Apply link configure */
 	ret = i40e_apply_link_speed(dev);
 	if (I40E_SUCCESS != ret) {
-		PMD_DRV_LOG(ERR, "Fail to apply link setting");
+		PMD_DRV_LOG(ERR, "Fail to apply link setting\n");
 		goto err_up;
 	}
 
 	return I40E_SUCCESS;
 
 err_up:
-	i40e_vsi_switch_queues(vsi, FALSE);
+	i40e_dev_switch_queues(pf, FALSE);
+	i40e_dev_clear_queues(dev);
 
 	return ret;
 }
@@ -825,17 +837,26 @@ static void
 i40e_dev_stop(struct rte_eth_dev *dev)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	struct i40e_vsi *vsi = pf->main_vsi;
+	struct i40e_vsi *main_vsi = pf->main_vsi;
+	int i;
 
 	/* Disable all queues */
-	i40e_vsi_switch_queues(vsi, FALSE);
+	i40e_dev_switch_queues(pf, FALSE);
+
+	/* un-map queues with interrupt registers */
+	i40e_vsi_disable_queues_intr(main_vsi);
+	i40e_vsi_queues_unbind_intr(main_vsi);
+
+	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
+		i40e_vsi_disable_queues_intr(pf->vmdq[i].vsi);
+		i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
+	}
+
+	/* Clear all queues and release memory */
+	i40e_dev_clear_queues(dev);
 
 	/* Set link down */
 	i40e_dev_set_link_down(dev);
-
-	/* un-map queues with interrupt registers */
-	i40e_vsi_disable_queues_intr(vsi);
-	i40e_vsi_queues_unbind_intr(vsi);
 }
 
 static void
@@ -3083,11 +3104,11 @@ i40e_switch_tx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 
 /* Swith on or off the tx queues */
 static int
-i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
+i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
 {
-	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
+	struct rte_eth_dev_data *dev_data = pf->dev_data;
 	struct i40e_tx_queue *txq;
-	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
+	struct rte_eth_dev *dev = pf->adapter->eth_dev;
 	uint16_t i;
 	int ret;
 
@@ -3095,8 +3116,9 @@ i40e_vsi_switch_tx_queues(struct i40e_vsi *vsi, bool on)
 		txq = dev_data->tx_queues[i];
 		/* Don't operate the queue if not configured or
 		 * if starting only per queue */
-		if (!txq->q_set || (on && txq->start_tx_per_q))
+		if (!txq || !txq->q_set || (on && txq->start_tx_per_q))
 			continue;
+
 		if (on)
 			ret = i40e_dev_tx_queue_start(dev, i);
 		else
@@ -3161,11 +3183,11 @@ i40e_switch_rx_queue(struct i40e_hw *hw, uint16_t q_idx, bool on)
 }
 /* Switch on or off the rx queues */
 static int
-i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
+i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
 {
-	struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi);
+	struct rte_eth_dev_data *dev_data = pf->dev_data;
 	struct i40e_rx_queue *rxq;
-	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vsi);
+	struct rte_eth_dev *dev = pf->adapter->eth_dev;
 	uint16_t i;
 	int ret;
 
@@ -3173,7 +3195,7 @@ i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
 		rxq = dev_data->rx_queues[i];
 		/* Don't operate the queue if not configured or
 		 * if starting only per queue */
-		if (!rxq->q_set || (on && rxq->start_rx_per_q))
+		if (!rxq || !rxq->q_set || (on && rxq->start_rx_per_q))
 			continue;
 		if (on)
 			ret = i40e_dev_rx_queue_start(dev, i);
@@ -3188,26 +3210,26 @@ i40e_vsi_switch_rx_queues(struct i40e_vsi *vsi, bool on)
 
 /* Switch on or off all the rx/tx queues */
 int
-i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
+i40e_dev_switch_queues(struct i40e_pf *pf, bool on)
 {
 	int ret;
 
 	if (on) {
 		/* enable rx queues before enabling tx queues */
-		ret = i40e_vsi_switch_rx_queues(vsi, on);
+		ret = i40e_dev_switch_rx_queues(pf, on);
 		if (ret) {
-			PMD_DRV_LOG(ERR, "Failed to switch rx queues");
+			PMD_DRV_LOG(ERR, "Failed to switch rx queues\n");
 			return ret;
 		}
-		ret = i40e_vsi_switch_tx_queues(vsi, on);
+		ret = i40e_dev_switch_tx_queues(pf, on);
 	} else {
 		/* Stop tx queues before stopping rx queues */
-		ret = i40e_vsi_switch_tx_queues(vsi, on);
+		ret = i40e_dev_switch_tx_queues(pf, on);
 		if (ret) {
-			PMD_DRV_LOG(ERR, "Failed to switch tx queues");
+			PMD_DRV_LOG(ERR, "Failed to switch tx queues\n");
 			return ret;
 		}
-		ret = i40e_vsi_switch_rx_queues(vsi, on);
+		ret = i40e_dev_switch_rx_queues(pf, on);
 	}
 
 	return ret;
@@ -3215,15 +3237,18 @@ i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on)
 
 /* Initialize VSI for TX */
 static int
-i40e_vsi_tx_init(struct i40e_vsi *vsi)
+i40e_dev_tx_init(struct i40e_pf *pf)
 {
-	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
 	struct rte_eth_dev_data *data = pf->dev_data;
 	uint16_t i;
 	uint32_t ret = I40E_SUCCESS;
+	struct i40e_tx_queue *txq;
 
 	for (i = 0; i < data->nb_tx_queues; i++) {
-		ret = i40e_tx_queue_init(data->tx_queues[i]);
+		txq = data->tx_queues[i];
+		if (!txq || !txq->q_set)
+			continue;
+		ret = i40e_tx_queue_init(txq);
 		if (ret != I40E_SUCCESS)
 			break;
 	}
@@ -3233,16 +3258,20 @@ i40e_vsi_tx_init(struct i40e_vsi *vsi)
 
 /* Initialize VSI for RX */
 static int
-i40e_vsi_rx_init(struct i40e_vsi *vsi)
+i40e_dev_rx_init(struct i40e_pf *pf)
 {
-	struct i40e_pf *pf = I40E_VSI_TO_PF(vsi);
 	struct rte_eth_dev_data *data = pf->dev_data;
 	int ret = I40E_SUCCESS;
 	uint16_t i;
+	struct i40e_rx_queue *rxq;
 
 	i40e_pf_config_mq_rx(pf);
 	for (i = 0; i < data->nb_rx_queues; i++) {
-		ret = i40e_rx_queue_init(data->rx_queues[i]);
+		rxq = data->rx_queues[i];
+		if (!rxq || !rxq->q_set)
+			continue;
+
+		ret = i40e_rx_queue_init(rxq);
 		if (ret != I40E_SUCCESS) {
 			PMD_DRV_LOG(ERR, "Failed to do RX queue "
 				    "initialization");
@@ -3253,20 +3282,19 @@ i40e_vsi_rx_init(struct i40e_vsi *vsi)
 	return ret;
 }
 
-/* Initialize VSI */
 static int
-i40e_vsi_init(struct i40e_vsi *vsi)
+i40e_dev_rxtx_init(struct i40e_pf *pf)
 {
 	int err;
 
-	err = i40e_vsi_tx_init(vsi);
+	err = i40e_dev_tx_init(pf);
 	if (err) {
-		PMD_DRV_LOG(ERR, "Failed to do vsi TX initialization");
+		PMD_DRV_LOG(ERR, "Failed to do TX initialization");
 		return err;
 	}
-	err = i40e_vsi_rx_init(vsi);
+	err = i40e_dev_rx_init(pf);
 	if (err) {
-		PMD_DRV_LOG(ERR, "Failed to do vsi RX initialization");
+		PMD_DRV_LOG(ERR, "Failed to do RX initialization");
 		return err;
 	}
 
@@ -4253,6 +4281,26 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/* Calculate the maximum number of contiguous PF queues that are configured */
+static int
+i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)
+{
+	struct rte_eth_dev_data *data = pf->dev_data;
+	int i, num;
+	struct i40e_rx_queue *rxq;
+
+	num = 0;
+	for (i = 0; i < pf->lan_nb_qps; i++) {
+		rxq = data->rx_queues[i];
+		if (rxq && rxq->q_set)
+			num++;
+		else
+			break;
+	}
+
+	return num;
+}
+
 /* Configure RSS */
 static int
 i40e_pf_config_rss(struct i40e_pf *pf)
@@ -4260,7 +4308,25 @@ i40e_pf_config_rss(struct i40e_pf *pf)
 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
 	struct rte_eth_rss_conf rss_conf;
 	uint32_t i, lut = 0;
-	uint16_t j, num = i40e_align_floor(pf->dev_data->nb_rx_queues);
+	uint16_t j, num;
+
+	/*
+	 * If both VMDQ and RSS are enabled, not all PF queues are configured.
+	 * It's necessary to calculate the actual PF queues that are configured.
+	 */
+	if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
+		num = i40e_pf_calc_configured_queues_num(pf);
+		num = i40e_align_floor(num);
+	} else
+		num = i40e_align_floor(pf->dev_data->nb_rx_queues);
+
+	PMD_INIT_LOG(INFO, "A maximum of %u contiguous PF queues are configured",
+			num);
+
+	if (num == 0) {
+		PMD_INIT_LOG(ERR, "No PF queues are configured to enable RSS");
+		return -ENOTSUP;
+	}
 
 	for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
 		if (j == num)
@@ -4292,16 +4358,19 @@ i40e_pf_config_rss(struct i40e_pf *pf)
 static int
 i40e_pf_config_mq_rx(struct i40e_pf *pf)
 {
-	if (!pf->dev_data->sriov.active) {
-		switch (pf->dev_data->dev_conf.rxmode.mq_mode) {
-		case ETH_MQ_RX_RSS:
-			i40e_pf_config_rss(pf);
-			break;
-		default:
-			i40e_pf_disable_rss(pf);
-			break;
-		}
+	int ret = 0;
+	enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
+
+	if (mq_mode & ETH_MQ_RX_DCB_FLAG) {
+		PMD_INIT_LOG(ERR, "i40e doesn't support DCB yet");
+		return -ENOTSUP;
 	}
 
-	return 0;
+	/* RSS setup */
+	if (mq_mode & ETH_MQ_RX_RSS_FLAG)
+		ret = i40e_pf_config_rss(pf);
+	else
+		i40e_pf_disable_rss(pf);
+
+	return ret;
 }
diff --git a/lib/librte_pmd_i40e/i40e_ethdev.h b/lib/librte_pmd_i40e/i40e_ethdev.h
index b06de05..9ad5611 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.h
+++ b/lib/librte_pmd_i40e/i40e_ethdev.h
@@ -305,7 +305,7 @@ struct i40e_adapter {
 	};
 };
 
-int i40e_vsi_switch_queues(struct i40e_vsi *vsi, bool on);
+int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
 int i40e_vsi_release(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
 				enum i40e_vsi_type type,
@@ -357,7 +357,7 @@ i40e_get_vsi_from_adapter(struct i40e_adapter *adapter)
 		return pf->main_vsi;
 	}
 }
-#define I40E_DEV_PRIVATE_TO_VSI(adapter) \
+#define I40E_DEV_PRIVATE_TO_MAIN_VSI(adapter) \
 	i40e_get_vsi_from_adapter((struct i40e_adapter *)adapter)
 
 /* I40E_VSI_TO */
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 099699c..c6facea 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -1443,14 +1443,58 @@ i40e_xmit_pkts_simple(void *tx_queue,
 	return nb_tx;
 }
 
+/*
+ * Find the VSI the queue belongs to. 'queue_idx' is the queue index used
+ * by the application, which assumes sequential indexes. From the driver's
+ * perspective they are not: for example, q0 belongs to the FDIR VSI,
+ * q1-q64 to the MAIN VSI, q65-q96 to SRIOV VSIs and q97-q128 to VMDQ
+ * VSIs. An application running on the host can use q1-q64 and q97-q128,
+ * 96 queues in total, addressed with queue_idx values 0 to 95, while the
+ * real queue indexes differ. This function does the queue mapping to
+ * find the VSI a queue belongs to.
+ */
+static struct i40e_vsi*
+i40e_pf_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
+{
+	/* the queue in MAIN VSI range */
+	if (queue_idx < pf->main_vsi->nb_qps)
+		return pf->main_vsi;
+
+	queue_idx -= pf->main_vsi->nb_qps;
+
+	/* queue_idx is beyond the range covered by the VMDQ VSIs */
+	if (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) {
+		PMD_INIT_LOG(ERR, "queue_idx out of range. VMDQ configured?");
+		return NULL;
+	}
+
+	return pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi;
+}
+
+static uint16_t
+i40e_get_queue_offset_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
+{
+	/* the queue in MAIN VSI range */
+	if (queue_idx < pf->main_vsi->nb_qps)
+		return queue_idx;
+
+	/* It's VMDQ queues */
+	queue_idx -= pf->main_vsi->nb_qps;
+
+	if (pf->nb_cfg_vmdq_vsi)
+		return queue_idx % pf->vmdq_nb_qps;
+	else {
+		PMD_INIT_LOG(ERR, "Fail to get queue offset");
+		return (uint16_t)(-1);
+	}
+}
+
 int
 i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-	struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
 	struct i40e_rx_queue *rxq;
 	int err = -1;
-	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-	uint16_t q_base = vsi->base_queue;
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1468,7 +1512,7 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 		/* Init the RX tail regieter. */
 		I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
-		err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, TRUE);
+		err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
@@ -1485,16 +1529,18 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 int
 i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-	struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
 	struct i40e_rx_queue *rxq;
 	int err;
-	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-	uint16_t q_base = vsi->base_queue;
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (rx_queue_id < dev->data->nb_rx_queues) {
 		rxq = dev->data->rx_queues[rx_queue_id];
 
-		err = i40e_switch_rx_queue(hw, rx_queue_id + q_base, FALSE);
+		/*
+		 * rx_queue_id is the queue id the application refers to,
+		 * while rxq->reg_idx is the real queue index.
+		 */
+		err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -1511,15 +1557,20 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 int
 i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-	struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
 	int err = -1;
-	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-	uint16_t q_base = vsi->base_queue;
+	struct i40e_tx_queue *txq;
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	PMD_INIT_FUNC_TRACE();
 
 	if (tx_queue_id < dev->data->nb_tx_queues) {
-		err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, TRUE);
+		txq = dev->data->tx_queues[tx_queue_id];
+
+		/*
+		 * tx_queue_id is the queue id the application refers to,
+		 * while txq->reg_idx is the real queue index.
+		 */
+		err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
 		if (err)
 			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
 				    tx_queue_id);
@@ -1531,16 +1582,18 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 int
 i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-	struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
 	struct i40e_tx_queue *txq;
 	int err;
-	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
-	uint16_t q_base = vsi->base_queue;
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (tx_queue_id < dev->data->nb_tx_queues) {
 		txq = dev->data->tx_queues[tx_queue_id];
 
-		err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, FALSE);
+		/*
+		 * tx_queue_id is the queue id the application refers to,
+		 * while txq->reg_idx is the real queue index.
+		 */
+		err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
 
 		if (err) {
 			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
@@ -1563,14 +1616,23 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			const struct rte_eth_rxconf *rx_conf,
 			struct rte_mempool *mp)
 {
-	struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_rx_queue *rxq;
 	const struct rte_memzone *rz;
 	uint32_t ring_size;
 	uint16_t len;
 	int use_def_burst_func = 1;
 
-	if (!vsi || queue_idx >= vsi->nb_qps) {
+	if (hw->mac.type == I40E_MAC_VF) {
+		struct i40e_vf *vf =
+			I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+		vsi = &vf->vsi;
+	} else
+		vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+
+	if (vsi == NULL) {
 		PMD_DRV_LOG(ERR, "VSI not available or queue "
 			    "index exceeds the maximum");
 		return I40E_ERR_PARAM;
@@ -1603,7 +1665,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->nb_rx_desc = nb_desc;
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
-	rxq->reg_idx = vsi->base_queue + queue_idx;
+	if (hw->mac.type == I40E_MAC_VF)
+		rxq->reg_idx = queue_idx;
+	else /* PF device */
+		rxq->reg_idx = vsi->base_queue +
+			i40e_get_queue_offset_by_qindex(pf, queue_idx);
+
 	rxq->port_id = dev->data->port_id;
 	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
 							0 : ETHER_CRC_LEN);
@@ -1761,13 +1828,22 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			unsigned int socket_id,
 			const struct rte_eth_txconf *tx_conf)
 {
-	struct i40e_vsi *vsi = I40E_DEV_PRIVATE_TO_VSI(dev->data->dev_private);
+	struct i40e_vsi *vsi;
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_tx_queue *txq;
 	const struct rte_memzone *tz;
 	uint32_t ring_size;
 	uint16_t tx_rs_thresh, tx_free_thresh;
 
-	if (!vsi || queue_idx >= vsi->nb_qps) {
+	if (hw->mac.type == I40E_MAC_VF) {
+		struct i40e_vf *vf =
+			I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+		vsi = &vf->vsi;
+	} else
+		vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+
+	if (vsi == NULL) {
 		PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
 			    "exceeds the maximum", queue_idx);
 		return I40E_ERR_PARAM;
@@ -1891,7 +1967,12 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->hthresh = tx_conf->tx_thresh.hthresh;
 	txq->wthresh = tx_conf->tx_thresh.wthresh;
 	txq->queue_id = queue_idx;
-	txq->reg_idx = vsi->base_queue + queue_idx;
+	if (hw->mac.type == I40E_MAC_VF)
+		txq->reg_idx = queue_idx;
+	else /* PF device */
+		txq->reg_idx = vsi->base_queue +
+			i40e_get_queue_offset_by_qindex(pf, queue_idx);
+
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
 	txq->vsi = vsi;
-- 
1.7.7.6


Thread overview: 45+ messages
2014-09-23 13:14 [dpdk-dev] [PATCH 0/6] i40e VMDQ support Chen Jing D(Mark)
2014-09-23 13:14 ` [dpdk-dev] [PATCH 1/6] ether: enhancement for " Chen Jing D(Mark)
2014-10-14 14:09   ` Thomas Monjalon
2014-10-15  6:59     ` Chen, Jing D
2014-10-15  8:10       ` Thomas Monjalon
2014-10-15  9:47         ` Chen, Jing D
2014-10-15  9:59           ` Thomas Monjalon
2014-10-16 10:07   ` [dpdk-dev] [PATCH v2 0/6] i40e " Chen Jing D(Mark)
2014-10-16 10:07     ` [dpdk-dev] [PATCH v2 1/6] ether: enhancement for " Chen Jing D(Mark)
2014-11-03 22:17       ` Thomas Monjalon
2014-11-04  5:50         ` Chen, Jing D
2014-11-04  8:53           ` Thomas Monjalon
2014-11-04  8:59             ` Chen, Jing D
2014-10-16 10:07     ` [dpdk-dev] [PATCH v2 2/6] igb: change for VMDQ arguments expansion Chen Jing D(Mark)
2014-11-03 18:37       ` Thomas Monjalon
2014-11-04  5:26         ` Chen, Jing D
2014-10-16 10:07     ` [dpdk-dev] [PATCH v2 3/6] ixgbe: " Chen Jing D(Mark)
2014-10-16 10:07     ` [dpdk-dev] [PATCH v2 4/6] i40e: add VMDQ support Chen Jing D(Mark)
2014-11-03 18:33       ` Thomas Monjalon
2014-11-04  5:22         ` Chen, Jing D
2014-10-16 10:07     ` [dpdk-dev] [PATCH v2 5/6] i40e: macaddr add/del enhancement Chen Jing D(Mark)
2014-10-16 10:07     ` [dpdk-dev] [PATCH v2 6/6] i40e: Add full VMDQ pools support Chen Jing D(Mark)
2014-10-21  3:30     ` [dpdk-dev] [PATCH v2 0/6] i40e VMDQ support Cao, Min
2014-11-03  7:54     ` Chen, Jing D
2014-11-04 10:01   ` [dpdk-dev] [PATCH v3 " Chen Jing D(Mark)
2014-11-04 10:01     ` [dpdk-dev] [PATCH v3 1/6] ether: enhancement for " Chen Jing D(Mark)
2014-11-04 10:01     ` [dpdk-dev] [PATCH v3 2/6] igb: change for VMDQ arguments expansion Chen Jing D(Mark)
2014-11-04 10:01     ` [dpdk-dev] [PATCH v3 3/6] ixgbe: " Chen Jing D(Mark)
2014-11-04 10:01     ` [dpdk-dev] [PATCH v3 4/6] i40e: add VMDQ support Chen Jing D(Mark)
2014-11-04 10:01     ` [dpdk-dev] [PATCH v3 5/6] i40e: macaddr add/del enhancement Chen Jing D(Mark)
2014-11-04 10:01     ` [dpdk-dev] [PATCH v3 6/6] i40e: Add full VMDQ pools support Chen Jing D(Mark)
2014-11-04 11:19     ` [dpdk-dev] [PATCH v3 0/6] i40e VMDQ support Ananyev, Konstantin
2014-11-04 23:17       ` Thomas Monjalon
2014-12-11  6:09     ` Cao, Min
2014-09-23 13:14 ` [dpdk-dev] [PATCH 2/6] igb: change for VMDQ arguments expansion Chen Jing D(Mark)
2014-09-23 13:14 ` [dpdk-dev] [PATCH 3/6] ixgbe: " Chen Jing D(Mark)
2014-09-23 13:14 ` [dpdk-dev] [PATCH 4/6] i40e: add VMDQ support Chen Jing D(Mark)
2014-10-13 16:14   ` De Lara Guarch, Pablo
2014-09-23 13:14 ` [dpdk-dev] [PATCH 5/6] i40e: macaddr add/del enhancement Chen Jing D(Mark)
2014-10-14 14:25   ` Thomas Monjalon
2014-10-15  7:01     ` Chen, Jing D
2014-09-23 13:14 ` Chen Jing D(Mark) [this message]
2014-10-10 10:45 ` [dpdk-dev] [PATCH 0/6] i40e VMDQ support Ananyev, Konstantin
2014-10-14  8:27 ` Chen, Jing D
2014-10-21  3:30 ` Cao, Min
