DPDK patches and discussions
* [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
@ 2015-01-12 14:43 Michal Jastrzebski
  2015-01-12 15:46 ` Jastrzebski, MichalX K
  2015-01-13 10:08 ` Vlad Zolotarov
  0 siblings, 2 replies; 10+ messages in thread
From: Michal Jastrzebski @ 2015-01-12 14:43 UTC (permalink / raw)
  To: dev

Date: Mon, 12 Jan 2015 15:39:40 +0100
Message-Id: <1421073581-6644-2-git-send-email-michalx.k.jastrzebski@intel.com>
X-Mailer: git-send-email 2.1.1
In-Reply-To: <1421073581-6644-1-git-send-email-michalx.k.jastrzebski@intel.com>
References: <1421073581-6644-1-git-send-email-michalx.k.jastrzebski@intel.com>

From: Pawel Wodkowski <pawelx.wodkowski@intel.com>


This patch adds support for DCB in SRIOV mode. When no PFC is
enabled, this feature can be used to provide multiple queues
(up to 8 or 4) per VF.
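As a rough illustration (not part of this patch; the 16-pool value is
only an example, the mq_mode values and *_adv_conf fields are the
existing rte_ethdev definitions), a PF could request this mode as
follows:

#include <rte_ethdev.h>

/* Hypothetical PF configuration for DCB in SRIOV mode (sketch only;
 * ETH_16_POOLS is an arbitrary example value). */
static const struct rte_eth_conf dcb_sriov_conf = {
	.rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB,
	.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB,
	/* this patch requires equal rx and tx pool counts in this mode */
	.rx_adv_conf.vmdq_dcb_conf.nb_queue_pools = ETH_16_POOLS,
	.tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools = ETH_16_POOLS,
};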

It incorporates the following modifications:
 - Allow zero rx/tx queues to be passed to rte_eth_dev_configure()
   (see the sketch after this list).
   Rationale:
   in SRIOV mode the PF uses the first free VF for RX/TX. If the VF
   count is 16 or 32, all resources are assigned to VFs, so the PF can
   be used only for configuration.
 - Split nb_q_per_pool into nb_rx_q_per_pool and nb_tx_q_per_pool.
   Rationale:
   the number of rx and tx queues might differ if RX and TX are
   configured in different modes. This allows the VF to be informed of
   the proper number of queues.
 - Extend the mailbox API for DCB mode.
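The zero-queue configure path from the first item, as a minimal sketch
(the helper name and port_id are assumptions; dcb_sriov_conf is the
sketch configuration above):

/* Sketch: configure a PF whose queues were all assigned to VFs. */
static int
configure_pf_for_admin(uint8_t port_id)
{
	/* 0 rx/tx queues: accepted after this patch when SRIOV is
	 * active; before it, this returned -EINVAL unconditionally. */
	return rte_eth_dev_configure(port_id, 0, 0, &dcb_sriov_conf);
}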

Signed-off-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
---
 lib/librte_ether/rte_ethdev.c       |   84 +++++++++++++++++++++---------
 lib/librte_ether/rte_ethdev.h       |    5 +-
 lib/librte_pmd_e1000/igb_pf.c       |    3 +-
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c |   10 ++--
 lib/librte_pmd_ixgbe/ixgbe_ethdev.h |    1 +
 lib/librte_pmd_ixgbe/ixgbe_pf.c     |   98 ++++++++++++++++++++++++++++++-----
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c   |    7 ++-
 7 files changed, 159 insertions(+), 49 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 95f2ceb..4c1a494 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -333,7 +333,7 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
 				sizeof(dev->data->rx_queues[0]) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
-		if (dev->data->rx_queues == NULL) {
+		if (dev->data->rx_queues == NULL && nb_queues > 0) {
 			dev->data->nb_rx_queues = 0;
 			return -(ENOMEM);
 		}
@@ -475,7 +475,7 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
 				sizeof(dev->data->tx_queues[0]) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
-		if (dev->data->tx_queues == NULL) {
+		if (dev->data->tx_queues == NULL && nb_queues > 0) {
 			dev->data->nb_tx_queues = 0;
 			return -(ENOMEM);
 		}
@@ -507,6 +507,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		      const struct rte_eth_conf *dev_conf)
 {
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	struct rte_eth_dev_info dev_info;
 
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
@@ -524,11 +525,33 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 			return (-EINVAL);
 		}
 
+		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) &&
+			(dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB)) {
+			enum rte_eth_nb_pools rx_pools =
+						dev_conf->rx_adv_conf.vmdq_dcb_conf.nb_queue_pools;
+			enum rte_eth_nb_pools tx_pools =
+						dev_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools;
+
+			if (rx_pools != tx_pools) {
+				/* Only equal number of pools is supported when
+				 * DCB+VMDq in SRIOV */
+				PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
+						" SRIOV active, DCB+VMDQ mode, "
+						"number of rx and tx pools is not equal\n",
+						port_id);
+				return (-EINVAL);
+			}
+		}
+
+		uint16_t nb_rx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool;
+		uint16_t nb_tx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool;
+
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_RSS:
 		case ETH_MQ_RX_VMDQ_DCB:
+			break;
+		case ETH_MQ_RX_VMDQ_RSS:
 		case ETH_MQ_RX_VMDQ_DCB_RSS:
-			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
+			/* RSS, DCB+RSS VMDQ in SRIOV mode, not implemented yet */
 			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
 					" SRIOV active, "
 					"unsupported VMDQ mq_mode rx %u\n",
@@ -537,37 +560,32 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
-			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
-				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+			if (nb_rx_q_per_pool > 1)
+				nb_rx_q_per_pool = 1;
 			break;
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			/* DCB VMDQ in SRIOV mode, not implement yet */
-			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
-					" SRIOV active, "
-					"unsupported VMDQ mq_mode tx %u\n",
-					port_id, dev_conf->txmode.mq_mode);
-			return (-EINVAL);
+		case ETH_MQ_TX_VMDQ_DCB: /* DCB VMDQ in SRIOV mode*/
+			break;
 		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
-			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
-				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+			if (nb_tx_q_per_pool > 1)
+				nb_tx_q_per_pool = 1;
 			break;
 		}
 
 		/* check valid queue number */
-		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
-		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+		if (nb_rx_q > nb_rx_q_per_pool || nb_tx_q > nb_tx_q_per_pool) {
 			PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
-				    "queue number must less equal to %d\n",
-					port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+				    "rx/tx queue number must less equal to %d/%d\n",
+					port_id, RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool,
+					RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool);
 			return (-EINVAL);
 		}
 	} else {
-		/* For vmdb+dcb mode check our configuration before we go further */
+		/* For vmdq+dcb mode check our configuration before we go further */
 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
@@ -606,11 +624,20 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 			}
 		}
 
+		/* For DCB we need to obtain maximum number of queues dynamically,
+		 * as this depends on max VF exported in PF */
+		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
+			(dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
+
+				FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+				(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+		}
+
 		/* For DCB mode check our configuration before we go further */
 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
-			if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
+			if (nb_rx_q != dev_info.max_rx_queues) {
 				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
 						"!= %d\n",
 						port_id, ETH_DCB_NUM_QUEUES);
@@ -630,7 +657,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
-			if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
+			if (nb_tx_q != dev_info.max_tx_queues) {
 				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
 						"!= %d\n",
 						port_id, ETH_DCB_NUM_QUEUES);
@@ -690,7 +717,10 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 	if (nb_rx_q == 0) {
 		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
-		return (-EINVAL);
+		/* In SRIOV there can be no free resource for PF. So permit use only
+		 * for configuration. */
+		if (RTE_ETH_DEV_SRIOV(dev).active == 0)
+			return (-EINVAL);
 	}
 
 	if (nb_tx_q > dev_info.max_tx_queues) {
@@ -698,9 +728,13 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 				port_id, nb_tx_q, dev_info.max_tx_queues);
 		return (-EINVAL);
 	}
+
 	if (nb_tx_q == 0) {
 		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
-		return (-EINVAL);
+		/* In SRIOV there can be no free resource for PF. So permit use only
+		 * for configuration. */
+		if (RTE_ETH_DEV_SRIOV(dev).active == 0)
+			return (-EINVAL);
 	}
 
 	/* Copy the dev_conf parameter into the dev structure */
@@ -750,7 +784,7 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
-	/* multipe queue mode checking */
+	/* multiple queue mode checking */
 	diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
 	if (diag != 0) {
 		PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index ce0528f..04fda83 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -299,7 +299,7 @@ enum rte_eth_rx_mq_mode {
 enum rte_eth_tx_mq_mode {
 	ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
 	ETH_MQ_TX_DCB,          /**< For TX side,only DCB is on. */
-	ETH_MQ_TX_VMDQ_DCB,	/**< For TX side,both DCB and VT is on. */
+	ETH_MQ_TX_VMDQ_DCB,     /**< For TX side,both DCB and VT is on. */
 	ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
 };
 
@@ -1569,7 +1569,8 @@ struct rte_eth_dev {
 
 struct rte_eth_dev_sriov {
 	uint8_t active;               /**< SRIOV is active with 16, 32 or 64 pools */
-	uint8_t nb_q_per_pool;        /**< rx queue number per pool */
+	uint8_t nb_rx_q_per_pool;        /**< rx queue number per pool */
+	uint8_t nb_tx_q_per_pool;        /**< tx queue number per pool */
 	uint16_t def_vmdq_idx;        /**< Default pool num used for PF */
 	uint16_t def_pool_q_idx;      /**< Default pool queue start reg index */
 };
diff --git a/lib/librte_pmd_e1000/igb_pf.c b/lib/librte_pmd_e1000/igb_pf.c
index bc3816a..9d2f858 100644
--- a/lib/librte_pmd_e1000/igb_pf.c
+++ b/lib/librte_pmd_e1000/igb_pf.c
@@ -115,7 +115,8 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
 		rte_panic("Cannot allocate memory for private VF data\n");
 
 	RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
-	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_rx_q_per_pool = nb_queue;
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_tx_q_per_pool = nb_queue;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
 
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 3fc3738..347f03c 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -3555,14 +3555,14 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_vf_info *vfinfo =
 		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-	uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	uint8_t  nb_tx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool;
 	uint32_t queue_stride =
 		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
 	uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
-	uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
+	uint32_t tx_queue_end = queue_idx + nb_tx_q_per_pool - 1;
 	uint16_t total_rate = 0;
 
-	if (queue_end >= hw->mac.max_tx_queues)
+	if (tx_queue_end >= hw->mac.max_tx_queues)
 		return -EINVAL;
 
 	if (vfinfo != NULL) {
@@ -3577,7 +3577,7 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 		return -EINVAL;
 
 	/* Store tx_rate for this vf. */
-	for (idx = 0; idx < nb_q_per_pool; idx++) {
+	for (idx = 0; idx < nb_tx_q_per_pool; idx++) {
 		if (((uint64_t)0x1 << idx) & q_msk) {
 			if (vfinfo[vf].tx_rate[idx] != tx_rate)
 				vfinfo[vf].tx_rate[idx] = tx_rate;
@@ -3595,7 +3595,7 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 	}
 
 	/* Set RTTBCNRC of each queue/pool for vf X  */
-	for (; queue_idx <= queue_end; queue_idx++) {
+	for (; queue_idx <= tx_queue_end; queue_idx++) {
 		if (0x1 & q_msk)
 			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
 		q_msk = q_msk >> 1;
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
index ca99170..ebf16e9 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -159,6 +159,7 @@ struct ixgbe_vf_info {
 	uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF];
 	uint16_t vlan_count;
 	uint8_t spoofchk_enabled;
+	unsigned int vf_api;
 };
 
 /*
diff --git a/lib/librte_pmd_ixgbe/ixgbe_pf.c b/lib/librte_pmd_ixgbe/ixgbe_pf.c
index 51da1fd..4d30bcf 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_pf.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_pf.c
@@ -127,7 +127,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
 	}
 
-	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_rx_q_per_pool = nb_queue;
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_tx_q_per_pool = nb_queue;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
 
@@ -189,7 +190,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
 
 	/*
-	 * SW msut set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
+	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
 	 */
 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
 	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
@@ -214,19 +215,19 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
-        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 
-        /*
+	/*
 	 * enable vlan filtering and allow all vlan tags through
 	 */
-        vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-        vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
-        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
 
-        /* VFTA - enable all vlan filters */
-        for (i = 0; i < IXGBE_MAX_VFTA; i++) {
-                IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
-        }
+	/* VFTA - enable all vlan filters */
+	for (i = 0; i < IXGBE_MAX_VFTA; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+	}
 
 	/* Enable MAC Anti-Spoofing */
 	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
@@ -369,6 +370,73 @@ ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
 }
 
 static int
+ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	int api = msgbuf[1];
+
+	switch (api) {
+	case ixgbe_mbox_api_10:
+	case ixgbe_mbox_api_11:
+		vfinfo[vf].vf_api = api;
+		return 0;
+	default:
+		break;
+	}
+
+	RTE_LOG(DEBUG, PMD, "VF %d requested invalid api version %u\n", vf, api);
+	return -1;
+}
+
+static int
+ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	struct ixgbe_dcb_config *dcb_cfg =
+			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+
+	uint8_t num_tcs = dcb_cfg->num_tcs.pg_tcs;
+
+	/* verify the PF is supporting the correct APIs */
+	switch (vfinfo[vf].vf_api) {
+	case ixgbe_mbox_api_10:
+	case ixgbe_mbox_api_11:
+		break;
+	default:
+		return -1;
+	}
+
+	if (RTE_ETH_DEV_SRIOV(dev).active) {
+		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB)
+			msgbuf[IXGBE_VF_TX_QUEUES] = num_tcs;
+		else
+			msgbuf[IXGBE_VF_TX_QUEUES] = 1;
+
+		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB)
+			msgbuf[IXGBE_VF_RX_QUEUES] = num_tcs;
+		else
+			msgbuf[IXGBE_VF_RX_QUEUES] = 1;
+	}	else {
+		/* only allow 1 Tx queue for bandwidth limiting */
+		msgbuf[IXGBE_VF_TX_QUEUES] = 1;
+		msgbuf[IXGBE_VF_RX_QUEUES] = 1;
+	}
+
+	/* notify VF of need for VLAN tag stripping, and correct queue */
+	if (num_tcs)
+		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
+	else
+		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
+
+	/* notify VF of default queue */
+	msgbuf[IXGBE_VF_DEF_QUEUE] = 0;
+
+	return 0;
+}
+
+static int
 ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -512,6 +580,12 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 	case IXGBE_VF_SET_VLAN:
 		retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
 		break;
+	case IXGBE_VF_API_NEGOTIATE:
+		retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
+		break;
+	case IXGBE_VF_GET_QUEUES:
+		retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
+		break;
 	default:
 		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
 		retval = IXGBE_ERR_MBX;
@@ -526,7 +600,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 
 	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
 
-	ixgbe_write_mbx(hw, msgbuf, 1, vf);
+	ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
 
 	return retval;
 }
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index e10d6a2..49b44fe 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -3166,10 +3166,9 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 
 	/* check support mq_mode for DCB */
 	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
-		return;
-
-	if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
+	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
+	    (dev_conf->txmode.mq_mode != ETH_MQ_TX_VMDQ_DCB) &&
+	    (dev_conf->txmode.mq_mode != ETH_MQ_TX_DCB))
 		return;
 
 	/** Configure DCB hardware **/
-- 
1.7.9.5
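For context, a hedged sketch of the VF side of the new mailbox exchange.
Only the IXGBE_VF_* word offsets, IXGBE_VT_MSGTYPE_ACK and
ixgbe_read_mbx() are symbols the patch itself relies on; the helper
name, the 5-word reply length and the 'hw' handling are assumptions:

/* Illustrative VF-side decoding of the IXGBE_VF_GET_QUEUES reply that
 * ixgbe_get_vf_queues() fills in above (sketch, not part of the patch). */
static int
vf_read_queue_info(struct ixgbe_hw *hw, uint32_t *nb_rx, uint32_t *nb_tx)
{
	uint32_t msg[5]; /* msg[0] = msg type, words 1..4 = payload */

	if (ixgbe_read_mbx(hw, msg, 5, 0) != 0 ||
	    !(msg[0] & IXGBE_VT_MSGTYPE_ACK))
		return -1;

	*nb_tx = msg[IXGBE_VF_TX_QUEUES];  /* tx queues per pool */
	*nb_rx = msg[IXGBE_VF_RX_QUEUES];  /* rx queues per pool */
	/* msg[IXGBE_VF_TRANS_VLAN] carries the TC count (vlan stripping),
	 * msg[IXGBE_VF_DEF_QUEUE] the default queue index. */
	return 0;
}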


* Re: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
  2015-01-12 14:43 [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe Michal Jastrzebski
@ 2015-01-12 15:46 ` Jastrzebski, MichalX K
  2015-01-13 10:02   ` Vlad Zolotarov
  2015-01-13 10:08 ` Vlad Zolotarov
  1 sibling, 1 reply; 10+ messages in thread
From: Jastrzebski, MichalX K @ 2015-01-12 15:46 UTC (permalink / raw)
  To: Jastrzebski, MichalX K, dev

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Michal Jastrzebski
> Sent: Monday, January 12, 2015 3:43 PM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
> 
> [snip - full patch quoted verbatim, see the original message above]
> 
Self-nacked because of wrong message format.


* Re: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
  2015-01-12 15:46 ` Jastrzebski, MichalX K
@ 2015-01-13 10:02   ` Vlad Zolotarov
  0 siblings, 0 replies; 10+ messages in thread
From: Vlad Zolotarov @ 2015-01-13 10:02 UTC (permalink / raw)
  To: Jastrzebski, MichalX K, dev


On 01/12/15 17:46, Jastrzebski, MichalX K wrote:
>> -----Original Message-----
>> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Michal Jastrzebski
>> Sent: Monday, January 12, 2015 3:43 PM
>> To: dev@dpdk.org
>> Subject: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
>>
>> [snip - full patch quoted verbatim, see the original message above]
>>
>>
>>
>>   	return retval;
>>
>>   }
>>
>> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
>> b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
>>
>> index e10d6a2..49b44fe 100644
>>
>> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
>>
>> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
>>
>> @@ -3166,10 +3166,9 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
>>
>>
>>
>>   	/* check support mq_mode for DCB */
>>
>>   	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
>>
>> -	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
>>
>> -		return;
>>
>> -
>>
>> -	if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
>>
>> +	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
>>
>> +	    (dev_conf->txmode.mq_mode != ETH_MQ_TX_VMDQ_DCB) &&
>>
>> +	    (dev_conf->txmode.mq_mode != ETH_MQ_TX_DCB))
>>
>>   		return;
>>
>>
>>
>>   	/** Configure DCB hardware **/
>>
>> --
>>
>> 1.7.9.5
>>
>>
> Self-nacked because of the wrong message format.

Yeah, there is something really wrong with this email formatting... ;)
Note that since you (I guess) haven't used 'git send-email' for this
series, it doesn't look like a series (at least in my Thunderbird).

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
  2015-01-12 14:43 [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe Michal Jastrzebski
  2015-01-12 15:46 ` Jastrzebski, MichalX K
@ 2015-01-13 10:08 ` Vlad Zolotarov
  2015-01-14  0:51   ` Ouyang, Changchun
  1 sibling, 1 reply; 10+ messages in thread
From: Vlad Zolotarov @ 2015-01-13 10:08 UTC (permalink / raw)
  To: Michal Jastrzebski, dev


On 01/12/15 16:43, Michal Jastrzebski wrote:
> Date: Mon, 12 Jan 2015 15:39:40 +0100
> Message-Id: <1421073581-6644-2-git-send-email-michalx.k.jastrzebski@intel.com>
> X-Mailer: git-send-email 2.1.1
> In-Reply-To: <1421073581-6644-1-git-send-email-michalx.k.jastrzebski@intel.com>
> References: <1421073581-6644-1-git-send-email-michalx.k.jastrzebski@intel.com>
>
> From: Pawel Wodkowski <pawelx.wodkowski@intel.com>
>
> This patch adds support for DCB in SRIOV mode. When no PFC
> is enabled this feature might be used as multiple queues
> (up to 8 or 4) for a VF.
>
> It incorporates the following modifications:
>   - Allow zero rx/tx queues to be passed to rte_eth_dev_configure().
>     Rationale:
>     in SRIOV mode the PF uses the first free VF to RX/TX. If the VF count
>     is 16 or 32 all resources are assigned to VFs, so the PF can
>     be used only for configuration.
>   - split nb_q_per_pool into nb_rx_q_per_pool and nb_tx_q_per_pool
>     Rationale:
>     the rx and tx queue numbers might differ if RX and TX are
>     configured in different modes. This allows informing the VF about
>     the proper number of queues.
>   - extern mailbox API for DCB mode


Nice move! Ouyang, this is a nice answer to my recent remarks about your 
PATCH4 in "Enable VF RSS for Niantic" series.

Michal, could u, pls., respin this series after fixing the formatting 
and (maybe) using "git send-email" for sending? ;)

thanks,
vlad
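
For illustration, here is a minimal sketch of the zero-queue PF
configuration that the first bullet above enables. This is not code from
the patch: the function name and the rte_eth_conf contents are
illustrative assumptions, and error handling is omitted.

	#include <rte_ethdev.h>

	/* Sketch: configure an SRIOV PF for control-plane use only,
	 * assuming all queue resources are already assigned to VFs.
	 * With the patch, nb_rx_q == 0 and nb_tx_q == 0 are accepted
	 * while SRIOV is active instead of failing with -EINVAL. */
	static int
	configure_pf_for_sriov(uint8_t port_id)
	{
		struct rte_eth_conf conf = {
			.rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_DCB },
			.txmode = { .mq_mode = ETH_MQ_TX_VMDQ_DCB },
		};

		/* Zero rx/tx queues: PF is used for configuration only. */
		return rte_eth_dev_configure(port_id, 0, 0, &conf);
	}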


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
  2015-01-13 10:08 ` Vlad Zolotarov
@ 2015-01-14  0:51   ` Ouyang, Changchun
  2015-01-14  9:46     ` Wodkowski, PawelX
  0 siblings, 1 reply; 10+ messages in thread
From: Ouyang, Changchun @ 2015-01-14  0:51 UTC (permalink / raw)
  To: Vlad Zolotarov, Jastrzebski, MichalX K, dev



> -----Original Message-----
> From: Vlad Zolotarov [mailto:vladz@cloudius-systems.com]
> Sent: Tuesday, January 13, 2015 6:09 PM
> To: Jastrzebski, MichalX K; dev@dpdk.org
> Cc: Ouyang, Changchun
> Subject: Re: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
> 
> 
> On 01/12/15 16:43, Michal Jastrzebski wrote:
> > Date: Mon, 12 Jan 2015 15:39:40 +0100
> > Message-Id: <1421073581-6644-2-git-send-email-michalx.k.jastrzebski@intel.com>
> > X-Mailer: git-send-email 2.1.1
> > In-Reply-To: <1421073581-6644-1-git-send-email-michalx.k.jastrzebski@intel.com>
> > References: <1421073581-6644-1-git-send-email-michalx.k.jastrzebski@intel.com>
> >
> > From: Pawel Wodkowski <pawelx.wodkowski@intel.com>
> >
> > This patch adds support for DCB in SRIOV mode. When no PFC
> > is enabled this feature might be used as multiple queues
> > (up to 8 or 4) for a VF.
> >
> > It incorporates the following modifications:
> >   - Allow zero rx/tx queues to be passed to rte_eth_dev_configure().
> >     Rationale:
> >     in SRIOV mode the PF uses the first free VF to RX/TX. If the VF count
> >     is 16 or 32 all resources are assigned to VFs, so the PF can
> >     be used only for configuration.
> >   - split nb_q_per_pool into nb_rx_q_per_pool and nb_tx_q_per_pool
> >     Rationale:
> >     the rx and tx queue numbers might differ if RX and TX are
> >     configured in different modes. This allows informing the VF about
> >     the proper number of queues.
> >   - extern mailbox API for DCB mode
>
> Nice move! Ouyang, this is a nice answer to my recent remarks about your
> PATCH4 in the "Enable VF RSS for Niantic" series.

After I responded to your last comments, I saw this :-). I am sure we both agree it is the right way to resolve it in the VMDq DCB case.

> Michal, could you please respin this series after fixing the formatting
> and (maybe) using "git send-email" for sending? ;)
> 
> thanks,
> vlad
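
As background for the "extern mailbox API" bullet above: with the PF-side
handlers added by this patch (IXGBE_VF_API_NEGOTIATE and
IXGBE_VF_GET_QUEUES), a VF can discover its rx/tx queue counts over the
mailbox. The following is a rough, simplified sketch of that VF-side
exchange; it is not code from the patch, and the helper name and exact
message lengths are assumptions.

	#include <stdint.h>

	/* Sketch: negotiate mailbox API 1.1, then ask the PF how many
	 * rx/tx queues this VF owns. NACK/retry handling is omitted. */
	static int
	vf_query_queues(struct ixgbe_hw *hw, uint32_t *nb_tx_q, uint32_t *nb_rx_q)
	{
		uint32_t msg[5];

		/* Step 1: agree on a mailbox API version with the PF. */
		msg[0] = IXGBE_VF_API_NEGOTIATE;
		msg[1] = ixgbe_mbox_api_11;
		if (ixgbe_write_mbx(hw, msg, 2, 0) ||
		    ixgbe_read_mbx(hw, msg, 2, 0) ||
		    (msg[0] & IXGBE_VT_MSGTYPE_NACK))
			return -1;

		/* Step 2: ask for the queue layout of this VF's pool. */
		msg[0] = IXGBE_VF_GET_QUEUES;
		if (ixgbe_write_mbx(hw, msg, 1, 0) ||
		    ixgbe_read_mbx(hw, msg, 5, 0) ||
		    (msg[0] & IXGBE_VT_MSGTYPE_NACK))
			return -1;

		*nb_tx_q = msg[IXGBE_VF_TX_QUEUES];
		*nb_rx_q = msg[IXGBE_VF_RX_QUEUES];
		return 0;
	}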
^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
  2015-01-14  0:51   ` Ouyang, Changchun
@ 2015-01-14  9:46     ` Wodkowski, PawelX
  0 siblings, 0 replies; 10+ messages in thread
From: Wodkowski, PawelX @ 2015-01-14  9:46 UTC (permalink / raw)
  To: Ouyang, Changchun, Vlad Zolotarov, Jastrzebski, MichalX K, dev

> > >   - split nb_q_per_pool into nb_rx_q_per_pool and nb_tx_q_per_pool
> > >     Rationale:
> > >     the rx and tx queue numbers might differ if RX and TX are
> > >     configured in different modes. This allows informing the VF about
> > >     the proper number of queues.
> >
> > Nice move! Ouyang, this is a nice answer to my recent remarks about your
> > PATCH4 in the "Enable VF RSS for Niantic" series.
>
> After I responded to your last comments, I saw this :-). I am sure we both agree it is
> the right way to resolve it in the VMDq DCB case.
> 

I am now splitting this patch according to your suggestions and I am a little confused.

In this (DCB in SRIOV) case, the primary reason for splitting nb_q_per_pool into
nb_rx_q_per_pool and nb_tx_q_per_pool was this code:

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index af9e261..be3afe4 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -537,8 +537,8 @@
 		default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
-			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
-				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+			if (RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool > 1)
+				RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool = 1;
 			break;
 		}
 
@@ -553,17 +553,18 @@
 		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
-			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
-				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+			if (RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool > 1)
+				RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool = 1;
 			break;
 		}
 
 		/* check valid queue number */
-		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
-		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool) ||
+		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool)) {
 			PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
-				    "queue number must less equal to %d\n",
-					port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+				    "rx/tx queue number must less equal to %d/%d\n",
+					port_id, RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool,
+					RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool);
 			return (-EINVAL);
 		}
 	} else {
--

This introduced an issue when RX and TX were configured in different ways. The problem was
that RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool was common to RX and TX and was being
modified. So I did the above. But when testpmd was adjusted for DCB in SRIOV there
was another issue. Testpmd pre-configures ports by default, and since
nb_rx_q_per_pool and nb_tx_q_per_pool were already reset to 1 there was no way to
use them for DCB in SRIOV. So I made another modification:

> +		uint16_t nb_rx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool;
> +		uint16_t nb_tx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool;
> +
>   		switch (dev_conf->rxmode.mq_mode) {
> -		case ETH_MQ_RX_VMDQ_RSS:
>   		case ETH_MQ_RX_VMDQ_DCB:
> +			break;
> +		case ETH_MQ_RX_VMDQ_RSS:
>   		case ETH_MQ_RX_VMDQ_DCB_RSS:
> -			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
> +			/* RSS, DCB+RSS VMDQ in SRIOV mode, not implement yet */
>   			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
>   					" SRIOV active, "
>   					"unsupported VMDQ mq_mode rx %u\n",
> @@ -537,37 +560,32 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>   		default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
>   			/* if nothing mq mode configure, use default scheme */
>   			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
> -			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
> -				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
> +			if (nb_rx_q_per_pool > 1)
> +				nb_rx_q_per_pool = 1;
>   			break;
>   		}
>   
>   		switch (dev_conf->txmode.mq_mode) {
> -		case ETH_MQ_TX_VMDQ_DCB:
> -			/* DCB VMDQ in SRIOV mode, not implement yet */
> -			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
> -					" SRIOV active, "
> -					"unsupported VMDQ mq_mode tx %u\n",
> -					port_id, dev_conf->txmode.mq_mode);
> -			return (-EINVAL);
> +		case ETH_MQ_TX_VMDQ_DCB: /* DCB VMDQ in SRIOV mode*/
> +			break;
>   		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
>   			/* if nothing mq mode configure, use default scheme */
>   			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
> -			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
> -				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
> +			if (nb_tx_q_per_pool > 1)
> +				nb_tx_q_per_pool = 1;
>   			break;
>   		}
>   
>   		/* check valid queue number */
> -		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
> -		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
> +		if (nb_rx_q > nb_rx_q_per_pool || nb_tx_q > nb_tx_q_per_pool) {
>   			PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
> -				    "queue number must less equal to %d\n",
> -					port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
> +				    "rx/tx queue number must less equal to %d/%d\n",
> +					port_id, RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool,
> +					RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool);
>   			return (-EINVAL);
>   		}

At this point I think that splitting RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool might not be
needed. From my point of view (DCB), since nb_q_per_pool is untouched, I think I can stay with:

> +		uint16_t nb_rx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
> +		uint16_t nb_tx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
> +

What do you think? I noticed that you were discussing some issue with nb_q_per_pool in the context
of RSS functionality. Can you speak to my doubts in the context of that RSS work?

Pawel
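
For clarity, a compact sketch of the alternative Pawel describes above:
keep the single nb_q_per_pool field and work on local rx/tx copies so the
shared SRIOV state is never modified by the mode check. The function name
and surrounding details are illustrative assumptions, not code from the
patch.

	#include <errno.h>
	#include <rte_ethdev.h>

	/* Sketch: clamp per-pool limits locally per mq_mode, then validate
	 * the requested queue counts without writing anything back. */
	static int
	check_sriov_queue_numbers(struct rte_eth_dev *dev,
				  uint16_t nb_rx_q, uint16_t nb_tx_q,
				  const struct rte_eth_conf *conf)
	{
		uint16_t nb_rx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
		uint16_t nb_tx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;

		/* Anything but DCB falls back to one rx queue per pool. */
		if (conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB)
			nb_rx_q_per_pool = 1;

		/* Likewise for tx. */
		if (conf->txmode.mq_mode != ETH_MQ_TX_VMDQ_DCB)
			nb_tx_q_per_pool = 1;

		if (nb_rx_q > nb_rx_q_per_pool || nb_tx_q > nb_tx_q_per_pool)
			return -EINVAL;

		return 0;
	}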

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
  2015-01-13 10:14   ` Vlad Zolotarov
  2015-01-13 11:00     ` Wodkowski, PawelX
@ 2015-01-14  1:00     ` Ouyang, Changchun
  1 sibling, 0 replies; 10+ messages in thread
From: Ouyang, Changchun @ 2015-01-14  1:00 UTC (permalink / raw)
  To: Vlad Zolotarov, Jastrzebski, MichalX K, dev



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Vlad Zolotarov
> Sent: Tuesday, January 13, 2015 6:14 PM
> To: Jastrzebski, MichalX K; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
> 
> 
> On 01/12/15 17:50, Michal Jastrzebski wrote:
> > From: Pawel Wodkowski <pawelx.wodkowski@intel.com>
> >
> > This patch adds support for DCB in SRIOV mode. When no PFC is enabled
> > this feature might be used as multiple queues (up to 8 or 4) for a VF.
> >
> > It incorporates the following modifications:
> >   - Allow zero rx/tx queues to be passed to rte_eth_dev_configure().
> >     Rationale:
> >     in SRIOV mode the PF uses the first free VF to RX/TX. If the VF count
> >     is 16 or 32 all resources are assigned to VFs, so the PF can
> >     be used only for configuration.
> >   - split nb_q_per_pool into nb_rx_q_per_pool and nb_tx_q_per_pool
> >     Rationale:
> >     the rx and tx queue numbers might differ if RX and TX are
> >     configured in different modes. This allows informing the VF about
> >     the proper number of queues.
> >   - extern mailbox API for DCB mode
> 
> IMHO each bullet above is worth a separate patch. ;) It would be much easier
> to review.
> 
> thanks,
> vlad
> 
Agree with Vlad

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
  2015-01-13 10:14   ` Vlad Zolotarov
@ 2015-01-13 11:00     ` Wodkowski, PawelX
  2015-01-14  1:00     ` Ouyang, Changchun
  1 sibling, 0 replies; 10+ messages in thread
From: Wodkowski, PawelX @ 2015-01-13 11:00 UTC (permalink / raw)
  To: Vlad Zolotarov, Jastrzebski, MichalX K, dev

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Vlad Zolotarov
> Sent: Tuesday, January 13, 2015 11:14 AM
> To: Jastrzebski, MichalX K; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
> 
> 
> On 01/12/15 17:50, Michal Jastrzebski wrote:
> > From: Pawel Wodkowski <pawelx.wodkowski@intel.com>
> >
> > This patch adds support for DCB in SRIOV mode. When no PFC
> > is enabled this feature might be used as multiple queues
> > (up to 8 or 4) per VF.
> >
> > It incorporates the following modifications:
> >   - Allow zero rx/tx queues to be passed to rte_eth_dev_configure().
> >     Rationale:
> >     in SRIOV mode the PF uses the first free VF pool for its RX/TX. If
> >     the VF count is 16 or 32, all resources are assigned to VFs, so the
> >     PF can be used only for configuration.
> >   - split nb_q_per_pool into nb_rx_q_per_pool and nb_tx_q_per_pool
> >     Rationale:
> >     the number of rx and tx queues might differ if RX and TX are
> >     configured in different modes. This allows the proper number of
> >     queues to be reported to the VF.
> >   - extend the mailbox API for DCB mode
> 
> IMHO each bullet above is worth a separate patch. ;)
> It would be much easier to review.
> 

Good point. I will send next version shortly.

Pawel

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
  2015-01-12 15:50 ` [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe Michal Jastrzebski
@ 2015-01-13 10:14   ` Vlad Zolotarov
  2015-01-13 11:00     ` Wodkowski, PawelX
  2015-01-14  1:00     ` Ouyang, Changchun
  0 siblings, 2 replies; 10+ messages in thread
From: Vlad Zolotarov @ 2015-01-13 10:14 UTC (permalink / raw)
  To: Michal Jastrzebski, dev


On 01/12/15 17:50, Michal Jastrzebski wrote:
> From: Pawel Wodkowski <pawelx.wodkowski@intel.com>
>
> > This patch adds support for DCB in SRIOV mode. When no PFC
> > is enabled this feature might be used as multiple queues
> > (up to 8 or 4) per VF.
> >
> > It incorporates the following modifications:
> >   - Allow zero rx/tx queues to be passed to rte_eth_dev_configure().
> >     Rationale:
> >     in SRIOV mode the PF uses the first free VF pool for its RX/TX. If
> >     the VF count is 16 or 32, all resources are assigned to VFs, so the
> >     PF can be used only for configuration.
> >   - split nb_q_per_pool into nb_rx_q_per_pool and nb_tx_q_per_pool
> >     Rationale:
> >     the number of rx and tx queues might differ if RX and TX are
> >     configured in different modes. This allows the proper number of
> >     queues to be reported to the VF.
> >   - extend the mailbox API for DCB mode

IMHO each bullet above is worth a separate patch. ;)
It would be much easier to review.

thanks,
vlad

>
> Signed-off-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
> ---
>   lib/librte_ether/rte_ethdev.c       |   84 +++++++++++++++++++++---------
>   lib/librte_ether/rte_ethdev.h       |    5 +-
>   lib/librte_pmd_e1000/igb_pf.c       |    3 +-
>   lib/librte_pmd_ixgbe/ixgbe_ethdev.c |   10 ++--
>   lib/librte_pmd_ixgbe/ixgbe_ethdev.h |    1 +
>   lib/librte_pmd_ixgbe/ixgbe_pf.c     |   98 ++++++++++++++++++++++++++++++-----
>   lib/librte_pmd_ixgbe/ixgbe_rxtx.c   |    7 ++-
>   7 files changed, 159 insertions(+), 49 deletions(-)
>
> diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
> index 95f2ceb..4c1a494 100644
> --- a/lib/librte_ether/rte_ethdev.c
> +++ b/lib/librte_ether/rte_ethdev.c
> @@ -333,7 +333,7 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
>   		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
>   				sizeof(dev->data->rx_queues[0]) * nb_queues,
>   				RTE_CACHE_LINE_SIZE);
> -		if (dev->data->rx_queues == NULL) {
> +		if (dev->data->rx_queues == NULL && nb_queues > 0) {
>   			dev->data->nb_rx_queues = 0;
>   			return -(ENOMEM);
>   		}
> @@ -475,7 +475,7 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
>   		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
>   				sizeof(dev->data->tx_queues[0]) * nb_queues,
>   				RTE_CACHE_LINE_SIZE);
> -		if (dev->data->tx_queues == NULL) {
> +		if (dev->data->tx_queues == NULL && nb_queues > 0) {
>   			dev->data->nb_tx_queues = 0;
>   			return -(ENOMEM);
>   		}
> @@ -507,6 +507,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>   		      const struct rte_eth_conf *dev_conf)
>   {
>   	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
> +	struct rte_eth_dev_info dev_info;
>   
>   	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
>   		/* check multi-queue mode */
> @@ -524,11 +525,33 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>   			return (-EINVAL);
>   		}
>   
> +		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) &&
> +			(dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB)) {
> +			enum rte_eth_nb_pools rx_pools =
> +						dev_conf->rx_adv_conf.vmdq_dcb_conf.nb_queue_pools;
> +			enum rte_eth_nb_pools tx_pools =
> +						dev_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools;
> +
> +			if (rx_pools != tx_pools) {
> +				/* Only equal number of pools is supported when
> +				 * DCB+VMDq in SRIOV */
> +				PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
> +						" SRIOV active, DCB+VMDQ mode, "
> +						"number of rx and tx pools is not equal\n",
> +						port_id);
> +				return (-EINVAL);
> +			}
> +		}
> +
> +		uint16_t nb_rx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool;
> +		uint16_t nb_tx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool;
> +
>   		switch (dev_conf->rxmode.mq_mode) {
> -		case ETH_MQ_RX_VMDQ_RSS:
>   		case ETH_MQ_RX_VMDQ_DCB:
> +			break;
> +		case ETH_MQ_RX_VMDQ_RSS:
>   		case ETH_MQ_RX_VMDQ_DCB_RSS:
> -			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
> +			/* RSS, DCB+RSS VMDQ in SRIOV mode, not implemented yet */
>   			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
>   					" SRIOV active, "
>   					"unsupported VMDQ mq_mode rx %u\n",
> @@ -537,37 +560,32 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>   		default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
>   			/* if nothing mq mode configure, use default scheme */
>   			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
> -			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
> -				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
> +			if (nb_rx_q_per_pool > 1)
> +				nb_rx_q_per_pool = 1;
>   			break;
>   		}
>   
>   		switch (dev_conf->txmode.mq_mode) {
> -		case ETH_MQ_TX_VMDQ_DCB:
> -			/* DCB VMDQ in SRIOV mode, not implement yet */
> -			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
> -					" SRIOV active, "
> -					"unsupported VMDQ mq_mode tx %u\n",
> -					port_id, dev_conf->txmode.mq_mode);
> -			return (-EINVAL);
> +		case ETH_MQ_TX_VMDQ_DCB: /* DCB VMDQ in SRIOV mode*/
> +			break;
>   		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
>   			/* if nothing mq mode configure, use default scheme */
>   			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
> -			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
> -				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
> +			if (nb_tx_q_per_pool > 1)
> +				nb_tx_q_per_pool = 1;
>   			break;
>   		}
>   
>   		/* check valid queue number */
> -		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
> -		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
> +		if (nb_rx_q > nb_rx_q_per_pool || nb_tx_q > nb_tx_q_per_pool) {
>   			PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
> -				    "queue number must less equal to %d\n",
> -					port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
> +				    "rx/tx queue number must be less than or equal to %d/%d\n",
> +					port_id, RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool,
> +					RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool);
>   			return (-EINVAL);
>   		}
>   	} else {
> -		/* For vmdb+dcb mode check our configuration before we go further */
> +		/* For vmdq+dcb mode check our configuration before we go further */
>   		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
>   			const struct rte_eth_vmdq_dcb_conf *conf;
>   
> @@ -606,11 +624,20 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>   			}
>   		}
>   
> +		/* For DCB we need to obtain the maximum number of queues dynamically,
> +		 * as this depends on the number of VFs exported by the PF */
> +		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
> +			(dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
> +
> +			FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
> +			(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
> +		}
> +
>   		/* For DCB mode check our configuration before we go further */
>   		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
>   			const struct rte_eth_dcb_rx_conf *conf;
>   
> -			if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
> +			if (nb_rx_q != dev_info.max_rx_queues) {
>   				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
>   						"!= %d\n",
>   						port_id, ETH_DCB_NUM_QUEUES);
> @@ -630,7 +657,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>   		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
>   			const struct rte_eth_dcb_tx_conf *conf;
>   
> -			if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
> +			if (nb_tx_q != dev_info.max_tx_queues) {
>   				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
>   						"!= %d\n",
>   						port_id, ETH_DCB_NUM_QUEUES);
> @@ -690,7 +717,10 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>   	}
>   	if (nb_rx_q == 0) {
>   		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
> -		return (-EINVAL);
> +		/* In SRIOV mode there may be no free resources left for the PF,
> +		 * so permit its use for configuration only. */
> +		if (RTE_ETH_DEV_SRIOV(dev).active == 0)
> +			return (-EINVAL);
>   	}
>   
>   	if (nb_tx_q > dev_info.max_tx_queues) {
> @@ -698,9 +728,13 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>   				port_id, nb_tx_q, dev_info.max_tx_queues);
>   		return (-EINVAL);
>   	}
> +
>   	if (nb_tx_q == 0) {
>   		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
> -		return (-EINVAL);
> +		/* In SRIOV mode there may be no free resources left for the PF,
> +		 * so permit its use for configuration only. */
> +		if (RTE_ETH_DEV_SRIOV(dev).active == 0)
> +			return (-EINVAL);
>   	}
>   
>   	/* Copy the dev_conf parameter into the dev structure */
> @@ -750,7 +784,7 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
>   							ETHER_MAX_LEN;
>   	}
>   
> -	/* multipe queue mode checking */
> +	/* multiple queue mode checking */
>   	diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
>   	if (diag != 0) {
>   		PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
> diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
> index ce0528f..04fda83 100644
> --- a/lib/librte_ether/rte_ethdev.h
> +++ b/lib/librte_ether/rte_ethdev.h
> @@ -299,7 +299,7 @@ enum rte_eth_rx_mq_mode {
>   enum rte_eth_tx_mq_mode {
>   	ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
>   	ETH_MQ_TX_DCB,          /**< For TX side,only DCB is on. */
> -	ETH_MQ_TX_VMDQ_DCB,	/**< For TX side,both DCB and VT is on. */
> +	ETH_MQ_TX_VMDQ_DCB,     /**< For TX side,both DCB and VT is on. */
>   	ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
>   };
>   
> @@ -1569,7 +1569,8 @@ struct rte_eth_dev {
>   
>   struct rte_eth_dev_sriov {
>   	uint8_t active;               /**< SRIOV is active with 16, 32 or 64 pools */
> -	uint8_t nb_q_per_pool;        /**< rx queue number per pool */
> +	uint8_t nb_rx_q_per_pool;        /**< rx queue number per pool */
> +	uint8_t nb_tx_q_per_pool;        /**< tx queue number per pool */
>   	uint16_t def_vmdq_idx;        /**< Default pool num used for PF */
>   	uint16_t def_pool_q_idx;      /**< Default pool queue start reg index */
>   };
> diff --git a/lib/librte_pmd_e1000/igb_pf.c b/lib/librte_pmd_e1000/igb_pf.c
> index bc3816a..9d2f858 100644
> --- a/lib/librte_pmd_e1000/igb_pf.c
> +++ b/lib/librte_pmd_e1000/igb_pf.c
> @@ -115,7 +115,8 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
>   		rte_panic("Cannot allocate memory for private VF data\n");
>   
>   	RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
> -	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
> +	RTE_ETH_DEV_SRIOV(eth_dev).nb_rx_q_per_pool = nb_queue;
> +	RTE_ETH_DEV_SRIOV(eth_dev).nb_tx_q_per_pool = nb_queue;
>   	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
>   	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
>   
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> index 3fc3738..347f03c 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
> @@ -3555,14 +3555,14 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
>   	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>   	struct ixgbe_vf_info *vfinfo =
>   		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
> -	uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
> +	uint8_t  nb_tx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool;
>   	uint32_t queue_stride =
>   		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
>   	uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
> -	uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
> +	uint32_t tx_queue_end = queue_idx + nb_tx_q_per_pool - 1;
>   	uint16_t total_rate = 0;
>   
> -	if (queue_end >= hw->mac.max_tx_queues)
> +	if (tx_queue_end >= hw->mac.max_tx_queues)
>   		return -EINVAL;
>   
>   	if (vfinfo != NULL) {
> @@ -3577,7 +3577,7 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
>   		return -EINVAL;
>   
>   	/* Store tx_rate for this vf. */
> -	for (idx = 0; idx < nb_q_per_pool; idx++) {
> +	for (idx = 0; idx < nb_tx_q_per_pool; idx++) {
>   		if (((uint64_t)0x1 << idx) & q_msk) {
>   			if (vfinfo[vf].tx_rate[idx] != tx_rate)
>   				vfinfo[vf].tx_rate[idx] = tx_rate;
> @@ -3595,7 +3595,7 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
>   	}
>   
>   	/* Set RTTBCNRC of each queue/pool for vf X  */
> -	for (; queue_idx <= queue_end; queue_idx++) {
> +	for (; queue_idx <= tx_queue_end; queue_idx++) {
>   		if (0x1 & q_msk)
>   			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
>   		q_msk = q_msk >> 1;
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
> index ca99170..ebf16e9 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
> +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
> @@ -159,6 +159,7 @@ struct ixgbe_vf_info {
>   	uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF];
>   	uint16_t vlan_count;
>   	uint8_t spoofchk_enabled;
> +	unsigned int vf_api;
>   };
>   
>   /*
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_pf.c b/lib/librte_pmd_ixgbe/ixgbe_pf.c
> index 51da1fd..4d30bcf 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_pf.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_pf.c
> @@ -127,7 +127,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
>   		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
>   	}
>   
> -	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
> +	RTE_ETH_DEV_SRIOV(eth_dev).nb_rx_q_per_pool = nb_queue;
> +	RTE_ETH_DEV_SRIOV(eth_dev).nb_tx_q_per_pool = nb_queue;
>   	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
>   	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
>   
> @@ -189,7 +190,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
>   	hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
>   
>   	/*
> -	 * SW msut set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
> +	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
>   	 */
>   	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
>   	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
> @@ -214,19 +215,19 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
>   	}
>   
>   	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
> -        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
> +	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
>   
> -        /*
> +	/*
>   	 * enable vlan filtering and allow all vlan tags through
>   	 */
> -        vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
> -        vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
> -        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
> +	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
> +	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
> +	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
>   
> -        /* VFTA - enable all vlan filters */
> -        for (i = 0; i < IXGBE_MAX_VFTA; i++) {
> -                IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
> -        }
> +	/* VFTA - enable all vlan filters */
> +	for (i = 0; i < IXGBE_MAX_VFTA; i++) {
> +		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
> +	}
>   
>   	/* Enable MAC Anti-Spoofing */
>   	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
> @@ -369,6 +370,73 @@ ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
>   }
>   
>   static int
> +ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
> +{
> +	struct ixgbe_vf_info *vfinfo =
> +		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
> +	int api = msgbuf[1];
> +
> +	switch (api) {
> +	case ixgbe_mbox_api_10:
> +	case ixgbe_mbox_api_11:
> +		vfinfo[vf].vf_api = api;
> +		return 0;
> +	default:
> +		break;
> +	}
> +
> +	RTE_LOG(DEBUG, PMD, "VF %d requested invalid api version %u\n", vf, api);
> +	return -1;
> +}
> +
> +static int
> +ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
> +{
> +	struct ixgbe_vf_info *vfinfo =
> +		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
> +	struct ixgbe_dcb_config *dcb_cfg =
> +			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
> +
> +	uint8_t num_tcs = dcb_cfg->num_tcs.pg_tcs;
> +
> +	/* verify the PF is supporting the correct APIs */
> +	switch (vfinfo[vf].vf_api) {
> +	case ixgbe_mbox_api_10:
> +	case ixgbe_mbox_api_11:
> +		break;
> +	default:
> +		return -1;
> +	}
> +
> +	if (RTE_ETH_DEV_SRIOV(dev).active) {
> +		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB)
> +			msgbuf[IXGBE_VF_TX_QUEUES] = num_tcs;
> +		else
> +			msgbuf[IXGBE_VF_TX_QUEUES] = 1;
> +
> +		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB)
> +			msgbuf[IXGBE_VF_RX_QUEUES] = num_tcs;
> +		else
> +			msgbuf[IXGBE_VF_RX_QUEUES] = 1;
> +	} else {
> +		/* only allow 1 Tx queue for bandwidth limiting */
> +		msgbuf[IXGBE_VF_TX_QUEUES] = 1;
> +		msgbuf[IXGBE_VF_RX_QUEUES] = 1;
> +	}
> +
> +	/* notify VF of need for VLAN tag stripping, and correct queue */
> +	if (num_tcs)
> +		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
> +	else
> +		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
> +
> +	/* notify VF of default queue */
> +	msgbuf[IXGBE_VF_DEF_QUEUE] = 0;
> +
> +	return 0;
> +}
> +
> +static int
>   ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
>   {
>   	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> @@ -512,6 +580,12 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
>   	case IXGBE_VF_SET_VLAN:
>   		retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
>   		break;
> +	case IXGBE_VF_API_NEGOTIATE:
> +		retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
> +		break;
> +	case IXGBE_VF_GET_QUEUES:
> +		retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
> +		break;
>   	default:
>   		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
>   		retval = IXGBE_ERR_MBX;
> @@ -526,7 +600,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
>   
>   	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
>   
> -	ixgbe_write_mbx(hw, msgbuf, 1, vf);
> +	ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
>   
>   	return retval;
>   }
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> index e10d6a2..49b44fe 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> @@ -3166,10 +3166,9 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
>   
>   	/* check support mq_mode for DCB */
>   	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
> -	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
> -		return;
> -
> -	if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
> +	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
> +	    (dev_conf->txmode.mq_mode != ETH_MQ_TX_VMDQ_DCB) &&
> +	    (dev_conf->txmode.mq_mode != ETH_MQ_TX_DCB))
>   		return;
>   
>   	/** Configure DCB hardware **/

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe
  2015-01-12 15:50 [dpdk-dev] [PATCH 0/2] Enable DCB in SRIOV mode for ixgbe driver Michal Jastrzebski
@ 2015-01-12 15:50 ` Michal Jastrzebski
  2015-01-13 10:14   ` Vlad Zolotarov
  0 siblings, 1 reply; 10+ messages in thread
From: Michal Jastrzebski @ 2015-01-12 15:50 UTC (permalink / raw)
  To: dev

From: Pawel Wodkowski <pawelx.wodkowski@intel.com>

This patch adds support for DCB in SRIOV mode. When no PFC
is enabled this feature might be used as multiple queues
(up to 8 or 4) per VF.

It incorporates the following modifications:
 - Allow zero rx/tx queues to be passed to rte_eth_dev_configure().
   Rationale:
   in SRIOV mode the PF uses the first free VF pool for its RX/TX. If
   the VF count is 16 or 32, all resources are assigned to VFs, so the
   PF can be used only for configuration.
 - split nb_q_per_pool into nb_rx_q_per_pool and nb_tx_q_per_pool
   Rationale:
   the number of rx and tx queues might differ if RX and TX are
   configured in different modes. This allows the proper number of
   queues to be reported to the VF.
 - extend the mailbox API for DCB mode

Signed-off-by: Pawel Wodkowski <pawelx.wodkowski@intel.com>
---
 lib/librte_ether/rte_ethdev.c       |   84 +++++++++++++++++++++---------
 lib/librte_ether/rte_ethdev.h       |    5 +-
 lib/librte_pmd_e1000/igb_pf.c       |    3 +-
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c |   10 ++--
 lib/librte_pmd_ixgbe/ixgbe_ethdev.h |    1 +
 lib/librte_pmd_ixgbe/ixgbe_pf.c     |   98 ++++++++++++++++++++++++++++++-----
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c   |    7 ++-
 7 files changed, 159 insertions(+), 49 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 95f2ceb..4c1a494 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -333,7 +333,7 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
 				sizeof(dev->data->rx_queues[0]) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
-		if (dev->data->rx_queues == NULL) {
+		if (dev->data->rx_queues == NULL && nb_queues > 0) {
 			dev->data->nb_rx_queues = 0;
 			return -(ENOMEM);
 		}
@@ -475,7 +475,7 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
 				sizeof(dev->data->tx_queues[0]) * nb_queues,
 				RTE_CACHE_LINE_SIZE);
-		if (dev->data->tx_queues == NULL) {
+		if (dev->data->tx_queues == NULL && nb_queues > 0) {
 			dev->data->nb_tx_queues = 0;
 			return -(ENOMEM);
 		}
@@ -507,6 +507,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		      const struct rte_eth_conf *dev_conf)
 {
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	struct rte_eth_dev_info dev_info;
 
 	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
 		/* check multi-queue mode */
@@ -524,11 +525,33 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 			return (-EINVAL);
 		}
 
+		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) &&
+			(dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB)) {
+			enum rte_eth_nb_pools rx_pools =
+						dev_conf->rx_adv_conf.vmdq_dcb_conf.nb_queue_pools;
+			enum rte_eth_nb_pools tx_pools =
+						dev_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools;
+
+			if (rx_pools != tx_pools) {
+				/* Only equal number of pools is supported when
+				 * DCB+VMDq in SRIOV */
+				PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
+						" SRIOV active, DCB+VMDQ mode, "
+						"number of rx and tx pools is not equal\n",
+						port_id);
+				return (-EINVAL);
+			}
+		}
+
+		uint16_t nb_rx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool;
+		uint16_t nb_tx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool;
+
 		switch (dev_conf->rxmode.mq_mode) {
-		case ETH_MQ_RX_VMDQ_RSS:
 		case ETH_MQ_RX_VMDQ_DCB:
+			break;
+		case ETH_MQ_RX_VMDQ_RSS:
 		case ETH_MQ_RX_VMDQ_DCB_RSS:
-			/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
+			/* RSS, DCB+RSS VMDQ in SRIOV mode, not implemented yet */
 			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
 					" SRIOV active, "
 					"unsupported VMDQ mq_mode rx %u\n",
@@ -537,37 +560,32 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
-			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
-				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+			if (nb_rx_q_per_pool > 1)
+				nb_rx_q_per_pool = 1;
 			break;
 		}
 
 		switch (dev_conf->txmode.mq_mode) {
-		case ETH_MQ_TX_VMDQ_DCB:
-			/* DCB VMDQ in SRIOV mode, not implement yet */
-			PMD_DEBUG_TRACE("ethdev port_id=%" PRIu8
-					" SRIOV active, "
-					"unsupported VMDQ mq_mode tx %u\n",
-					port_id, dev_conf->txmode.mq_mode);
-			return (-EINVAL);
+		case ETH_MQ_TX_VMDQ_DCB: /* DCB VMDQ in SRIOV mode*/
+			break;
 		default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
-			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
-				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+			if (nb_tx_q_per_pool > 1)
+				nb_tx_q_per_pool = 1;
 			break;
 		}
 
 		/* check valid queue number */
-		if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
-		    (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+		if (nb_rx_q > nb_rx_q_per_pool || nb_tx_q > nb_tx_q_per_pool) {
 			PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
-				    "queue number must less equal to %d\n",
-					port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+				    "rx/tx queue number must be less than or equal to %d/%d\n",
+					port_id, RTE_ETH_DEV_SRIOV(dev).nb_rx_q_per_pool,
+					RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool);
 			return (-EINVAL);
 		}
 	} else {
-		/* For vmdb+dcb mode check our configuration before we go further */
+		/* For vmdq+dcb mode check our configuration before we go further */
 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
 			const struct rte_eth_vmdq_dcb_conf *conf;
 
@@ -606,11 +624,20 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 			}
 		}
 
+		/* For DCB we need to obtain the maximum number of queues dynamically,
+		 * as this depends on the number of VFs exported by the PF */
+		if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
+			(dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
+
+			FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+			(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+		}
+
 		/* For DCB mode check our configuration before we go further */
 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
-			if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
+			if (nb_rx_q != dev_info.max_rx_queues) {
 				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
 						"!= %d\n",
 						port_id, ETH_DCB_NUM_QUEUES);
@@ -630,7 +657,7 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
-			if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
+			if (nb_tx_q != dev_info.max_tx_queues) {
 				PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
 						"!= %d\n",
 						port_id, ETH_DCB_NUM_QUEUES);
@@ -690,7 +717,10 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 	if (nb_rx_q == 0) {
 		PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0\n", port_id);
-		return (-EINVAL);
+		/* In SRIOV mode there may be no free resources left for the PF,
+		 * so permit its use for configuration only. */
+		if (RTE_ETH_DEV_SRIOV(dev).active == 0)
+			return (-EINVAL);
 	}
 
 	if (nb_tx_q > dev_info.max_tx_queues) {
@@ -698,9 +728,13 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 				port_id, nb_tx_q, dev_info.max_tx_queues);
 		return (-EINVAL);
 	}
+
 	if (nb_tx_q == 0) {
 		PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0\n", port_id);
-		return (-EINVAL);
+		/* In SRIOV mode there may be no free resources left for the PF,
+		 * so permit its use for configuration only. */
+		if (RTE_ETH_DEV_SRIOV(dev).active == 0)
+			return (-EINVAL);
 	}
 
 	/* Copy the dev_conf parameter into the dev structure */
@@ -750,7 +784,7 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 							ETHER_MAX_LEN;
 	}
 
-	/* multipe queue mode checking */
+	/* multiple queue mode checking */
 	diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
 	if (diag != 0) {
 		PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index ce0528f..04fda83 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -299,7 +299,7 @@ enum rte_eth_rx_mq_mode {
 enum rte_eth_tx_mq_mode {
 	ETH_MQ_TX_NONE    = 0,  /**< It is in neither DCB nor VT mode. */
 	ETH_MQ_TX_DCB,          /**< For TX side,only DCB is on. */
-	ETH_MQ_TX_VMDQ_DCB,	/**< For TX side,both DCB and VT is on. */
+	ETH_MQ_TX_VMDQ_DCB,     /**< For TX side,both DCB and VT is on. */
 	ETH_MQ_TX_VMDQ_ONLY,    /**< Only VT on, no DCB */
 };
 
@@ -1569,7 +1569,8 @@ struct rte_eth_dev {
 
 struct rte_eth_dev_sriov {
 	uint8_t active;               /**< SRIOV is active with 16, 32 or 64 pools */
-	uint8_t nb_q_per_pool;        /**< rx queue number per pool */
+	uint8_t nb_rx_q_per_pool;        /**< rx queue number per pool */
+	uint8_t nb_tx_q_per_pool;        /**< tx queue number per pool */
 	uint16_t def_vmdq_idx;        /**< Default pool num used for PF */
 	uint16_t def_pool_q_idx;      /**< Default pool queue start reg index */
 };
diff --git a/lib/librte_pmd_e1000/igb_pf.c b/lib/librte_pmd_e1000/igb_pf.c
index bc3816a..9d2f858 100644
--- a/lib/librte_pmd_e1000/igb_pf.c
+++ b/lib/librte_pmd_e1000/igb_pf.c
@@ -115,7 +115,8 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
 		rte_panic("Cannot allocate memory for private VF data\n");
 
 	RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
-	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_rx_q_per_pool = nb_queue;
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_tx_q_per_pool = nb_queue;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
 
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 3fc3738..347f03c 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -3555,14 +3555,14 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_vf_info *vfinfo =
 		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-	uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	uint8_t  nb_tx_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_tx_q_per_pool;
 	uint32_t queue_stride =
 		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
 	uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
-	uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
+	uint32_t tx_queue_end = queue_idx + nb_tx_q_per_pool - 1;
 	uint16_t total_rate = 0;
 
-	if (queue_end >= hw->mac.max_tx_queues)
+	if (tx_queue_end >= hw->mac.max_tx_queues)
 		return -EINVAL;
 
 	if (vfinfo != NULL) {
@@ -3577,7 +3577,7 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 		return -EINVAL;
 
 	/* Store tx_rate for this vf. */
-	for (idx = 0; idx < nb_q_per_pool; idx++) {
+	for (idx = 0; idx < nb_tx_q_per_pool; idx++) {
 		if (((uint64_t)0x1 << idx) & q_msk) {
 			if (vfinfo[vf].tx_rate[idx] != tx_rate)
 				vfinfo[vf].tx_rate[idx] = tx_rate;
@@ -3595,7 +3595,7 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
 	}
 
 	/* Set RTTBCNRC of each queue/pool for vf X  */
-	for (; queue_idx <= queue_end; queue_idx++) {
+	for (; queue_idx <= tx_queue_end; queue_idx++) {
 		if (0x1 & q_msk)
 			ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
 		q_msk = q_msk >> 1;
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
index ca99170..ebf16e9 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -159,6 +159,7 @@ struct ixgbe_vf_info {
 	uint16_t tx_rate[IXGBE_MAX_QUEUE_NUM_PER_VF];
 	uint16_t vlan_count;
 	uint8_t spoofchk_enabled;
+	unsigned int vf_api;
 };
 
 /*
diff --git a/lib/librte_pmd_ixgbe/ixgbe_pf.c b/lib/librte_pmd_ixgbe/ixgbe_pf.c
index 51da1fd..4d30bcf 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_pf.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_pf.c
@@ -127,7 +127,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
 	}
 
-	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_rx_q_per_pool = nb_queue;
+	RTE_ETH_DEV_SRIOV(eth_dev).nb_tx_q_per_pool = nb_queue;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
 
@@ -189,7 +190,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
 
 	/*
-	 * SW msut set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
+	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
 	 */
 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
 	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
@@ -214,19 +215,19 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
-        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 
-        /*
+	/*
 	 * enable vlan filtering and allow all vlan tags through
 	 */
-        vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
-        vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
-        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
 
-        /* VFTA - enable all vlan filters */
-        for (i = 0; i < IXGBE_MAX_VFTA; i++) {
-                IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
-        }
+	/* VFTA - enable all vlan filters */
+	for (i = 0; i < IXGBE_MAX_VFTA; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
+	}
 
 	/* Enable MAC Anti-Spoofing */
 	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
@@ -369,6 +370,73 @@ ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
 }
 
 static int
+ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	int api = msgbuf[1];
+
+	switch (api) {
+	case ixgbe_mbox_api_10:
+	case ixgbe_mbox_api_11:
+		vfinfo[vf].vf_api = api;
+		return 0;
+	default:
+		break;
+	}
+
+	RTE_LOG(DEBUG, PMD, "VF %d requested invalid api version %u\n", vf, api);
+	return -1;
+}
+
+static int
+ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_vf_info *vfinfo =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	struct ixgbe_dcb_config *dcb_cfg =
+			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+
+	uint8_t num_tcs = dcb_cfg->num_tcs.pg_tcs;
+
+	/* verify the PF is supporting the correct APIs */
+	switch (vfinfo[vf].vf_api) {
+	case ixgbe_mbox_api_10:
+	case ixgbe_mbox_api_11:
+		break;
+	default:
+		return -1;
+	}
+
+	if (RTE_ETH_DEV_SRIOV(dev).active) {
+		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB)
+			msgbuf[IXGBE_VF_TX_QUEUES] = num_tcs;
+		else
+			msgbuf[IXGBE_VF_TX_QUEUES] = 1;
+
+		if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB)
+			msgbuf[IXGBE_VF_RX_QUEUES] = num_tcs;
+		else
+			msgbuf[IXGBE_VF_RX_QUEUES] = 1;
+	} else {
+		/* only allow 1 Tx queue for bandwidth limiting */
+		msgbuf[IXGBE_VF_TX_QUEUES] = 1;
+		msgbuf[IXGBE_VF_RX_QUEUES] = 1;
+	}
+
+	/* notify VF of need for VLAN tag stripping, and correct queue */
+	if (num_tcs)
+		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
+	else
+		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
+
+	/* notify VF of default queue */
+	msgbuf[IXGBE_VF_DEF_QUEUE] = 0;
+
+	return 0;
+}
+
+static int
 ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -512,6 +580,12 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 	case IXGBE_VF_SET_VLAN:
 		retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
 		break;
+	case IXGBE_VF_API_NEGOTIATE:
+		retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
+		break;
+	case IXGBE_VF_GET_QUEUES:
+		retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
+		break;
 	default:
 		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
 		retval = IXGBE_ERR_MBX;
@@ -526,7 +600,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 
 	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
 
-	ixgbe_write_mbx(hw, msgbuf, 1, vf);
+	ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
 
 	return retval;
 }
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index e10d6a2..49b44fe 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -3166,10 +3166,9 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 
 	/* check support mq_mode for DCB */
 	if ((dev_conf->rxmode.mq_mode != ETH_MQ_RX_VMDQ_DCB) &&
-	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
-		return;
-
-	if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
+	    (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB) &&
+	    (dev_conf->txmode.mq_mode != ETH_MQ_TX_VMDQ_DCB) &&
+	    (dev_conf->txmode.mq_mode != ETH_MQ_TX_DCB))
 		return;
 
 	/** Configure DCB hardware **/
-- 
1.7.9.5
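
For context, the VF side of the two new mailbox messages handled above would
look roughly like this (a hypothetical sketch only: the IXGBE_VF_* message
codes and msgbuf word indices are the ones used in ixgbe_pf.c above, hw
stands for the VF's ixgbe_hw handle, and ACK/error checking on the replies
is elided):

	uint32_t msg[5];

	/* negotiate mailbox API 1.1 so the PF accepts GET_QUEUES */
	msg[0] = IXGBE_VF_API_NEGOTIATE;
	msg[1] = ixgbe_mbox_api_11;
	ixgbe_write_mbx(hw, msg, 2, 0);
	ixgbe_read_mbx(hw, msg, 2, 0);

	/* ask the PF how many queues this VF actually owns */
	msg[0] = IXGBE_VF_GET_QUEUES;
	ixgbe_write_mbx(hw, msg, 1, 0);
	ixgbe_read_mbx(hw, msg, 5, 0);

	/* with VMDQ+DCB active the PF answers num_tcs (4 or 8) queues per
	 * direction, otherwise 1 -- mirroring ixgbe_get_vf_queues() above */
	uint32_t nb_tx_q    = msg[IXGBE_VF_TX_QUEUES];
	uint32_t nb_rx_q    = msg[IXGBE_VF_RX_QUEUES];
	uint32_t strip_vlan = msg[IXGBE_VF_TRANS_VLAN];
	uint32_t def_queue  = msg[IXGBE_VF_DEF_QUEUE];

Before this patch both messages fell through to the default case in
ixgbe_rcv_msg_from_vf() and were rejected as unhandled; the mbx_size write
fix above also makes sure all four answer words actually reach the VF.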

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2015-01-14  9:47 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-01-12 14:43 [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe Michal Jastrzebski
2015-01-12 15:46 ` Jastrzebski, MichalX K
2015-01-13 10:02   ` Vlad Zolotarov
2015-01-13 10:08 ` Vlad Zolotarov
2015-01-14  0:51   ` Ouyang, Changchun
2015-01-14  9:46     ` Wodkowski, PawelX
2015-01-12 15:50 [dpdk-dev] [PATCH 0/2] Enable DCB in SRIOV mode for ixgbe driver Michal Jastrzebski
2015-01-12 15:50 ` [dpdk-dev] [PATCH 1/2] pmd: add DCB for VF for ixgbe Michal Jastrzebski
2015-01-13 10:14   ` Vlad Zolotarov
2015-01-13 11:00     ` Wodkowski, PawelX
2015-01-14  1:00     ` Ouyang, Changchun
