From: Helin Zhang <helin.zhang@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 2/3] i40evf: support I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EX in i40e VF PMD
Date: Wed, 20 Aug 2014 11:33:30 +0800
Message-ID: <1408505611-6959-3-git-send-email-helin.zhang@intel.com>
In-Reply-To: <1408505611-6959-1-git-send-email-helin.zhang@intel.com>
To support configurable CRC stripping in the VF, use the
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EX operation to carry the extra
information from the VM to the PF host when the peer is a DPDK PF
host. Otherwise, assume the peer is a Linux PF host and use the
standard I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES operation.
Signed-off-by: Helin Zhang <helin.zhang@intel.com>
Reviewed-by: Jingjing Wu <jingjing.wu@intel.com>
Reviewed-by: Jing Chen <jing.d.chen@intel.com>
---
lib/librte_pmd_i40e/i40e_ethdev_vf.c | 188 ++++++++++++++++++++++++-----------
1 file changed, 130 insertions(+), 58 deletions(-)
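
Note (illustrative, not part of the patch): with a DPDK PF host, the VF
PMD forwards the application's CRC stripping request through the _EX
operation, so from the application side the feature is driven entirely
by rxmode.hw_strip_crc at configure time. A minimal sketch follows,
assuming a single RX/TX queue and an already probed port; the function
name, port id and queue counts are illustrative only:

    #include <rte_ethdev.h>

    static int
    configure_vf_port(uint8_t port_id)
    {
            struct rte_eth_conf port_conf = {
                    .rxmode = {
                            /*
                             * Request CRC stripping; on a DPDK PF host this
                             * is carried in the queue-pair extra info
                             * (crcstrip) of the _EX message.
                             */
                            .hw_strip_crc = 1,
                    },
            };

            /* One RX queue and one TX queue, for illustration only. */
            return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
    }
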
diff --git a/lib/librte_pmd_i40e/i40e_ethdev_vf.c b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
index 2726bfb..97310ea 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev_vf.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
@@ -515,82 +515,154 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
return err;
}
+/* It configures VSI queues to co-work with Linux PF host */
static int
-i40evf_configure_queues(struct rte_eth_dev *dev)
+i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct i40e_virtchnl_vsi_queue_config_info *queue_info;
- struct i40e_virtchnl_queue_pair_info *queue_cfg;
struct i40e_rx_queue **rxq =
(struct i40e_rx_queue **)dev->data->rx_queues;
struct i40e_tx_queue **txq =
(struct i40e_tx_queue **)dev->data->tx_queues;
- int i, len, nb_qpairs, num_rxq, num_txq;
- int err;
+ struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
+ struct i40e_virtchnl_queue_pair_info *vc_qpi;
struct vf_cmd_info args;
- struct rte_pktmbuf_pool_private *mbp_priv;
-
- nb_qpairs = vf->num_queue_pairs;
- len = sizeof(*queue_info) + sizeof(*queue_cfg) * nb_qpairs;
- queue_info = rte_zmalloc("queue_info", len, 0);
- if (queue_info == NULL) {
- PMD_INIT_LOG(ERR, "failed alloc memory for queue_info\n");
- return -1;
+ int size, i, nb_qp, ret;
+
+ nb_qp = vf->num_queue_pairs;
+ size = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
+ sizeof(struct i40e_virtchnl_queue_pair_info) * nb_qp;
+ vc_vqci = rte_zmalloc("queue_info", size, 0);
+ if (!vc_vqci) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for VF "
+ "configuring queues\n");
+ return -ENOMEM;
}
- queue_info->vsi_id = vf->vsi_res->vsi_id;
- queue_info->num_queue_pairs = nb_qpairs;
- queue_cfg = queue_info->qpair;
-
- num_rxq = dev->data->nb_rx_queues;
- num_txq = dev->data->nb_tx_queues;
- /*
- * PF host driver required to configure queues in pairs, which means
- * rxq_num should equals to txq_num. The actual usage won't always
- * work that way. The solution is fills 0 with HW ring option in case
- * they are not equal.
- */
- for (i = 0; i < nb_qpairs; i++) {
- /*Fill TX info */
- queue_cfg->txq.vsi_id = queue_info->vsi_id;
- queue_cfg->txq.queue_id = i;
- if (i < num_txq) {
- queue_cfg->txq.ring_len = txq[i]->nb_tx_desc;
- queue_cfg->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
- } else {
- queue_cfg->txq.ring_len = 0;
- queue_cfg->txq.dma_ring_addr = 0;
+ vc_vqci->vsi_id = vf->vsi_res->vsi_id;
+ vc_vqci->num_queue_pairs = nb_qp;
+
+ for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
+ vc_qpi->txq.vsi_id = vc_vqci->vsi_id;
+ vc_qpi->txq.queue_id = i;
+ if (i < dev->data->nb_tx_queues) {
+ vc_qpi->txq.ring_len = txq[i]->nb_tx_desc;
+ vc_qpi->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
}
- /* Fill RX info */
- queue_cfg->rxq.vsi_id = queue_info->vsi_id;
- queue_cfg->rxq.queue_id = i;
- queue_cfg->rxq.max_pkt_size = vf->max_pkt_len;
- if (i < num_rxq) {
+ vc_qpi->rxq.vsi_id = vc_vqci->vsi_id;
+ vc_qpi->rxq.queue_id = i;
+ vc_qpi->rxq.max_pkt_size = vf->max_pkt_len;
+ if (i < dev->data->nb_rx_queues) {
+ struct rte_pktmbuf_pool_private *mbp_priv;
+
+ vc_qpi->rxq.ring_len = rxq[i]->nb_rx_desc;
+ vc_qpi->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
mbp_priv = rte_mempool_get_priv(rxq[i]->mp);
- queue_cfg->rxq.databuffer_size = mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM;;
- queue_cfg->rxq.ring_len = rxq[i]->nb_rx_desc;
- queue_cfg->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;;
- } else {
- queue_cfg->rxq.ring_len = 0;
- queue_cfg->rxq.dma_ring_addr = 0;
- queue_cfg->rxq.databuffer_size = 0;
+ vc_qpi->rxq.databuffer_size =
+ mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM;
}
- queue_cfg++;
}
-
+ memset(&args, 0, sizeof(args));
args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
- args.in_args = (u8 *)queue_info;
- args.in_args_size = len;
+ args.in_args = (uint8_t *)vc_vqci;
+ args.in_args_size = size;
args.out_buffer = cmd_result_buffer;
args.out_size = I40E_AQ_BUF_SZ;
- err = i40evf_execute_vf_cmd(dev, &args);
- if (err)
- PMD_DRV_LOG(ERR, "fail to execute command "
- "OP_CONFIG_VSI_QUEUES\n");
- rte_free(queue_info);
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
+ rte_free(vc_vqci);
- return err;
+ return ret;
+}
+
+/* It configures VSI queues to co-work with DPDK PF host */
+static int
+i40evf_configure_vsi_queues_ex(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct i40e_rx_queue **rxq =
+ (struct i40e_rx_queue **)dev->data->rx_queues;
+ struct i40e_tx_queue **txq =
+ (struct i40e_tx_queue **)dev->data->tx_queues;
+ struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
+ struct i40e_virtchnl_queue_pair_info *vc_qpi;
+ struct i40e_virtchnl_queue_pair_extra_info *vc_qpei;
+ struct vf_cmd_info args;
+ int size, i, nb_qp, ret;
+
+ nb_qp = vf->num_queue_pairs;
+ size = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
+ sizeof(struct i40e_virtchnl_queue_pair_info) * nb_qp +
+ sizeof(struct i40e_virtchnl_queue_pair_extra_info) * nb_qp;
+ vc_vqci = rte_zmalloc("queue_info", size, 0);
+ if (!vc_vqci) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for VF "
+ "configuring queues\n");
+ return -ENOMEM;
+ }
+ vc_vqci->vsi_id = vf->vsi_res->vsi_id;
+ vc_vqci->num_queue_pairs = nb_qp;
+ vc_qpi = vc_vqci->qpair;
+ vc_qpei = (struct i40e_virtchnl_queue_pair_extra_info *)
+ (((uint8_t *)vc_vqci->qpair) +
+ sizeof(struct i40e_virtchnl_queue_pair_info) * nb_qp);
+
+ for (i = 0; i < nb_qp; i++, vc_qpi++, vc_qpei++) {
+ vc_qpi->txq.vsi_id = vc_vqci->vsi_id;
+ vc_qpi->txq.queue_id = i;
+ if (i < dev->data->nb_tx_queues) {
+ vc_qpi->txq.ring_len = txq[i]->nb_tx_desc;
+ vc_qpi->txq.dma_ring_addr = txq[i]->tx_ring_phys_addr;
+ }
+ vc_qpi->rxq.vsi_id = vc_vqci->vsi_id;
+ vc_qpi->rxq.queue_id = i;
+ vc_qpi->rxq.max_pkt_size = vf->max_pkt_len;
+ if (i < dev->data->nb_rx_queues) {
+ struct rte_pktmbuf_pool_private *mbp_priv;
+
+ vc_qpi->rxq.ring_len = rxq[i]->nb_rx_desc;
+ vc_qpi->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
+ mbp_priv = rte_mempool_get_priv(rxq[i]->mp);
+ vc_qpi->rxq.databuffer_size =
+ mbp_priv->mbuf_data_room_size -
+ RTE_PKTMBUF_HEADROOM;
+ /*
+ * It adds extra info for configuring VSI queues, which
+ * is needed to enable the configurable crc stripping
+ * in VF.
+ */
+ vc_qpei->crcstrip =
+ dev->data->dev_conf.rxmode.hw_strip_crc;
+ }
+ }
+ memset(&args, 0, sizeof(args));
+ args.ops =
+ (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EX;
+ args.in_args = (uint8_t *)vc_vqci;
+ args.in_args_size = size;
+ args.out_buffer = cmd_result_buffer;
+ args.out_size = I40E_AQ_BUF_SZ;
+ ret = i40evf_execute_vf_cmd(dev, &args);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EX\n");
+ rte_free(vc_vqci);
+
+ return ret;
+}
+
+static int
+i40evf_configure_queues(struct rte_eth_dev *dev)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (vf->host_is_dpdk) /* To support DPDK PF host */
+ return i40evf_configure_vsi_queues_ex(dev);
+ else /* To support Linux PF host */
+ return i40evf_configure_vsi_queues(dev);
}
static int
--
1.8.1.4
Thread overview: 7+ messages
2014-08-20 3:33 [dpdk-dev] [PATCH 0/3] support of configurable CRC stripping in VF Helin Zhang
2014-08-20 3:33 ` [dpdk-dev] [PATCH 1/3] i40evf: support I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EX in DPDK PF host Helin Zhang
2014-08-30 14:43 ` Thomas Monjalon
2014-09-15 0:21 ` Zhang, Helin
2014-08-20 3:33 ` Helin Zhang [this message]
2014-08-30 14:52 ` [dpdk-dev] [PATCH 2/3] i40evf: support I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EX in i40e VF PMD Thomas Monjalon
2014-08-20 3:33 ` [dpdk-dev] [PATCH 3/3] config: remove useless config of CONFIG_RTE_LIBRTE_I40E_PF_DISABLE_STRIP_CRC Helin Zhang