From: longli@linuxonhyperv.com
To: Ferruh Yigit <ferruh.yigit@intel.com>
Cc: dev@dpdk.org, Ajay Sharma <sharmaajay@microsoft.com>,
	Stephen Hemminger <sthemmin@microsoft.com>,
	Long Li <longli@microsoft.com>
Subject: [PATCH 13/17] net/mana: add function to start/stop RX queues
Date: Fri,  1 Jul 2022 02:02:43 -0700	[thread overview]
Message-ID: <1656666167-26035-14-git-send-email-longli@linuxonhyperv.com> (raw)
In-Reply-To: <1656666167-26035-1-git-send-email-longli@linuxonhyperv.com>

From: Long Li <longli@microsoft.com>

MANA allocates device queues through the IB layer when starting RX
queues. When the device is stopped, all the queues are unmapped and
freed.

Signed-off-by: Long Li <longli@microsoft.com>
---
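Note for reviewers: the sketch below shows how these entry points are
expected to be wired into the ethdev start/stop path. The
mana_dev_start()/mana_dev_stop() wrappers are illustrative only (the
actual wiring is added in patch 16/17 of this series); this patch adds
only start_rx_queues() and stop_rx_queues().

	static int
	mana_dev_start(struct rte_eth_dev *dev)
	{
		int ret;

		ret = start_tx_queues(dev);	/* added in patch 12/17 */
		if (ret)
			return ret;

		ret = start_rx_queues(dev);	/* added in this patch */
		if (ret) {
			stop_tx_queues(dev);
			return ret;
		}

		return 0;
	}

	static int
	mana_dev_stop(struct rte_eth_dev *dev)
	{
		/* All device queues are unmapped and freed on stop */
		stop_rx_queues(dev);
		stop_tx_queues(dev);
		return 0;
	}

start_rx_queues() creates the per-queue CQs and WQs first so that a
single RSS indirection table and hash QP can be built across all of
them; any failure unwinds through stop_rx_queues().
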
 drivers/net/mana/mana.h      |   5 +
 drivers/net/mana/meson.build |   1 +
 drivers/net/mana/rx.c        | 369 +++++++++++++++++++++++++++++++++++
 3 files changed, 375 insertions(+)
 create mode 100644 drivers/net/mana/rx.c

diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index fef646a9a7..5052ec9061 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -364,6 +364,7 @@ extern int mana_logtype_init;
 
 int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
 		       uint32_t queue_id, uint32_t tail);
+int rq_ring_doorbell(struct mana_rxq *rxq);
 
 int gdma_post_work_request(struct mana_gdma_queue *queue,
 			   struct gdma_work_request *work_req,
@@ -379,10 +380,14 @@ uint16_t mana_tx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
 int gdma_poll_completion_queue(struct mana_gdma_queue *cq,
 			       struct gdma_comp *comp);
 
+int start_rx_queues(struct rte_eth_dev *dev);
 int start_tx_queues(struct rte_eth_dev *dev);
 
+int stop_rx_queues(struct rte_eth_dev *dev);
 int stop_tx_queues(struct rte_eth_dev *dev);
 
+int alloc_and_post_rx_wqe(struct mana_rxq *rxq);
+
 struct mana_mr_cache *find_pmd_mr(struct mana_mr_btree *local_tree,
 				  struct mana_priv *priv,
 				  struct rte_mbuf *mbuf);
diff --git a/drivers/net/mana/meson.build b/drivers/net/mana/meson.build
index 34bb9c6b2f..8233c04eee 100644
--- a/drivers/net/mana/meson.build
+++ b/drivers/net/mana/meson.build
@@ -11,6 +11,7 @@ deps += ['pci', 'bus_pci', 'net', 'eal', 'kvargs']
 
 sources += files(
 	'mana.c',
+	'rx.c',
 	'tx.c',
 	'mr.c',
 	'gdma.c',
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
new file mode 100644
index 0000000000..bcc9f308f3
--- /dev/null
+++ b/drivers/net/mana/rx.c
@@ -0,0 +1,369 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2022 Microsoft Corporation
+ */
+
+#include <stddef.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <dirent.h>
+
+#include <rte_malloc.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_kvargs.h>
+#include <rte_rwlock.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_alarm.h>
+#include <rte_log.h>
+#include <rte_eal_paging.h>
+#include <rte_io.h>
+
+#include <infiniband/verbs.h>
+#include <infiniband/manadv.h>
+
+#include "mana.h"
+
+static uint8_t mana_rss_hash_key_default[TOEPLITZ_HASH_KEY_SIZE_IN_BYTES] = {
+	0x2c, 0xc6, 0x81, 0xd1,
+	0x5b, 0xdb, 0xf4, 0xf7,
+	0xfc, 0xa2, 0x83, 0x19,
+	0xdb, 0x1a, 0x3e, 0x94,
+	0x6b, 0x9e, 0x38, 0xd9,
+	0x2c, 0x9c, 0x03, 0xd1,
+	0xad, 0x99, 0x44, 0xa7,
+	0xd9, 0x56, 0x3d, 0x59,
+	0x06, 0x3c, 0x25, 0xf3,
+	0xfc, 0x1f, 0xdc, 0x2a,
+};
+
+int rq_ring_doorbell(struct mana_rxq *rxq)
+{
+	struct mana_priv *priv = rxq->priv;
+	int ret;
+	void *db_page = priv->db_page;
+
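+	/* A secondary process has its own mapping of the doorbell page */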
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		struct rte_eth_dev *dev =
+			&rte_eth_devices[priv->dev_data->port_id];
+		struct mana_process_priv *process_priv = dev->process_private;
+
+		db_page = process_priv->db_page;
+	}
+
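+	/* The WQ head counts basic units; the doorbell takes a byte
+	 * offset, hence the multiply by GDMA_WQE_ALIGNMENT_UNIT_SIZE.
+	 */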
+	ret = mana_ring_doorbell(db_page, gdma_queue_receive,
+				 rxq->gdma_rq.id,
+				 rxq->gdma_rq.head *
+					GDMA_WQE_ALIGNMENT_UNIT_SIZE);
+
+	if (ret)
+		DRV_LOG(ERR, "failed to ring RX doorbell ret %d", ret);
+
+	return ret;
+}
+
+int alloc_and_post_rx_wqe(struct mana_rxq *rxq)
+{
+	struct rte_mbuf *mbuf = NULL;
+	struct gdma_sgl_element sgl[1];
+	struct gdma_work_request request = {0};
+	struct gdma_posted_wqe_info wqe_info = {0};
+	struct mana_priv *priv = rxq->priv;
+	int ret;
+	struct mana_mr_cache *mr;
+
+	mbuf = rte_pktmbuf_alloc(rxq->mp);
+	if (!mbuf) {
+		rxq->stats.nombuf++;
+		return -ENOMEM;
+	}
+
+	mr = find_pmd_mr(&rxq->mr_btree, priv, mbuf);
+	if (!mr) {
+		DRV_LOG(ERR, "failed to register RX MR");
+		rte_pktmbuf_free(mbuf);
+		return -ENOMEM;
+	}
+
+	request.gdma_header.struct_size = sizeof(request);
+	wqe_info.gdma_header.struct_size = sizeof(wqe_info);
+
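+	/* Each RX WQE carries one SGE covering the mbuf data room */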
+	sgl[0].address = rte_cpu_to_le_64(rte_pktmbuf_mtod(mbuf, uint64_t));
+	sgl[0].memory_key = mr->lkey;
+	sgl[0].size =
+		rte_pktmbuf_data_room_size(rxq->mp) -
+		RTE_PKTMBUF_HEADROOM;
+
+	request.sgl = sgl;
+	request.num_sgl_elements = 1;
+	request.inline_oob_data = NULL;
+	request.inline_oob_size_in_bytes = 0;
+	request.flags = 0;
+	request.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;
+
+	ret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_info);
+	if (!ret) {
+		struct mana_rxq_desc *desc =
+			&rxq->desc_ring[rxq->desc_ring_head];
+
+		/* update queue for tracking pending packets */
+		desc->pkt = mbuf;
+		desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu;
+		rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc;
+	} else {
+		DRV_LOG(ERR, "failed to post recv ret %d", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int alloc_and_post_rx_wqes(struct mana_rxq *rxq)
+{
+	int ret;
+
+	for (uint32_t i = 0; i < rxq->num_desc; i++) {
+		ret = alloc_and_post_rx_wqe(rxq);
+		if (ret) {
+			DRV_LOG(ERR, "failed to post RX ret = %d", ret);
+			return ret;
+		}
+	}
+
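+	/* Ring the doorbell once for the whole batch of posted WQEs */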
+	return rq_ring_doorbell(rxq);
+}
+
+int stop_rx_queues(struct rte_eth_dev *dev)
+{
+	struct mana_priv *priv = dev->data->dev_private;
+	int ret, i;
+
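+	/* Destroy in reverse order of creation: hash QP, indirection
+	 * table, then the per-queue WQs and CQs.
+	 */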
+	if (priv->rwq_qp) {
+		ret = ibv_destroy_qp(priv->rwq_qp);
+		if (ret)
+			DRV_LOG(ERR, "rx_queue destory_qp failed %d", ret);
+		priv->rwq_qp = NULL;
+	}
+
+	if (priv->ind_table) {
+		ret = ibv_destroy_rwq_ind_table(priv->ind_table);
+		if (ret)
+			DRV_LOG(ERR, "destroy rwq ind table failed %d", ret);
+		priv->ind_table = NULL;
+	}
+
+	for (i = 0; i < priv->num_queues; i++) {
+		struct mana_rxq *rxq = dev->data->rx_queues[i];
+
+		if (rxq->wq) {
+			ret = ibv_destroy_wq(rxq->wq);
+			if (ret)
+				DRV_LOG(ERR,
+					"rx_queue destroy_wq failed %d", ret);
+			rxq->wq = NULL;
+		}
+
+		if (rxq->cq) {
+			ret = ibv_destroy_cq(rxq->cq);
+			if (ret)
+				DRV_LOG(ERR,
+					"rx_queue destroy_cq failed %d", ret);
+			rxq->cq = NULL;
+		}
+
+		/* Drain and free posted WQEs */
+		while (rxq->desc_ring_tail != rxq->desc_ring_head) {
+			struct mana_rxq_desc *desc =
+				&rxq->desc_ring[rxq->desc_ring_tail];
+
+			rte_pktmbuf_free(desc->pkt);
+
+			rxq->desc_ring_tail =
+				(rxq->desc_ring_tail + 1) % rxq->num_desc;
+		}
+		rxq->desc_ring_head = 0;
+		rxq->desc_ring_tail = 0;
+
+		memset(&rxq->gdma_rq, 0, sizeof(rxq->gdma_rq));
+		memset(&rxq->gdma_cq, 0, sizeof(rxq->gdma_cq));
+	}
+	return 0;
+}
+
+int start_rx_queues(struct rte_eth_dev *dev)
+{
+	struct mana_priv *priv = dev->data->dev_private;
+	int ret, i;
+	struct ibv_wq *ind_tbl[priv->num_queues];
+
+	DRV_LOG(INFO, "start rx queues");
+	for (i = 0; i < priv->num_queues; i++) {
+		struct mana_rxq *rxq = dev->data->rx_queues[i];
+		struct ibv_wq_init_attr wq_attr = {};
+
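+		/* Allocate verbs queue buffers through DPDK so they are
+		 * placed on the queue's NUMA socket.
+		 */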
+		manadv_set_context_attr(priv->ib_ctx,
+			MANADV_CTX_ATTR_BUF_ALLOCATORS,
+			(void *)((uintptr_t)&(struct manadv_ctx_allocators){
+				.alloc = &mana_alloc_verbs_buf,
+				.free = &mana_free_verbs_buf,
+				.data = (void *)(uintptr_t)rxq->socket,
+			}));
+
+		rxq->cq = ibv_create_cq(priv->ib_ctx, rxq->num_desc,
+					NULL, NULL, 0);
+		if (!rxq->cq) {
+			ret = -errno;
+			DRV_LOG(ERR, "failed to create rx cq queue %d", i);
+			goto fail;
+		}
+
+		wq_attr.wq_type = IBV_WQT_RQ;
+		wq_attr.max_wr = rxq->num_desc;
+		wq_attr.max_sge = 1;
+		wq_attr.pd = priv->ib_parent_pd;
+		wq_attr.cq = rxq->cq;
+
+		rxq->wq = ibv_create_wq(priv->ib_ctx, &wq_attr);
+		if (!rxq->wq) {
+			ret = -errno;
+			DRV_LOG(ERR, "failed to create rx wq %d", i);
+			goto fail;
+		}
+
+		ind_tbl[i] = rxq->wq;
+	}
+
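+	/* Group all RX WQs under a single RSS indirection table */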
+	struct ibv_rwq_ind_table_init_attr ind_table_attr = {
+		.log_ind_tbl_size = rte_log2_u32(RTE_DIM(ind_tbl)),
+		.ind_tbl = ind_tbl,
+		.comp_mask = 0,
+	};
+
+	priv->ind_table = ibv_create_rwq_ind_table(priv->ib_ctx,
+						   &ind_table_attr);
+	if (!priv->ind_table) {
+		ret = -errno;
+		DRV_LOG(ERR, "failed to create ind_table ret %d", ret);
+		goto fail;
+	}
+
+	DRV_LOG(INFO, "ind_table handle %d num %d",
+		priv->ind_table->ind_tbl_handle,
+		priv->ind_table->ind_tbl_num);
+
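+	/*
+	 * Create a RAW_PACKET hash QP over the indirection table;
+	 * received packets are spread across the WQs by Toeplitz RSS.
+	 */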
+	struct ibv_qp_init_attr_ex qp_attr_ex = {
+		.comp_mask = IBV_QP_INIT_ATTR_PD |
+			     IBV_QP_INIT_ATTR_RX_HASH |
+			     IBV_QP_INIT_ATTR_IND_TABLE,
+		.qp_type = IBV_QPT_RAW_PACKET,
+		.pd = priv->ib_parent_pd,
+		.rwq_ind_tbl = priv->ind_table,
+		.rx_hash_conf = {
+			.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+			.rx_hash_key_len = TOEPLITZ_HASH_KEY_SIZE_IN_BYTES,
+			.rx_hash_key = mana_rss_hash_key_default,
+			.rx_hash_fields_mask =
+				IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
+		},
+	};
+
+	/* overwrite default if rss key is set */
+	if (priv->rss_conf.rss_key_len && priv->rss_conf.rss_key)
+		qp_attr_ex.rx_hash_conf.rx_hash_key =
+			priv->rss_conf.rss_key;
+
+	/* overwrite default if rss hash fields are set */
+	if (priv->rss_conf.rss_hf) {
+		qp_attr_ex.rx_hash_conf.rx_hash_fields_mask = 0;
+
+		if (priv->rss_conf.rss_hf & ETH_RSS_IPV4)
+			qp_attr_ex.rx_hash_conf.rx_hash_fields_mask |=
+				IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4;
+
+		if (priv->rss_conf.rss_hf & ETH_RSS_IPV6)
+			qp_attr_ex.rx_hash_conf.rx_hash_fields_mask |=
+				IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6;
+
+		if (priv->rss_conf.rss_hf &
+		    (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+			qp_attr_ex.rx_hash_conf.rx_hash_fields_mask |=
+				IBV_RX_HASH_SRC_PORT_TCP |
+				IBV_RX_HASH_DST_PORT_TCP;
+
+		if (priv->rss_conf.rss_hf &
+		    (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+			qp_attr_ex.rx_hash_conf.rx_hash_fields_mask |=
+				IBV_RX_HASH_SRC_PORT_UDP |
+				IBV_RX_HASH_DST_PORT_UDP;
+	}
+
+	priv->rwq_qp = ibv_create_qp_ex(priv->ib_ctx, &qp_attr_ex);
+	if (!priv->rwq_qp) {
+		ret = -errno;
+		DRV_LOG(ERR, "rx ibv_create_qp_ex failed");
+		goto fail;
+	}
+
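+	/*
+	 * Use the MANA direct-verbs API to retrieve the underlying GDMA
+	 * queue buffers, counts and IDs, so the data path can access the
+	 * hardware queues directly.
+	 */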
+	for (i = 0; i < priv->num_queues; i++) {
+		struct mana_rxq *rxq = dev->data->rx_queues[i];
+		struct manadv_obj obj = {};
+		struct manadv_cq dv_cq;
+		struct manadv_rwq dv_wq;
+
+		obj.cq.in = rxq->cq;
+		obj.cq.out = &dv_cq;
+		obj.rwq.in = rxq->wq;
+		obj.rwq.out = &dv_wq;
+		ret = manadv_init_obj(&obj, MANADV_OBJ_CQ | MANADV_OBJ_RWQ);
+		if (ret) {
+			DRV_LOG(ERR, "manadv_init_obj failed ret %d", ret);
+			goto fail;
+		}
+
+		rxq->gdma_cq.buffer = obj.cq.out->buf;
+		rxq->gdma_cq.count = obj.cq.out->count;
+		rxq->gdma_cq.size = rxq->gdma_cq.count * COMP_ENTRY_SIZE;
+		rxq->gdma_cq.id = obj.cq.out->cq_id;
+
+		/* CQ head starts with count */
+		rxq->gdma_cq.head = rxq->gdma_cq.count;
+
+		DRV_LOG(INFO, "rxq cq id %u buf %px count %u size %u",
+			rxq->gdma_cq.id, rxq->gdma_cq.buffer,
+			rxq->gdma_cq.count, rxq->gdma_cq.size);
+
+		priv->db_page = obj.rwq.out->db_page;
+
+		rxq->gdma_rq.buffer = obj.rwq.out->buf;
+		rxq->gdma_rq.count = obj.rwq.out->count;
+		rxq->gdma_rq.size = obj.rwq.out->size;
+		rxq->gdma_rq.id = obj.rwq.out->wq_id;
+
+		DRV_LOG(INFO, "rxq rq id %u buf %px count %u size %u",
+			rxq->gdma_rq.id, rxq->gdma_rq.buffer,
+			rxq->gdma_rq.count, rxq->gdma_rq.size);
+	}
+
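+	/* Fill every RX queue with receive WQEs before traffic starts */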
+	for (i = 0; i < priv->num_queues; i++) {
+		ret = alloc_and_post_rx_wqes(dev->data->rx_queues[i]);
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	stop_rx_queues(dev);
+	return ret;
+}
-- 
2.17.1


