* [PATCH 01/21] net/virtio: move CVQ code into a dedicated file
From: Maxime Coquelin @ 2023-02-07 15:15 UTC
To: dev, chenbo.xia, david.marchand, eperezma, stephen; +Cc: Maxime Coquelin
This patch moves the Virtio control queue code into a dedicated
file, as preliminary rework to support a shadow control queue
in Virtio-user.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
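For context, a minimal caller sketch of the now-exported
virtio_send_command(), modeled on the existing Rx-mode callers in
virtio_ethdev.c; example_set_promisc is an illustrative name, not part
of this patch:

    /* Toggle promiscuous mode through the control queue. */
    static int
    example_set_promisc(struct virtnet_ctl *cvq, int enable)
    {
            struct virtio_pmd_ctrl ctrl;
            int dlen[1];

            ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
            ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
            ctrl.data[0] = !!enable;        /* 1-byte enable/disable state */
            dlen[0] = 1;

            /* Returns the ack status written back by the device,
             * VIRTIO_NET_OK (0) on success, -1 on queue error. */
            return virtio_send_command(cvq, &ctrl, dlen, 1);
    }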
drivers/net/virtio/meson.build | 1 +
drivers/net/virtio/virtio_cvq.c | 230 +++++++++++++++++++++++++++++
drivers/net/virtio/virtio_cvq.h | 126 ++++++++++++++++
drivers/net/virtio/virtio_ethdev.c | 218 +--------------------------
drivers/net/virtio/virtio_rxtx.h | 9 --
drivers/net/virtio/virtqueue.h | 105 +------------
6 files changed, 359 insertions(+), 330 deletions(-)
create mode 100644 drivers/net/virtio/virtio_cvq.c
create mode 100644 drivers/net/virtio/virtio_cvq.h
diff --git a/drivers/net/virtio/meson.build b/drivers/net/virtio/meson.build
index d78b8278c6..0ffd77024e 100644
--- a/drivers/net/virtio/meson.build
+++ b/drivers/net/virtio/meson.build
@@ -9,6 +9,7 @@ endif
sources += files(
'virtio.c',
+ 'virtio_cvq.c',
'virtio_ethdev.c',
'virtio_pci_ethdev.c',
'virtio_pci.c',
diff --git a/drivers/net/virtio/virtio_cvq.c b/drivers/net/virtio/virtio_cvq.c
new file mode 100644
index 0000000000..de4299a2a7
--- /dev/null
+++ b/drivers/net/virtio/virtio_cvq.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ * Copyright(c) 2022 Red Hat Inc.
+ */
+
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+
+#include "virtio_cvq.h"
+#include "virtqueue.h"
+
+static struct virtio_pmd_ctrl *
+virtio_send_command_packed(struct virtnet_ctl *cvq,
+ struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
+{
+ struct virtqueue *vq = virtnet_cq_to_vq(cvq);
+ int head;
+ struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
+ struct virtio_pmd_ctrl *result;
+ uint16_t flags;
+ int sum = 0;
+ int nb_descs = 0;
+ int k;
+
+ /*
+ * Format is enforced in qemu code:
+ * One TX packet for header;
+ * At least one TX packet per argument;
+ * One RX packet for ACK.
+ */
+ head = vq->vq_avail_idx;
+ flags = vq->vq_packed.cached_flags;
+ desc[head].addr = cvq->virtio_net_hdr_mem;
+ desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_free_cnt--;
+ nb_descs++;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
+ }
+
+ for (k = 0; k < pkt_num; k++) {
+ desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr)
+ + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+ desc[vq->vq_avail_idx].len = dlen[k];
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
+ vq->vq_packed.cached_flags;
+ sum += dlen[k];
+ vq->vq_free_cnt--;
+ nb_descs++;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->vq_packed.cached_flags ^=
+ VRING_PACKED_DESC_F_AVAIL_USED;
+ }
+ }
+
+ desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr);
+ desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
+ vq->vq_packed.cached_flags;
+ vq->vq_free_cnt--;
+ nb_descs++;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
+ }
+
+ virtqueue_store_flags_packed(&desc[head], VRING_DESC_F_NEXT | flags,
+ vq->hw->weak_barriers);
+
+ virtio_wmb(vq->hw->weak_barriers);
+ virtqueue_notify(vq);
+
+ /* wait for used desc in virtqueue
+ * desc_is_used has a load-acquire or rte_io_rmb inside
+ */
+ while (!desc_is_used(&desc[head], vq))
+ usleep(100);
+
+ /* now get used descriptors */
+ vq->vq_free_cnt += nb_descs;
+ vq->vq_used_cons_idx += nb_descs;
+ if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->vq_packed.used_wrap_counter ^= 1;
+ }
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
+ "vq->vq_avail_idx=%d\n"
+ "vq->vq_used_cons_idx=%d\n"
+ "vq->vq_packed.cached_flags=0x%x\n"
+ "vq->vq_packed.used_wrap_counter=%d",
+ vq->vq_free_cnt,
+ vq->vq_avail_idx,
+ vq->vq_used_cons_idx,
+ vq->vq_packed.cached_flags,
+ vq->vq_packed.used_wrap_counter);
+
+ result = cvq->virtio_net_hdr_mz->addr;
+ return result;
+}
+
+static struct virtio_pmd_ctrl *
+virtio_send_command_split(struct virtnet_ctl *cvq,
+ struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
+{
+ struct virtio_pmd_ctrl *result;
+ struct virtqueue *vq = virtnet_cq_to_vq(cvq);
+ uint32_t head, i;
+ int k, sum = 0;
+
+ head = vq->vq_desc_head_idx;
+
+ /*
+ * Format is enforced in qemu code:
+ * One TX packet for header;
+ * At least one TX packet per argument;
+ * One RX packet for ACK.
+ */
+ vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
+ vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
+ vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_free_cnt--;
+ i = vq->vq_split.ring.desc[head].next;
+
+ for (k = 0; k < pkt_num; k++) {
+ vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
+ vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr)
+ + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+ vq->vq_split.ring.desc[i].len = dlen[k];
+ sum += dlen[k];
+ vq->vq_free_cnt--;
+ i = vq->vq_split.ring.desc[i].next;
+ }
+
+ vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
+ vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
+ vq->vq_free_cnt--;
+
+ vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
+ while (virtqueue_nused(vq) == 0)
+ usleep(100);
+
+ while (virtqueue_nused(vq)) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_split.ring.used->ring[used_idx];
+ idx = (uint32_t)uep->id;
+ desc_idx = idx;
+
+ while (vq->vq_split.ring.desc[desc_idx].flags &
+ VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_split.ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ result = cvq->virtio_net_hdr_mz->addr;
+ return result;
+}
+
+int
+virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, int *dlen, int pkt_num)
+{
+ virtio_net_ctrl_ack status = ~0;
+ struct virtio_pmd_ctrl *result;
+ struct virtqueue *vq;
+
+ ctrl->status = status;
+
+ if (!cvq) {
+ PMD_INIT_LOG(ERR, "Control queue is not supported.");
+ return -1;
+ }
+
+ rte_spinlock_lock(&cvq->lock);
+ vq = virtnet_cq_to_vq(cvq);
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
+ "vq->hw->cvq = %p vq = %p",
+ vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
+
+ if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
+ rte_spinlock_unlock(&cvq->lock);
+ return -1;
+ }
+
+ memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
+ sizeof(struct virtio_pmd_ctrl));
+
+ if (virtio_with_packed_queue(vq->hw))
+ result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
+ else
+ result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
+
+ rte_spinlock_unlock(&cvq->lock);
+ return result->status;
+}
+
diff --git a/drivers/net/virtio/virtio_cvq.h b/drivers/net/virtio/virtio_cvq.h
new file mode 100644
index 0000000000..139e813ffb
--- /dev/null
+++ b/drivers/net/virtio/virtio_cvq.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#ifndef _VIRTIO_CVQ_H_
+#define _VIRTIO_CVQ_H_
+
+#include <rte_ether.h>
+
+/**
+ * Control the RX mode, ie. promiscuous, allmulti, etc...
+ * All commands require an "out" sg entry containing a 1 byte
+ * state value, zero = disable, non-zero = enable. Commands
+ * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
+ * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
+ */
+#define VIRTIO_NET_CTRL_RX 0
+#define VIRTIO_NET_CTRL_RX_PROMISC 0
+#define VIRTIO_NET_CTRL_RX_ALLMULTI 1
+#define VIRTIO_NET_CTRL_RX_ALLUNI 2
+#define VIRTIO_NET_CTRL_RX_NOMULTI 3
+#define VIRTIO_NET_CTRL_RX_NOUNI 4
+#define VIRTIO_NET_CTRL_RX_NOBCAST 5
+
+/**
+ * Control the MAC
+ *
+ * The MAC filter table is managed by the hypervisor, the guest should
+ * assume the size is infinite. Filtering should be considered
+ * non-perfect, ie. based on hypervisor resources, the guest may
+ * receive packets from sources not specified in the filter list.
+ *
+ * In addition to the class/cmd header, the TABLE_SET command requires
+ * two out scatterlists. Each contains a 4 byte count of entries followed
+ * by a concatenated byte stream of the ETH_ALEN MAC addresses. The
+ * first sg list contains unicast addresses, the second is for multicast.
+ * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
+ * is available.
+ *
+ * The ADDR_SET command requests one out scatterlist, it contains a
+ * 6 bytes MAC address. This functionality is present if the
+ * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
+ */
+struct virtio_net_ctrl_mac {
+ uint32_t entries;
+ uint8_t macs[][RTE_ETHER_ADDR_LEN];
+} __rte_packed;
+
+#define VIRTIO_NET_CTRL_MAC 1
+#define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
+#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
+
+/**
+ * Control VLAN filtering
+ *
+ * The VLAN filter table is controlled via a simple ADD/DEL interface.
+ * VLAN IDs not added may be filtered by the hypervisor. Del is the
+ * opposite of add. Both commands expect an out entry containing a 2
+ * byte VLAN ID. VLAN filtering is available with the
+ * VIRTIO_NET_F_CTRL_VLAN feature bit.
+ */
+#define VIRTIO_NET_CTRL_VLAN 2
+#define VIRTIO_NET_CTRL_VLAN_ADD 0
+#define VIRTIO_NET_CTRL_VLAN_DEL 1
+
+/**
+ * RSS control
+ *
+ * The RSS feature configuration message is sent by the driver when
+ * VIRTIO_NET_F_RSS has been negotiated. It provides the device with
+ * hash types to use, hash key and indirection table. In this
+ * implementation, the driver only supports fixed key length (40B)
+ * and indirection table size (128 entries).
+ */
+#define VIRTIO_NET_RSS_RETA_SIZE 128
+#define VIRTIO_NET_RSS_KEY_SIZE 40
+
+struct virtio_net_ctrl_rss {
+ uint32_t hash_types;
+ uint16_t indirection_table_mask;
+ uint16_t unclassified_queue;
+ uint16_t indirection_table[VIRTIO_NET_RSS_RETA_SIZE];
+ uint16_t max_tx_vq;
+ uint8_t hash_key_length;
+ uint8_t hash_key_data[VIRTIO_NET_RSS_KEY_SIZE];
+};
+
+/*
+ * Control link announce acknowledgment
+ *
+ * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
+ * driver has received the notification; device would clear the
+ * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
+ * this command.
+ */
+#define VIRTIO_NET_CTRL_ANNOUNCE 3
+#define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0
+
+struct virtio_net_ctrl_hdr {
+ uint8_t class;
+ uint8_t cmd;
+} __rte_packed;
+
+typedef uint8_t virtio_net_ctrl_ack;
+
+struct virtnet_ctl {
+ /**< memzone to populate hdr. */
+ const struct rte_memzone *virtio_net_hdr_mz;
+ rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+ uint16_t port_id; /**< Device port identifier. */
+ const struct rte_memzone *mz; /**< mem zone to populate CTL ring. */
+ rte_spinlock_t lock; /**< spinlock for control queue. */
+};
+
+#define VIRTIO_MAX_CTRL_DATA 2048
+
+struct virtio_pmd_ctrl {
+ struct virtio_net_ctrl_hdr hdr;
+ virtio_net_ctrl_ack status;
+ uint8_t data[VIRTIO_MAX_CTRL_DATA];
+};
+
+int
+virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, int *dlen, int pkt_num);
+
+#endif /* _VIRTIO_CVQ_H_ */
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 0ad740b253..d3aa420c89 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -33,6 +33,7 @@
#include "virtio.h"
#include "virtio_logs.h"
#include "virtqueue.h"
+#include "virtio_cvq.h"
#include "virtio_rxtx.h"
#include "virtio_rxtx_simple.h"
#include "virtio_user/virtio_user_dev.h"
@@ -142,223 +143,6 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
-static struct virtio_pmd_ctrl *
-virtio_send_command_packed(struct virtnet_ctl *cvq,
- struct virtio_pmd_ctrl *ctrl,
- int *dlen, int pkt_num)
-{
- struct virtqueue *vq = virtnet_cq_to_vq(cvq);
- int head;
- struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
- struct virtio_pmd_ctrl *result;
- uint16_t flags;
- int sum = 0;
- int nb_descs = 0;
- int k;
-
- /*
- * Format is enforced in qemu code:
- * One TX packet for header;
- * At least one TX packet per argument;
- * One RX packet for ACK.
- */
- head = vq->vq_avail_idx;
- flags = vq->vq_packed.cached_flags;
- desc[head].addr = cvq->virtio_net_hdr_mem;
- desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
- vq->vq_free_cnt--;
- nb_descs++;
- if (++vq->vq_avail_idx >= vq->vq_nentries) {
- vq->vq_avail_idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
- }
-
- for (k = 0; k < pkt_num; k++) {
- desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
- + sizeof(struct virtio_net_ctrl_hdr)
- + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
- desc[vq->vq_avail_idx].len = dlen[k];
- desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
- vq->vq_packed.cached_flags;
- sum += dlen[k];
- vq->vq_free_cnt--;
- nb_descs++;
- if (++vq->vq_avail_idx >= vq->vq_nentries) {
- vq->vq_avail_idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^=
- VRING_PACKED_DESC_F_AVAIL_USED;
- }
- }
-
- desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
- + sizeof(struct virtio_net_ctrl_hdr);
- desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
- desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
- vq->vq_packed.cached_flags;
- vq->vq_free_cnt--;
- nb_descs++;
- if (++vq->vq_avail_idx >= vq->vq_nentries) {
- vq->vq_avail_idx -= vq->vq_nentries;
- vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
- }
-
- virtqueue_store_flags_packed(&desc[head], VRING_DESC_F_NEXT | flags,
- vq->hw->weak_barriers);
-
- virtio_wmb(vq->hw->weak_barriers);
- virtqueue_notify(vq);
-
- /* wait for used desc in virtqueue
- * desc_is_used has a load-acquire or rte_io_rmb inside
- */
- while (!desc_is_used(&desc[head], vq))
- usleep(100);
-
- /* now get used descriptors */
- vq->vq_free_cnt += nb_descs;
- vq->vq_used_cons_idx += nb_descs;
- if (vq->vq_used_cons_idx >= vq->vq_nentries) {
- vq->vq_used_cons_idx -= vq->vq_nentries;
- vq->vq_packed.used_wrap_counter ^= 1;
- }
-
- PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
- "vq->vq_avail_idx=%d\n"
- "vq->vq_used_cons_idx=%d\n"
- "vq->vq_packed.cached_flags=0x%x\n"
- "vq->vq_packed.used_wrap_counter=%d",
- vq->vq_free_cnt,
- vq->vq_avail_idx,
- vq->vq_used_cons_idx,
- vq->vq_packed.cached_flags,
- vq->vq_packed.used_wrap_counter);
-
- result = cvq->virtio_net_hdr_mz->addr;
- return result;
-}
-
-static struct virtio_pmd_ctrl *
-virtio_send_command_split(struct virtnet_ctl *cvq,
- struct virtio_pmd_ctrl *ctrl,
- int *dlen, int pkt_num)
-{
- struct virtio_pmd_ctrl *result;
- struct virtqueue *vq = virtnet_cq_to_vq(cvq);
- uint32_t head, i;
- int k, sum = 0;
-
- head = vq->vq_desc_head_idx;
-
- /*
- * Format is enforced in qemu code:
- * One TX packet for header;
- * At least one TX packet per argument;
- * One RX packet for ACK.
- */
- vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
- vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
- vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
- vq->vq_free_cnt--;
- i = vq->vq_split.ring.desc[head].next;
-
- for (k = 0; k < pkt_num; k++) {
- vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
- vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
- + sizeof(struct virtio_net_ctrl_hdr)
- + sizeof(ctrl->status) + sizeof(uint8_t)*sum;
- vq->vq_split.ring.desc[i].len = dlen[k];
- sum += dlen[k];
- vq->vq_free_cnt--;
- i = vq->vq_split.ring.desc[i].next;
- }
-
- vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
- vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
- + sizeof(struct virtio_net_ctrl_hdr);
- vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
- vq->vq_free_cnt--;
-
- vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
-
- vq_update_avail_ring(vq, head);
- vq_update_avail_idx(vq);
-
- PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
-
- virtqueue_notify(vq);
-
- while (virtqueue_nused(vq) == 0)
- usleep(100);
-
- while (virtqueue_nused(vq)) {
- uint32_t idx, desc_idx, used_idx;
- struct vring_used_elem *uep;
-
- used_idx = (uint32_t)(vq->vq_used_cons_idx
- & (vq->vq_nentries - 1));
- uep = &vq->vq_split.ring.used->ring[used_idx];
- idx = (uint32_t) uep->id;
- desc_idx = idx;
-
- while (vq->vq_split.ring.desc[desc_idx].flags &
- VRING_DESC_F_NEXT) {
- desc_idx = vq->vq_split.ring.desc[desc_idx].next;
- vq->vq_free_cnt++;
- }
-
- vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
- vq->vq_desc_head_idx = idx;
-
- vq->vq_used_cons_idx++;
- vq->vq_free_cnt++;
- }
-
- PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
- vq->vq_free_cnt, vq->vq_desc_head_idx);
-
- result = cvq->virtio_net_hdr_mz->addr;
- return result;
-}
-
-static int
-virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
- int *dlen, int pkt_num)
-{
- virtio_net_ctrl_ack status = ~0;
- struct virtio_pmd_ctrl *result;
- struct virtqueue *vq;
-
- ctrl->status = status;
-
- if (!cvq) {
- PMD_INIT_LOG(ERR, "Control queue is not supported.");
- return -1;
- }
-
- rte_spinlock_lock(&cvq->lock);
- vq = virtnet_cq_to_vq(cvq);
-
- PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
- "vq->hw->cvq = %p vq = %p",
- vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
-
- if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
- rte_spinlock_unlock(&cvq->lock);
- return -1;
- }
-
- memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
- sizeof(struct virtio_pmd_ctrl));
-
- if (virtio_with_packed_queue(vq->hw))
- result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
- else
- result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
-
- rte_spinlock_unlock(&cvq->lock);
- return result->status;
-}
-
static int
virtio_set_multiple_queues_rss(struct rte_eth_dev *dev, uint16_t nb_queues)
{
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 6ce5d67d15..6ee3a13100 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -46,15 +46,6 @@ struct virtnet_tx {
const struct rte_memzone *mz; /**< mem zone to populate TX ring. */
};
-struct virtnet_ctl {
- /**< memzone to populate hdr. */
- const struct rte_memzone *virtio_net_hdr_mz;
- rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
- uint16_t port_id; /**< Device port identifier. */
- const struct rte_memzone *mz; /**< mem zone to populate CTL ring. */
- rte_spinlock_t lock; /**< spinlock for control queue. */
-};
-
int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
void virtio_update_packet_stats(struct virtnet_stats *stats,
struct rte_mbuf *mbuf);
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index f5d8b40cad..62f472850e 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -16,6 +16,7 @@
#include "virtio_ring.h"
#include "virtio_logs.h"
#include "virtio_rxtx.h"
+#include "virtio_cvq.h"
struct rte_mbuf;
@@ -145,113 +146,9 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
*/
#define VQ_RING_DESC_CHAIN_END 32768
-/**
- * Control the RX mode, ie. promiscuous, allmulti, etc...
- * All commands require an "out" sg entry containing a 1 byte
- * state value, zero = disable, non-zero = enable. Commands
- * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature.
- * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA.
- */
-#define VIRTIO_NET_CTRL_RX 0
-#define VIRTIO_NET_CTRL_RX_PROMISC 0
-#define VIRTIO_NET_CTRL_RX_ALLMULTI 1
-#define VIRTIO_NET_CTRL_RX_ALLUNI 2
-#define VIRTIO_NET_CTRL_RX_NOMULTI 3
-#define VIRTIO_NET_CTRL_RX_NOUNI 4
-#define VIRTIO_NET_CTRL_RX_NOBCAST 5
-
-/**
- * Control the MAC
- *
- * The MAC filter table is managed by the hypervisor, the guest should
- * assume the size is infinite. Filtering should be considered
- * non-perfect, ie. based on hypervisor resources, the guest may
- * received packets from sources not specified in the filter list.
- *
- * In addition to the class/cmd header, the TABLE_SET command requires
- * two out scatterlists. Each contains a 4 byte count of entries followed
- * by a concatenated byte stream of the ETH_ALEN MAC addresses. The
- * first sg list contains unicast addresses, the second is for multicast.
- * This functionality is present if the VIRTIO_NET_F_CTRL_RX feature
- * is available.
- *
- * The ADDR_SET command requests one out scatterlist, it contains a
- * 6 bytes MAC address. This functionality is present if the
- * VIRTIO_NET_F_CTRL_MAC_ADDR feature is available.
- */
-struct virtio_net_ctrl_mac {
- uint32_t entries;
- uint8_t macs[][RTE_ETHER_ADDR_LEN];
-} __rte_packed;
-
-#define VIRTIO_NET_CTRL_MAC 1
-#define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
-#define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
-
-/**
- * Control VLAN filtering
- *
- * The VLAN filter table is controlled via a simple ADD/DEL interface.
- * VLAN IDs not added may be filtered by the hypervisor. Del is the
- * opposite of add. Both commands expect an out entry containing a 2
- * byte VLAN ID. VLAN filtering is available with the
- * VIRTIO_NET_F_CTRL_VLAN feature bit.
- */
-#define VIRTIO_NET_CTRL_VLAN 2
-#define VIRTIO_NET_CTRL_VLAN_ADD 0
-#define VIRTIO_NET_CTRL_VLAN_DEL 1
-
-/**
- * RSS control
- *
- * The RSS feature configuration message is sent by the driver when
- * VIRTIO_NET_F_RSS has been negotiated. It provides the device with
- * hash types to use, hash key and indirection table. In this
- * implementation, the driver only supports fixed key length (40B)
- * and indirection table size (128 entries).
- */
-#define VIRTIO_NET_RSS_RETA_SIZE 128
-#define VIRTIO_NET_RSS_KEY_SIZE 40
-
-struct virtio_net_ctrl_rss {
- uint32_t hash_types;
- uint16_t indirection_table_mask;
- uint16_t unclassified_queue;
- uint16_t indirection_table[VIRTIO_NET_RSS_RETA_SIZE];
- uint16_t max_tx_vq;
- uint8_t hash_key_length;
- uint8_t hash_key_data[VIRTIO_NET_RSS_KEY_SIZE];
-};
-
-/*
- * Control link announce acknowledgement
- *
- * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
- * driver has received the notification; device would clear the
- * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
- * this command.
- */
-#define VIRTIO_NET_CTRL_ANNOUNCE 3
-#define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0
-
-struct virtio_net_ctrl_hdr {
- uint8_t class;
- uint8_t cmd;
-} __rte_packed;
-
-typedef uint8_t virtio_net_ctrl_ack;
-
#define VIRTIO_NET_OK 0
#define VIRTIO_NET_ERR 1
-#define VIRTIO_MAX_CTRL_DATA 2048
-
-struct virtio_pmd_ctrl {
- struct virtio_net_ctrl_hdr hdr;
- virtio_net_ctrl_ack status;
- uint8_t data[VIRTIO_MAX_CTRL_DATA];
-};
-
struct vq_desc_extra {
void *cookie;
uint16_t ndescs;
--
2.39.1
* [PATCH 02/21] net/virtio: introduce notify callback for control queue
From: Maxime Coquelin @ 2023-02-07 15:15 UTC
To: dev, chenbo.xia, david.marchand, eperezma, stephen; +Cc: Maxime Coquelin
This patch introduces a notification callback for the control
virtqueue as preliminary work to add shadow control virtqueue
support.
This new callback is required so that the shadow control queue
implemented in Virtio-user does not call the notification op
implemented for the driver layer.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
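For context, a sketch of how a queue owner other than the driver layer
(such as the upcoming Virtio-user shadow control queue) could plug in
its own kick routine through the new ops; the example_* names and the
kick-fd mechanism are assumptions for illustration, not part of this
series:

    /* Hypothetical backend-specific notification, e.g. signaling a
     * kick fd instead of calling the driver's notify op. */
    static void
    example_cvq_notify(struct virtqueue *vq, void *cookie)
    {
            struct example_backend *be = cookie;    /* assumed private state */

            example_backend_kick(be, vq->vq_queue_index);
    }

    /* At control queue setup time: */
    cvq->notify_queue = example_cvq_notify;
    cvq->notify_cookie = be;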
drivers/net/virtio/virtio_cvq.c | 4 ++--
drivers/net/virtio/virtio_cvq.h | 4 ++++
drivers/net/virtio/virtio_ethdev.c | 7 +++++++
3 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/drivers/net/virtio/virtio_cvq.c b/drivers/net/virtio/virtio_cvq.c
index de4299a2a7..cd25614df8 100644
--- a/drivers/net/virtio/virtio_cvq.c
+++ b/drivers/net/virtio/virtio_cvq.c
@@ -76,7 +76,7 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
vq->hw->weak_barriers);
virtio_wmb(vq->hw->weak_barriers);
- virtqueue_notify(vq);
+ cvq->notify_queue(vq, cvq->notify_cookie);
/* wait for used desc in virtqueue
* desc_is_used has a load-acquire or rte_io_rmb inside
@@ -155,7 +155,7 @@ virtio_send_command_split(struct virtnet_ctl *cvq,
PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
- virtqueue_notify(vq);
+ cvq->notify_queue(vq, cvq->notify_cookie);
while (virtqueue_nused(vq) == 0)
usleep(100);
diff --git a/drivers/net/virtio/virtio_cvq.h b/drivers/net/virtio/virtio_cvq.h
index 139e813ffb..224dc81422 100644
--- a/drivers/net/virtio/virtio_cvq.h
+++ b/drivers/net/virtio/virtio_cvq.h
@@ -7,6 +7,8 @@
#include <rte_ether.h>
+struct virtqueue;
+
/**
* Control the RX mode, ie. promiscuous, allmulti, etc...
* All commands require an "out" sg entry containing a 1 byte
@@ -110,6 +112,8 @@ struct virtnet_ctl {
uint16_t port_id; /**< Device port identifier. */
const struct rte_memzone *mz; /**< mem zone to populate CTL ring. */
rte_spinlock_t lock; /**< spinlock for control queue. */
+ void (*notify_queue)(struct virtqueue *vq, void *cookie); /**< notify ops. */
+ void *notify_cookie; /**< cookie for notify ops */
};
#define VIRTIO_MAX_CTRL_DATA 2048
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index d3aa420c89..422c597c2b 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -253,6 +253,12 @@ virtio_init_vring(struct virtqueue *vq)
virtqueue_disable_intr(vq);
}
+static void
+virtio_control_queue_notify(struct virtqueue *vq, __rte_unused void *cookie)
+{
+ virtqueue_notify(vq);
+}
+
static int
virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
{
@@ -421,6 +427,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
memset(cvq->virtio_net_hdr_mz->addr, 0, rte_mem_page_size());
hw->cvq = cvq;
+ vq->cq.notify_queue = &virtio_control_queue_notify;
}
if (hw->use_va)
--
2.39.1
* [PATCH 03/21] net/virtio: virtqueue headers alloc refactoring
From: Maxime Coquelin @ 2023-02-07 15:15 UTC
To: dev, chenbo.xia, david.marchand, eperezma, stephen; +Cc: Maxime Coquelin
This patch refactors virtqueue initialization by moving
its header allocation and deallocation into dedicated
functions.
While at it, it renames the memzone metadata and address
pointers in the virtnet_tx and virtnet_ctl structures to
remove the redundant virtio_net_ prefix.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
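One design note: the EEXIST fallback in the new
virtio_alloc_queue_headers() is a common DPDK idiom for surviving
restarts where a memzone of the same name already exists. A standalone
sketch of just that idiom (example_reserve_or_lookup is illustrative):

    #include <rte_common.h>
    #include <rte_errno.h>
    #include <rte_memzone.h>

    static const struct rte_memzone *
    example_reserve_or_lookup(const char *name, size_t size, int socket)
    {
            const struct rte_memzone *mz;

            mz = rte_memzone_reserve_aligned(name, size, socket,
                            RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
            if (mz == NULL && rte_errno == EEXIST)
                    mz = rte_memzone_lookup(name);  /* reserved previously */

            return mz;      /* NULL on genuine allocation failure */
    }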
drivers/net/virtio/virtio_cvq.c | 19 ++--
drivers/net/virtio/virtio_cvq.h | 9 +-
drivers/net/virtio/virtio_ethdev.c | 149 ++++++++++++++++++-----------
drivers/net/virtio/virtio_rxtx.c | 12 +--
drivers/net/virtio/virtio_rxtx.h | 12 +--
drivers/net/virtio/virtqueue.c | 8 +-
drivers/net/virtio/virtqueue.h | 13 +--
7 files changed, 126 insertions(+), 96 deletions(-)
diff --git a/drivers/net/virtio/virtio_cvq.c b/drivers/net/virtio/virtio_cvq.c
index cd25614df8..5e457f5fd0 100644
--- a/drivers/net/virtio/virtio_cvq.c
+++ b/drivers/net/virtio/virtio_cvq.c
@@ -34,7 +34,7 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
*/
head = vq->vq_avail_idx;
flags = vq->vq_packed.cached_flags;
- desc[head].addr = cvq->virtio_net_hdr_mem;
+ desc[head].addr = cvq->hdr_mem;
desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
nb_descs++;
@@ -44,7 +44,7 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
}
for (k = 0; k < pkt_num; k++) {
- desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ desc[vq->vq_avail_idx].addr = cvq->hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
desc[vq->vq_avail_idx].len = dlen[k];
@@ -60,7 +60,7 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
}
}
- desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ desc[vq->vq_avail_idx].addr = cvq->hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
@@ -103,7 +103,7 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
vq->vq_packed.cached_flags,
vq->vq_packed.used_wrap_counter);
- result = cvq->virtio_net_hdr_mz->addr;
+ result = cvq->hdr_mz->addr;
return result;
}
@@ -126,14 +126,14 @@ virtio_send_command_split(struct virtnet_ctl *cvq,
* One RX packet for ACK.
*/
vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
- vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
+ vq->vq_split.ring.desc[head].addr = cvq->hdr_mem;
vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
i = vq->vq_split.ring.desc[head].next;
for (k = 0; k < pkt_num; k++) {
vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
- vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ vq->vq_split.ring.desc[i].addr = cvq->hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
vq->vq_split.ring.desc[i].len = dlen[k];
@@ -143,7 +143,7 @@ virtio_send_command_split(struct virtnet_ctl *cvq,
}
vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
- vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ vq->vq_split.ring.desc[i].addr = cvq->hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
vq->vq_free_cnt--;
@@ -186,7 +186,7 @@ virtio_send_command_split(struct virtnet_ctl *cvq,
PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
vq->vq_free_cnt, vq->vq_desc_head_idx);
- result = cvq->virtio_net_hdr_mz->addr;
+ result = cvq->hdr_mz->addr;
return result;
}
@@ -216,8 +216,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, int *
return -1;
}
- memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
- sizeof(struct virtio_pmd_ctrl));
+ memcpy(cvq->hdr_mz->addr, ctrl, sizeof(struct virtio_pmd_ctrl));
if (virtio_with_packed_queue(vq->hw))
result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
diff --git a/drivers/net/virtio/virtio_cvq.h b/drivers/net/virtio/virtio_cvq.h
index 224dc81422..226561e6b8 100644
--- a/drivers/net/virtio/virtio_cvq.h
+++ b/drivers/net/virtio/virtio_cvq.h
@@ -106,11 +106,10 @@ struct virtio_net_ctrl_hdr {
typedef uint8_t virtio_net_ctrl_ack;
struct virtnet_ctl {
- /**< memzone to populate hdr. */
- const struct rte_memzone *virtio_net_hdr_mz;
- rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
- uint16_t port_id; /**< Device port identifier. */
- const struct rte_memzone *mz; /**< mem zone to populate CTL ring. */
+ const struct rte_memzone *hdr_mz; /**< memzone to populate hdr. */
+ rte_iova_t hdr_mem; /**< hdr for each xmit packet */
+ uint16_t port_id; /**< Device port identifier. */
+ const struct rte_memzone *mz; /**< mem zone to populate CTL ring. */
rte_spinlock_t lock; /**< spinlock for control queue. */
void (*notify_queue)(struct virtqueue *vq, void *cookie); /**< notify ops. */
void *notify_cookie; /**< cookie for notify ops */
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 422c597c2b..057388cfaf 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -259,19 +259,97 @@ virtio_control_queue_notify(struct virtqueue *vq, __rte_unused void *cookie)
virtqueue_notify(vq);
}
+static int
+virtio_alloc_queue_headers(struct virtqueue *vq, int numa_node, const char *name)
+{
+ char hdr_name[VIRTQUEUE_MAX_NAME_SZ];
+ const struct rte_memzone **hdr_mz;
+ rte_iova_t *hdr_mem;
+ ssize_t size;
+ int queue_type;
+
+ queue_type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
+ switch (queue_type) {
+ case VTNET_TQ:
+ /*
+ * For each xmit packet, allocate a virtio_net_hdr
+ * and indirect ring elements
+ */
+ size = vq->vq_nentries * sizeof(struct virtio_tx_region);
+ hdr_mz = &vq->txq.hdr_mz;
+ hdr_mem = &vq->txq.hdr_mem;
+ break;
+ case VTNET_CQ:
+ /* Allocate a page for control vq command, data and status */
+ size = rte_mem_page_size();
+ hdr_mz = &vq->cq.hdr_mz;
+ hdr_mem = &vq->cq.hdr_mem;
+ break;
+ case VTNET_RQ:
+ /* fallthrough */
+ default:
+ return 0;
+ }
+
+ snprintf(hdr_name, sizeof(hdr_name), "%s_hdr", name);
+ *hdr_mz = rte_memzone_reserve_aligned(hdr_name, size, numa_node,
+ RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
+ if (*hdr_mz == NULL) {
+ if (rte_errno == EEXIST)
+ *hdr_mz = rte_memzone_lookup(hdr_name);
+ if (*hdr_mz == NULL)
+ return -ENOMEM;
+ }
+
+ memset((*hdr_mz)->addr, 0, size);
+
+ if (vq->hw->use_va)
+ *hdr_mem = (uintptr_t)(*hdr_mz)->addr;
+ else
+ *hdr_mem = (uintptr_t)(*hdr_mz)->iova;
+
+ return 0;
+}
+
+static void
+virtio_free_queue_headers(struct virtqueue *vq)
+{
+ const struct rte_memzone **hdr_mz;
+ rte_iova_t *hdr_mem;
+ int queue_type;
+
+ queue_type = virtio_get_queue_type(vq->hw, vq->vq_queue_index);
+ switch (queue_type) {
+ case VTNET_TQ:
+ hdr_mz = &vq->txq.hdr_mz;
+ hdr_mem = &vq->txq.hdr_mem;
+ break;
+ case VTNET_CQ:
+ hdr_mz = &vq->cq.hdr_mz;
+ hdr_mem = &vq->cq.hdr_mem;
+ break;
+ case VTNET_RQ:
+ /* fallthrough */
+ default:
+ return;
+ }
+
+ rte_memzone_free(*hdr_mz);
+ *hdr_mz = NULL;
+ *hdr_mem = 0;
+}
+
static int
virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
{
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
- char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
- const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
+ const struct rte_memzone *mz = NULL;
unsigned int vq_size, size;
struct virtio_hw *hw = dev->data->dev_private;
struct virtnet_rx *rxvq = NULL;
struct virtnet_tx *txvq = NULL;
struct virtnet_ctl *cvq = NULL;
struct virtqueue *vq;
- size_t sz_hdr_mz = 0;
void *sw_ring = NULL;
int queue_type = virtio_get_queue_type(hw, queue_idx);
int ret;
@@ -297,22 +375,12 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
return -EINVAL;
}
- snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
- dev->data->port_id, queue_idx);
+ snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, queue_idx);
size = RTE_ALIGN_CEIL(sizeof(*vq) +
vq_size * sizeof(struct vq_desc_extra),
RTE_CACHE_LINE_SIZE);
- if (queue_type == VTNET_TQ) {
- /*
- * For each xmit packet, allocate a virtio_net_hdr
- * and indirect ring elements
- */
- sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
- } else if (queue_type == VTNET_CQ) {
- /* Allocate a page for control vq command, data and status */
- sz_hdr_mz = rte_mem_page_size();
- }
+
vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
numa_node);
@@ -366,20 +434,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
virtio_init_vring(vq);
- if (sz_hdr_mz) {
- snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
- dev->data->port_id, queue_idx);
- hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
- numa_node, RTE_MEMZONE_IOVA_CONTIG,
- RTE_CACHE_LINE_SIZE);
- if (hdr_mz == NULL) {
- if (rte_errno == EEXIST)
- hdr_mz = rte_memzone_lookup(vq_hdr_name);
- if (hdr_mz == NULL) {
- ret = -ENOMEM;
- goto free_mz;
- }
- }
+ ret = virtio_alloc_queue_headers(vq, numa_node, vq_name);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to alloc queue headers");
+ goto free_mz;
}
if (queue_type == VTNET_RQ) {
@@ -411,21 +469,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
txvq = &vq->txq;
txvq->port_id = dev->data->port_id;
txvq->mz = mz;
- txvq->virtio_net_hdr_mz = hdr_mz;
- if (hw->use_va)
- txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
- else
- txvq->virtio_net_hdr_mem = hdr_mz->iova;
} else if (queue_type == VTNET_CQ) {
cvq = &vq->cq;
cvq->mz = mz;
- cvq->virtio_net_hdr_mz = hdr_mz;
- if (hw->use_va)
- cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
- else
- cvq->virtio_net_hdr_mem = hdr_mz->iova;
- memset(cvq->virtio_net_hdr_mz->addr, 0, rte_mem_page_size());
-
hw->cvq = cvq;
vq->cq.notify_queue = &virtio_control_queue_notify;
}
@@ -439,18 +485,15 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
struct virtio_tx_region *txr;
unsigned int i;
- txr = hdr_mz->addr;
- memset(txr, 0, vq_size * sizeof(*txr));
+ txr = txvq->hdr_mz->addr;
for (i = 0; i < vq_size; i++) {
/* first indirect descriptor is always the tx header */
if (!virtio_with_packed_queue(hw)) {
struct vring_desc *start_dp = txr[i].tx_indir;
vring_desc_init_split(start_dp,
RTE_DIM(txr[i].tx_indir));
- start_dp->addr = txvq->virtio_net_hdr_mem
- + i * sizeof(*txr)
- + offsetof(struct virtio_tx_region,
- tx_hdr);
+ start_dp->addr = txvq->hdr_mem + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region, tx_hdr);
start_dp->len = hw->vtnet_hdr_size;
start_dp->flags = VRING_DESC_F_NEXT;
} else {
@@ -458,10 +501,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
txr[i].tx_packed_indir;
vring_desc_init_indirect_packed(start_dp,
RTE_DIM(txr[i].tx_packed_indir));
- start_dp->addr = txvq->virtio_net_hdr_mem
- + i * sizeof(*txr)
- + offsetof(struct virtio_tx_region,
- tx_hdr);
+ start_dp->addr = txvq->hdr_mem + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region, tx_hdr);
start_dp->len = hw->vtnet_hdr_size;
}
}
@@ -481,7 +522,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
free_sw_ring:
rte_free(sw_ring);
free_hdr_mz:
- rte_memzone_free(hdr_mz);
+ virtio_free_queue_headers(vq);
free_mz:
rte_memzone_free(mz);
free_vq:
@@ -514,12 +555,12 @@ virtio_free_queues(struct virtio_hw *hw)
rte_memzone_free(vq->rxq.mz);
} else if (queue_type == VTNET_TQ) {
rte_memzone_free(vq->txq.mz);
- rte_memzone_free(vq->txq.virtio_net_hdr_mz);
} else {
rte_memzone_free(vq->cq.mz);
- rte_memzone_free(vq->cq.virtio_net_hdr_mz);
}
+ virtio_free_queue_headers(vq);
+
rte_free(vq);
hw->vqs[i] = NULL;
}
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index d9d40832e0..bd95e8ceb5 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -542,7 +542,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t needed, int use_indirect, int can_push,
int in_order)
{
- struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+ struct virtio_tx_region *txr = txvq->hdr_mz->addr;
struct vq_desc_extra *dxp;
struct virtqueue *vq = virtnet_txq_to_vq(txvq);
struct vring_desc *start_dp;
@@ -579,9 +579,8 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
* the first slot in indirect ring is already preset
* to point to the header in reserved region
*/
- start_dp[idx].addr = txvq->virtio_net_hdr_mem +
- RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
- start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
+ start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
+ start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
start_dp[idx].flags = VRING_DESC_F_INDIRECT;
hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
@@ -592,9 +591,8 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
/* setup first tx ring slot to point to header
* stored in reserved region.
*/
- start_dp[idx].addr = txvq->virtio_net_hdr_mem +
- RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
- start_dp[idx].len = vq->hw->vtnet_hdr_size;
+ start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+ start_dp[idx].len = vq->hw->vtnet_hdr_size;
start_dp[idx].flags = VRING_DESC_F_NEXT;
hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 6ee3a13100..226c722d64 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -33,15 +33,13 @@ struct virtnet_rx {
};
struct virtnet_tx {
- /**< memzone to populate hdr. */
- const struct rte_memzone *virtio_net_hdr_mz;
- rte_iova_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+ const struct rte_memzone *hdr_mz; /**< memzone to populate hdr. */
+ rte_iova_t hdr_mem; /**< hdr for each xmit packet */
- uint16_t queue_id; /**< DPDK queue index. */
- uint16_t port_id; /**< Device port identifier. */
+ uint16_t queue_id; /**< DPDK queue index. */
+ uint16_t port_id; /**< Device port identifier. */
- /* Statistics */
- struct virtnet_stats stats;
+ struct virtnet_stats stats; /* Statistics */
const struct rte_memzone *mz; /**< mem zone to populate TX ring. */
};
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index c98d696e62..3b174a5923 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -200,10 +200,9 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
vq->vq_packed.event_flags_shadow = 0;
txvq = &vq->txq;
- txr = txvq->virtio_net_hdr_mz->addr;
+ txr = txvq->hdr_mz->addr;
memset(txvq->mz->addr, 0, txvq->mz->len);
- memset(txvq->virtio_net_hdr_mz->addr, 0,
- txvq->virtio_net_hdr_mz->len);
+ memset(txvq->hdr_mz->addr, 0, txvq->hdr_mz->len);
for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
dxp = &vq->vq_descx[desc_idx];
@@ -217,8 +216,7 @@ virtqueue_txvq_reset_packed(struct virtqueue *vq)
start_dp = txr[desc_idx].tx_packed_indir;
vring_desc_init_indirect_packed(start_dp,
RTE_DIM(txr[desc_idx].tx_packed_indir));
- start_dp->addr = txvq->virtio_net_hdr_mem
- + desc_idx * sizeof(*txr)
+ start_dp->addr = txvq->hdr_mem + desc_idx * sizeof(*txr)
+ offsetof(struct virtio_tx_region, tx_hdr);
start_dp->len = vq->hw->vtnet_hdr_size;
}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 62f472850e..f5058f362c 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -604,7 +604,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t needed, int use_indirect, int can_push,
int in_order)
{
- struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+ struct virtio_tx_region *txr = txvq->hdr_mz->addr;
struct vq_desc_extra *dxp;
struct virtqueue *vq = virtnet_txq_to_vq(txvq);
struct vring_packed_desc *start_dp, *head_dp;
@@ -646,10 +646,8 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
* the first slot in indirect ring is already preset
* to point to the header in reserved region
*/
- start_dp[idx].addr = txvq->virtio_net_hdr_mem +
- RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
- start_dp[idx].len = (seg_num + 1) *
- sizeof(struct vring_packed_desc);
+ start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
+ start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_packed_desc);
/* Packed descriptor id needs to be restored when inorder. */
if (in_order)
start_dp[idx].id = idx;
@@ -665,9 +663,8 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
/* setup first tx ring slot to point to header
* stored in reserved region.
*/
- start_dp[idx].addr = txvq->virtio_net_hdr_mem +
- RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
- start_dp[idx].len = vq->hw->vtnet_hdr_size;
+ start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+ start_dp[idx].len = vq->hw->vtnet_hdr_size;
hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
idx++;
if (idx >= vq->vq_nentries) {
--
2.39.1
* [PATCH 04/21] net/virtio: remove port ID info from Rx queue
From: Maxime Coquelin @ 2023-02-07 15:15 UTC
To: dev, chenbo.xia, david.marchand, eperezma, stephen; +Cc: Maxime Coquelin
The port ID information is duplicated in several places.
This patch removes it from the virtnet_rx struct as it can
be found in the virtio_hw struct.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
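The access pattern this patch converges on, as a small sketch
(example_rxq_port_id is illustrative; virtnet_rxq_to_vq is the existing
container-of helper):

    static inline uint16_t
    example_rxq_port_id(struct virtnet_rx *rxvq)
    {
            struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);

            /* Single source of truth instead of a per-queue copy. */
            return vq->hw->port_id;
    }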
drivers/net/virtio/virtio_ethdev.c | 1 -
drivers/net/virtio/virtio_rxtx.c | 25 ++++++++++---------------
drivers/net/virtio/virtio_rxtx.h | 1 -
drivers/net/virtio/virtio_rxtx_packed.c | 3 +--
drivers/net/virtio/virtio_rxtx_simple.c | 3 ++-
drivers/net/virtio/virtio_rxtx_simple.h | 5 +++--
6 files changed, 16 insertions(+), 22 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 057388cfaf..1c10c16ca7 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -462,7 +462,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
vq->sw_ring = sw_ring;
rxvq = &vq->rxq;
- rxvq->port_id = dev->data->port_id;
rxvq->mz = mz;
rxvq->fake_mbuf = fake_mbuf;
} else if (queue_type == VTNET_TQ) {
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index bd95e8ceb5..45c04aa3f8 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1024,7 +1024,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
continue;
}
- rxm->port = rxvq->port_id;
+ rxm->port = hw->port_id;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->ol_flags = 0;
rxm->vlan_tci = 0;
@@ -1066,8 +1066,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
nb_enqueued += free_cnt;
} else {
- struct rte_eth_dev *dev =
- &rte_eth_devices[rxvq->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
dev->data->rx_mbuf_alloc_failed += free_cnt;
}
}
@@ -1127,7 +1126,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
continue;
}
- rxm->port = rxvq->port_id;
+ rxm->port = hw->port_id;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->ol_flags = 0;
rxm->vlan_tci = 0;
@@ -1169,8 +1168,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
}
nb_enqueued += free_cnt;
} else {
- struct rte_eth_dev *dev =
- &rte_eth_devices[rxvq->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
dev->data->rx_mbuf_alloc_failed += free_cnt;
}
}
@@ -1258,7 +1256,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
rxm->data_len = (uint16_t)(len[i] - hdr_size);
- rxm->port = rxvq->port_id;
+ rxm->port = hw->port_id;
rx_pkts[nb_rx] = rxm;
prev = rxm;
@@ -1352,8 +1350,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
}
nb_enqueued += free_cnt;
} else {
- struct rte_eth_dev *dev =
- &rte_eth_devices[rxvq->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
dev->data->rx_mbuf_alloc_failed += free_cnt;
}
}
@@ -1437,7 +1434,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
rxm->data_len = (uint16_t)(len[i] - hdr_size);
- rxm->port = rxvq->port_id;
+ rxm->port = hw->port_id;
rx_pkts[nb_rx] = rxm;
prev = rxm;
@@ -1530,8 +1527,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
}
nb_enqueued += free_cnt;
} else {
- struct rte_eth_dev *dev =
- &rte_eth_devices[rxvq->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
dev->data->rx_mbuf_alloc_failed += free_cnt;
}
}
@@ -1610,7 +1606,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
rxm->data_len = (uint16_t)(len[i] - hdr_size);
- rxm->port = rxvq->port_id;
+ rxm->port = hw->port_id;
rx_pkts[nb_rx] = rxm;
prev = rxm;
@@ -1699,8 +1695,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
}
nb_enqueued += free_cnt;
} else {
- struct rte_eth_dev *dev =
- &rte_eth_devices[rxvq->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
dev->data->rx_mbuf_alloc_failed += free_cnt;
}
}
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 226c722d64..97de9eb0a3 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -24,7 +24,6 @@ struct virtnet_rx {
struct rte_mempool *mpool; /**< mempool for mbuf allocation */
uint16_t queue_id; /**< DPDK queue index. */
- uint16_t port_id; /**< Device port identifier. */
/* Statistics */
struct virtnet_stats stats;
diff --git a/drivers/net/virtio/virtio_rxtx_packed.c b/drivers/net/virtio/virtio_rxtx_packed.c
index 45cf39df22..5f7d4903bc 100644
--- a/drivers/net/virtio/virtio_rxtx_packed.c
+++ b/drivers/net/virtio/virtio_rxtx_packed.c
@@ -124,8 +124,7 @@ virtio_recv_pkts_packed_vec(void *rx_queue,
free_cnt);
nb_enqueued += free_cnt;
} else {
- struct rte_eth_dev *dev =
- &rte_eth_devices[rxvq->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
dev->data->rx_mbuf_alloc_failed += free_cnt;
}
}
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index f248869a8f..438256970d 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -30,12 +30,13 @@
int __rte_cold
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxq);
uintptr_t p;
struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
mb_def.nb_segs = 1;
mb_def.data_off = RTE_PKTMBUF_HEADROOM;
- mb_def.port = rxq->port_id;
+ mb_def.port = vq->hw->port_id;
rte_mbuf_refcnt_set(&mb_def, 1);
/* prevent compiler reordering: rearm_data covers previous fields */
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index d8f96e0434..8e235f4dbc 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -32,8 +32,9 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
RTE_VIRTIO_VPMD_RX_REARM_THRESH);
if (unlikely(ret)) {
- rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed +=
- RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ struct rte_eth_dev *dev = &rte_eth_devices[vq->hw->port_id];
+
+ dev->data->rx_mbuf_alloc_failed += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
return;
}
--
2.39.1
* Re: [PATCH 00/21] Add control queue & MQ support to Virtio-user vDPA
From: Maxime Coquelin @ 2023-02-07 15:17 UTC
To: dev, chenbo.xia, david.marchand, eperezma, stephen
Please discard, forgot to prefix with v2
On 2/7/23 16:15, Maxime Coquelin wrote:
> This series introduces control queue support for the Vhost-vDPA
> backend. This is a requirement to support multiqueue, but
> will also be useful for other features, RSS for example.
>
> Since the Virtio-user layer of the Virtio PMD must handle
> some control messages, like the number of queue pairs to
> be used by the device, a shadow control queue is created
> at the Virtio-user layer.
>
> Control messages from the regular Virtio control queue
> are still dequeued and handled if needed by the Virtio-user
> layer, and are then forwarded to the shadow control queue
> so that the physical vDPA device can handle them.
>
> This model is similar to the one adopted by the QEMU
> project.
>
> In order to avoid code duplication, virtqueue allocation
> and control queue message sending have been factored out
> of the Virtio layer to be reusable by the Virtio-user
> layer.
>
> Finally, in order to support vDPA hardware which may
> support a large number of queues, the last patch removes the
> 8 queue pairs limitation by dynamically allocating
> vring metadata.
>
> The series has been tested with Nvidia Cx-6 DX NIC
> with up to 16 queue pairs:
>
> # echo 0 > /sys/bus/pci/devices/0000\:3b\:00.0/sriov_numvfs
> # echo 0 > /sys/bus/pci/devices/0000\:3b\:00.1/sriov_numvfs
> # modprobe vhost_vdpa
> # modprobe mlx5_vdpa
> # echo 1 > /sys/bus/pci/devices/0000\:3b\:00.0/sriov_numvfs
> # echo 0000:3b:00.2 >/sys/bus/pci/drivers/mlx5_core/unbind
> # devlink dev eswitch set pci/0000:3b:00.0 mode switchdev
> # echo 0000:3b:00.2 >/sys/bus/pci/drivers/mlx5_core/bind
> # vdpa dev add name vdpa0 mgmtdev pci/0000:3b:00.2 mac 00:11:22:33:44:03 max_vqp 16
> # ulimit -l unlimited
> # dpdk-testpmd -l 0,2,4,6 --socket-mem 1024,0 --vdev 'virtio_user0,path=/dev/vhost-vdpa-0' --no-pci -n 3 -- --nb-cores=3 -i --rxq=16 --txq=16
>
> Changes in v2:
> ==============
> - Fix double spaces (Chenbo)
> - Get rid of uneeded gotos (Stephen)
> - Only allocate packed ring metadata if supported (Chenbo)
> - Rebased on top of main
>
> Maxime Coquelin (21):
> net/virtio: move CVQ code into a dedicated file
> net/virtio: introduce notify callback for control queue
> net/virtio: virtqueue headers alloc refactoring
> net/virtio: remove port ID info from Rx queue
> net/virtio: remove unused fields in Tx queue struct
> net/virtio: remove unused queue ID field in Rx queue
> net/virtio: remove unused Port ID in control queue
> net/virtio: move vring memzone to virtqueue struct
> net/virtio: refactor indirect desc headers init
> net/virtio: alloc Rx SW ring only if vectorized path
> net/virtio: extract virtqueue init from virtio queue init
> net/virtio-user: fix device starting failure handling
> net/virtio-user: simplify queues setup
> net/virtio-user: use proper type for number of queue pairs
> net/virtio-user: get max number of queue pairs from device
> net/virtio-user: allocate shadow control queue
> net/virtio-user: send shadow virtqueue info to the backend
> net/virtio-user: add new callback to enable control queue
> net/virtio-user: forward control messages to shadow queue
> net/virtio-user: advertize control VQ support with vDPA
> net/virtio-user: remove max queues limitation
>
> drivers/net/virtio/meson.build | 1 +
> drivers/net/virtio/virtio.h | 6 -
> drivers/net/virtio/virtio_cvq.c | 229 +++++++++
> drivers/net/virtio/virtio_cvq.h | 127 +++++
> drivers/net/virtio/virtio_ethdev.c | 472 +-----------------
> drivers/net/virtio/virtio_rxtx.c | 47 +-
> drivers/net/virtio/virtio_rxtx.h | 31 +-
> drivers/net/virtio/virtio_rxtx_packed.c | 3 +-
> drivers/net/virtio/virtio_rxtx_simple.c | 3 +-
> drivers/net/virtio/virtio_rxtx_simple.h | 7 +-
> .../net/virtio/virtio_rxtx_simple_altivec.c | 4 +-
> drivers/net/virtio/virtio_rxtx_simple_neon.c | 4 +-
> drivers/net/virtio/virtio_rxtx_simple_sse.c | 4 +-
> drivers/net/virtio/virtio_user/vhost.h | 1 +
> drivers/net/virtio/virtio_user/vhost_vdpa.c | 19 +-
> .../net/virtio/virtio_user/virtio_user_dev.c | 305 +++++++++--
> .../net/virtio/virtio_user/virtio_user_dev.h | 30 +-
> drivers/net/virtio/virtio_user_ethdev.c | 49 +-
> drivers/net/virtio/virtqueue.c | 346 ++++++++++++-
> drivers/net/virtio/virtqueue.h | 127 +----
> 20 files changed, 1069 insertions(+), 746 deletions(-)
> create mode 100644 drivers/net/virtio/virtio_cvq.c
> create mode 100644 drivers/net/virtio/virtio_cvq.h
>