From: Chaoyong He <chaoyong.he@corigine.com>
To: dev@dpdk.org
Cc: oss-drivers@corigine.com, Xinying Yu <xinying.yu@corigine.com>,
Chaoyong He <chaoyong.he@corigine.com>,
Long Wu <long.wu@corigine.com>,
Peng Zhang <peng.zhang@corigine.com>,
Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH v4 07/11] vdpa/nfp: setup the VF configuration
Date: Mon, 5 Aug 2024 10:12:44 +0800
Message-ID: <20240805021248.1051198-8-chaoyong.he@corigine.com>
In-Reply-To: <20240805021248.1051198-1-chaoyong.he@corigine.com>
From: Xinying Yu <xinying.yu@corigine.com>
Create the relay vring on the host and write the address of the Rx
used ring into the VF config BAR, so that the device can DMA the
used ring information to the host rather than directly to the VM.
Use the 'NFP_NET_CFG_CTRL_LM_RELAY' flag to notify the device side,
and enable the MSI-X interrupt on the device.
The Tx ring address does not need to change, since the relay vring
only assists the Rx ring in doing the dirty page logging.
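In rough outline, the Rx relay path added by this patch works as
follows (a simplified pseudo-C sketch, with error handling and the
per-queue loop omitted; the real logic lives in
nfp_vdpa_relay_vring_alloc() and nfp_vdpa_start() below):

    /* Allocate a host-local (mediated) copy of the guest vring and
     * make it DMA-able for the device at a driver-chosen IOVA. */
    vring_buf = rte_zmalloc("nfp_vdpa_relay", size, page_size);
    vring_init(&hw->m_vring[qid], vring.size, vring_buf, page_size);
    rte_vfio_container_dma_map(vfio_container_fd,
                    (uint64_t)(uintptr_t)vring_buf, m_vring_iova, size);

    /* Point the device's Rx used ring at the mediated copy, so the
     * device DMAs used entries to the host instead of the guest. */
    hw->vring[qid].used = m_vring_iova +
                    (char *)hw->m_vring[qid].used -
                    (char *)hw->m_vring[qid].desc;

    /* The relay forwards new used entries to the guest's real used
     * ring; rte_vdpa_relay_vring_used() also logs the dirty pages. */
    rte_vdpa_relay_vring_used(vid, qid, &hw->m_vring[qid]);
    rte_vhost_vring_call(vid, qid);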
Signed-off-by: Xinying Yu <xinying.yu@corigine.com>
Reviewed-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
drivers/common/nfp/nfp_common_ctrl.h | 3 +
drivers/vdpa/nfp/nfp_vdpa.c | 203 ++++++++++++++++++++++++---
drivers/vdpa/nfp/nfp_vdpa_core.c | 55 ++++++--
drivers/vdpa/nfp/nfp_vdpa_core.h | 8 ++
4 files changed, 239 insertions(+), 30 deletions(-)
diff --git a/drivers/common/nfp/nfp_common_ctrl.h b/drivers/common/nfp/nfp_common_ctrl.h
index 1b30f81fdb..8a760ddb4b 100644
--- a/drivers/common/nfp/nfp_common_ctrl.h
+++ b/drivers/common/nfp/nfp_common_ctrl.h
@@ -207,6 +207,9 @@ struct nfp_net_fw_ver {
#define NFP_NET_CFG_CTRL_FLOW_STEER (0x1 << 8) /**< Flow Steering */
#define NFP_NET_CFG_CTRL_VIRTIO (0x1 << 10) /**< Virtio offload */
#define NFP_NET_CFG_CTRL_IN_ORDER (0x1 << 11) /**< Virtio in-order flag */
+#define NFP_NET_CFG_CTRL_LM_RELAY (0x1 << 12) /**< Virtio live migration relay start */
+#define NFP_NET_CFG_CTRL_NOTIFY_DATA (0x1 << 13) /**< Virtio notification data flag */
+#define NFP_NET_CFG_CTRL_SWLM (0x1 << 14) /**< Virtio SW live migration enable */
#define NFP_NET_CFG_CTRL_USO (0x1 << 16) /**< UDP segmentation offload */
#define NFP_NET_CFG_CAP_WORD1 0x00a4
diff --git a/drivers/vdpa/nfp/nfp_vdpa.c b/drivers/vdpa/nfp/nfp_vdpa.c
index 1643ebbb8c..983123ba08 100644
--- a/drivers/vdpa/nfp/nfp_vdpa.c
+++ b/drivers/vdpa/nfp/nfp_vdpa.c
@@ -11,6 +11,8 @@
#include <nfp_common_pci.h>
#include <nfp_dev.h>
#include <rte_vfio.h>
+#include <rte_eal_paging.h>
+#include <rte_malloc.h>
#include <vdpa_driver.h>
#include "nfp_vdpa_core.h"
@@ -21,6 +23,9 @@
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
sizeof(int) * (NFP_VDPA_MAX_QUEUES * 2 + 1))
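+/* Byte length of a used ring with 'size' entries: the vring_used
+ * header (flags and idx) plus one vring_used_elem per descriptor. */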
+#define NFP_VDPA_USED_RING_LEN(size) \
+ ((size) * sizeof(struct vring_used_elem) + sizeof(struct vring_used))
+
struct nfp_vdpa_dev {
struct rte_pci_device *pci_dev;
struct rte_vdpa_device *vdev;
@@ -261,15 +266,85 @@ nfp_vdpa_qva_to_gpa(int vid,
return gpa;
}
+static void
+nfp_vdpa_relay_vring_free(struct nfp_vdpa_dev *device,
+ uint16_t vring_index)
+{
+ uint16_t i;
+ uint64_t size;
+ struct rte_vhost_vring vring;
+ uint64_t m_vring_iova = NFP_VDPA_RELAY_VRING;
+
+ for (i = 0; i < vring_index; i++) {
+ rte_vhost_get_vhost_vring(device->vid, i, &vring);
+
+ size = RTE_ALIGN_CEIL(vring_size(vring.size, rte_mem_page_size()),
+ rte_mem_page_size());
+ rte_vfio_container_dma_unmap(device->vfio_container_fd,
+ (uint64_t)(uintptr_t)device->hw.m_vring[i].desc,
+ m_vring_iova, size);
+
+ rte_free(device->hw.m_vring[i].desc);
+ m_vring_iova += size;
+ }
+}
+
static int
-nfp_vdpa_start(struct nfp_vdpa_dev *device)
+nfp_vdpa_relay_vring_alloc(struct nfp_vdpa_dev *device)
+{
+ int ret;
+ uint16_t i;
+ uint64_t size;
+ void *vring_buf;
+ uint64_t page_size;
+ struct rte_vhost_vring vring;
+ struct nfp_vdpa_hw *vdpa_hw = &device->hw;
+ uint64_t m_vring_iova = NFP_VDPA_RELAY_VRING;
+
+ page_size = rte_mem_page_size();
+
+ for (i = 0; i < vdpa_hw->nr_vring; i++) {
+ rte_vhost_get_vhost_vring(device->vid, i, &vring);
+
+ size = RTE_ALIGN_CEIL(vring_size(vring.size, page_size), page_size);
+ vring_buf = rte_zmalloc("nfp_vdpa_relay", size, page_size);
+ if (vring_buf == NULL)
+ goto vring_free_all;
+
+ vring_init(&vdpa_hw->m_vring[i], vring.size, vring_buf, page_size);
+
+ ret = rte_vfio_container_dma_map(device->vfio_container_fd,
+ (uint64_t)(uintptr_t)vring_buf, m_vring_iova, size);
+ if (ret != 0) {
+ DRV_VDPA_LOG(ERR, "vDPA vring relay dma map failed.");
+ goto vring_free_one;
+ }
+
+ m_vring_iova += size;
+ }
+
+ return 0;
+
+vring_free_one:
+ rte_free(device->hw.m_vring[i].desc);
+vring_free_all:
+ nfp_vdpa_relay_vring_free(device, i);
+
+ return -ENOSPC;
+}
+
+static int
+nfp_vdpa_start(struct nfp_vdpa_dev *device,
+ bool relay)
{
int ret;
int vid;
uint16_t i;
uint64_t gpa;
+ uint64_t size;
struct rte_vhost_vring vring;
struct nfp_vdpa_hw *vdpa_hw = &device->hw;
+ uint64_t m_vring_iova = NFP_VDPA_RELAY_VRING;
vid = device->vid;
vdpa_hw->nr_vring = rte_vhost_get_vring_num(vid);
@@ -278,15 +353,21 @@ nfp_vdpa_start(struct nfp_vdpa_dev *device)
if (ret != 0)
return ret;
+ if (relay) {
+ ret = nfp_vdpa_relay_vring_alloc(device);
+ if (ret != 0)
+ return ret;
+ }
+
for (i = 0; i < vdpa_hw->nr_vring; i++) {
ret = rte_vhost_get_vhost_vring(vid, i, &vring);
if (ret != 0)
- return ret;
+ goto relay_vring_free;
gpa = nfp_vdpa_qva_to_gpa(vid, (uint64_t)(uintptr_t)vring.desc);
if (gpa == 0) {
DRV_VDPA_LOG(ERR, "Fail to get GPA for descriptor ring.");
- return -1;
+ goto relay_vring_free;
}
vdpa_hw->vring[i].desc = gpa;
@@ -294,45 +375,123 @@ nfp_vdpa_start(struct nfp_vdpa_dev *device)
gpa = nfp_vdpa_qva_to_gpa(vid, (uint64_t)(uintptr_t)vring.avail);
if (gpa == 0) {
DRV_VDPA_LOG(ERR, "Fail to get GPA for available ring.");
- return -1;
+ goto relay_vring_free;
}
vdpa_hw->vring[i].avail = gpa;
- gpa = nfp_vdpa_qva_to_gpa(vid, (uint64_t)(uintptr_t)vring.used);
- if (gpa == 0) {
- DRV_VDPA_LOG(ERR, "Fail to get GPA for used ring.");
- return -1;
- }
+ /* Direct I/O for Tx queue, relay for Rx queue */
+ if (relay && ((i & 1) == 0)) {
+ vdpa_hw->vring[i].used = m_vring_iova +
+ (char *)vdpa_hw->m_vring[i].used -
+ (char *)vdpa_hw->m_vring[i].desc;
+
+ ret = rte_vhost_get_vring_base(vid, i,
+ &vdpa_hw->m_vring[i].avail->idx,
+ &vdpa_hw->m_vring[i].used->idx);
+ if (ret != 0)
+ goto relay_vring_free;
+ } else {
+ gpa = nfp_vdpa_qva_to_gpa(vid, (uint64_t)(uintptr_t)vring.used);
+ if (gpa == 0) {
+ DRV_VDPA_LOG(ERR, "Fail to get GPA for used ring.");
+ goto relay_vring_free;
+ }
- vdpa_hw->vring[i].used = gpa;
+ vdpa_hw->vring[i].used = gpa;
+ }
vdpa_hw->vring[i].size = vring.size;
+ if (relay) {
+ size = RTE_ALIGN_CEIL(vring_size(vring.size,
+ rte_mem_page_size()), rte_mem_page_size());
+ m_vring_iova += size;
+ }
+
ret = rte_vhost_get_vring_base(vid, i,
&vdpa_hw->vring[i].last_avail_idx,
&vdpa_hw->vring[i].last_used_idx);
if (ret != 0)
- return ret;
+ goto relay_vring_free;
}
- return nfp_vdpa_hw_start(&device->hw, vid);
+ if (relay)
+ return nfp_vdpa_relay_hw_start(&device->hw, vid);
+ else
+ return nfp_vdpa_hw_start(&device->hw, vid);
+
+relay_vring_free:
+ if (relay)
+ nfp_vdpa_relay_vring_free(device, vdpa_hw->nr_vring);
+
+ return -EFAULT;
+}
+
+static void
+nfp_vdpa_update_used_ring(struct nfp_vdpa_dev *dev,
+ uint16_t qid)
+{
+ rte_vdpa_relay_vring_used(dev->vid, qid, &dev->hw.m_vring[qid]);
+ rte_vhost_vring_call(dev->vid, qid);
}
static void
-nfp_vdpa_stop(struct nfp_vdpa_dev *device)
+nfp_vdpa_relay_stop(struct nfp_vdpa_dev *device)
{
int vid;
uint32_t i;
+ uint64_t len;
+ struct rte_vhost_vring vring;
struct nfp_vdpa_hw *vdpa_hw = &device->hw;
nfp_vdpa_hw_stop(vdpa_hw);
vid = device->vid;
- for (i = 0; i < vdpa_hw->nr_vring; i++)
+ for (i = 0; i < vdpa_hw->nr_vring; i++) {
+ /* Synchronize any remaining new used entries */
+ if ((i & 1) == 0)
+ nfp_vdpa_update_used_ring(device, i);
+
+ rte_vhost_get_vhost_vring(vid, i, &vring);
+ len = NFP_VDPA_USED_RING_LEN(vring.size);
+ vdpa_hw->vring[i].last_avail_idx = vring.avail->idx;
+ vdpa_hw->vring[i].last_used_idx = vring.used->idx;
+
rte_vhost_set_vring_base(vid, i,
vdpa_hw->vring[i].last_avail_idx,
vdpa_hw->vring[i].last_used_idx);
+
+ rte_vhost_log_used_vring(vid, i, 0, len);
+
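+ /* Advance the guest used index to the avail index so no
+ * descriptors remain in flight across the migration. */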
+ if (vring.used->idx != vring.avail->idx)
+ rte_atomic_store_explicit(
+ (unsigned short __rte_atomic *)&vring.used->idx,
+ vring.avail->idx, rte_memory_order_release);
+ }
+
+ nfp_vdpa_relay_vring_free(device, vdpa_hw->nr_vring);
+}
+
+static void
+nfp_vdpa_stop(struct nfp_vdpa_dev *device,
+ bool relay)
+{
+ int vid;
+ uint32_t i;
+ struct nfp_vdpa_hw *vdpa_hw = &device->hw;
+
+ nfp_vdpa_hw_stop(vdpa_hw);
+
+ vid = device->vid;
+ if (relay)
+ nfp_vdpa_relay_stop(device);
+ else
+ for (i = 0; i < vdpa_hw->nr_vring; i++)
+ rte_vhost_set_vring_base(vid, i,
+ vdpa_hw->vring[i].last_avail_idx,
+ vdpa_hw->vring[i].last_used_idx);
+
}
static int
@@ -575,7 +734,7 @@ update_datapath(struct nfp_vdpa_dev *device)
if (ret != 0)
goto dma_map_rollback;
- ret = nfp_vdpa_start(device);
+ ret = nfp_vdpa_start(device, false);
if (ret != 0)
goto disable_vfio_intr;
@@ -591,7 +750,7 @@ update_datapath(struct nfp_vdpa_dev *device)
rte_memory_order_relaxed) != 0))) {
nfp_vdpa_unset_notify_relay(device);
- nfp_vdpa_stop(device);
+ nfp_vdpa_stop(device, false);
ret = nfp_vdpa_disable_vfio_intr(device);
if (ret != 0)
@@ -608,7 +767,7 @@ update_datapath(struct nfp_vdpa_dev *device)
return 0;
vdpa_stop:
- nfp_vdpa_stop(device);
+ nfp_vdpa_stop(device, false);
disable_vfio_intr:
nfp_vdpa_disable_vfio_intr(device);
dma_map_rollback:
@@ -639,10 +798,17 @@ nfp_vdpa_sw_fallback(struct nfp_vdpa_dev *device)
if (ret != 0)
goto error;
+ /* Configure the VF */
+ ret = nfp_vdpa_start(device, true);
+ if (ret != 0)
+ goto unset_intr;
+
device->hw.sw_fallback_running = true;
return 0;
+unset_intr:
+ nfp_vdpa_disable_vfio_intr(device);
error:
return ret;
}
@@ -691,6 +857,9 @@ nfp_vdpa_dev_close(int vid)
device = node->device;
if (device->hw.sw_fallback_running) {
+ /* Reset VF */
+ nfp_vdpa_stop(device, true);
+
device->hw.sw_fallback_running = false;
rte_atomic_store_explicit(&device->dev_attached, 0,
diff --git a/drivers/vdpa/nfp/nfp_vdpa_core.c b/drivers/vdpa/nfp/nfp_vdpa_core.c
index 50eda4cb2c..2b609dddc2 100644
--- a/drivers/vdpa/nfp/nfp_vdpa_core.c
+++ b/drivers/vdpa/nfp/nfp_vdpa_core.c
@@ -109,7 +109,8 @@ nfp_vdpa_check_offloads(void)
static int
nfp_vdpa_vf_config(struct nfp_hw *hw,
- int vid)
+ int vid,
+ bool relay)
{
int ret;
uint32_t update;
@@ -133,6 +134,10 @@ nfp_vdpa_vf_config(struct nfp_hw *hw,
nfp_write_mac(hw, (uint8_t *)mac_addr);
new_ext_ctrl = nfp_vdpa_check_offloads();
+ if (relay)
+ new_ext_ctrl |= NFP_NET_CFG_CTRL_LM_RELAY;
+ else
+ new_ext_ctrl |= NFP_NET_CFG_CTRL_SWLM;
update = NFP_NET_CFG_UPDATE_GEN;
ret = nfp_ext_reconfig(hw, new_ext_ctrl, update);
@@ -149,6 +154,15 @@ nfp_vdpa_vf_config(struct nfp_hw *hw,
NFP_NET_CFG_UPDATE_GEN |
NFP_NET_CFG_UPDATE_RING;
+ if (relay) {
+ update |= NFP_NET_CFG_UPDATE_MSIX;
+
+ /* Enable the MSI-X interrupt for the vDPA relay */
+ new_ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
+
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 1);
+ }
+
ret = nfp_reconfig(hw, new_ctrl, update);
if (ret < 0)
return -EIO;
@@ -164,20 +178,24 @@ nfp_vdpa_vf_config(struct nfp_hw *hw,
}
static void
-nfp_vdpa_queue_config(struct nfp_vdpa_hw *vdpa_hw)
+nfp_vdpa_queue_config(struct nfp_vdpa_hw *vdpa_hw,
+ bool relay)
{
struct nfp_hw *hw = &vdpa_hw->super;
- nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(0), vdpa_hw->vring[1].desc);
- nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(0),
- rte_log2_u32(vdpa_hw->vring[1].size));
- nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(1), vdpa_hw->vring[1].avail);
- nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(2), vdpa_hw->vring[1].used);
+ if (!relay) {
+ nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(0), vdpa_hw->vring[1].desc);
+ nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(0),
+ rte_log2_u32(vdpa_hw->vring[1].size));
+ nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(1), vdpa_hw->vring[1].avail);
+ nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(2), vdpa_hw->vring[1].used);
+
+ nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(0), vdpa_hw->vring[0].desc);
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(0),
+ rte_log2_u32(vdpa_hw->vring[0].size));
+ nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(1), vdpa_hw->vring[0].avail);
+ }
- nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(0), vdpa_hw->vring[0].desc);
- nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(0),
- rte_log2_u32(vdpa_hw->vring[0].size));
- nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(1), vdpa_hw->vring[0].avail);
nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(2), vdpa_hw->vring[0].used);
rte_wmb();
@@ -189,12 +207,23 @@ nfp_vdpa_hw_start(struct nfp_vdpa_hw *vdpa_hw,
{
struct nfp_hw *hw = &vdpa_hw->super;
- nfp_vdpa_queue_config(vdpa_hw);
+ nfp_vdpa_queue_config(vdpa_hw, false);
nfp_disable_queues(hw);
nfp_enable_queues(hw, NFP_VDPA_MAX_QUEUES, NFP_VDPA_MAX_QUEUES);
- return nfp_vdpa_vf_config(hw, vid);
+ return nfp_vdpa_vf_config(hw, vid, false);
+}
+
+int
+nfp_vdpa_relay_hw_start(struct nfp_vdpa_hw *vdpa_hw,
+ int vid)
+{
+ struct nfp_hw *hw = &vdpa_hw->super;
+
+ nfp_vdpa_queue_config(vdpa_hw, true);
+
+ return nfp_vdpa_vf_config(hw, vid, true);
}
void
diff --git a/drivers/vdpa/nfp/nfp_vdpa_core.h b/drivers/vdpa/nfp/nfp_vdpa_core.h
index 0f880fc0c6..a339ace601 100644
--- a/drivers/vdpa/nfp/nfp_vdpa_core.h
+++ b/drivers/vdpa/nfp/nfp_vdpa_core.h
@@ -9,12 +9,15 @@
#include <bus_pci_driver.h>
#include <nfp_common.h>
#include <rte_ether.h>
+#include <rte_vhost.h>
#define NFP_VDPA_MAX_QUEUES 1
#define NFP_VDPA_NOTIFY_ADDR_BASE 0x4000
#define NFP_VDPA_NOTIFY_ADDR_INTERVAL 0x1000
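+/* IOVA base at which the host-local relay (mediated) vrings are DMA
+ * mapped for the device. */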
+#define NFP_VDPA_RELAY_VRING 0xd0000000
+
struct nfp_vdpa_vring {
uint64_t desc;
uint64_t avail;
@@ -40,12 +43,17 @@ struct nfp_vdpa_hw {
/** Software Live Migration */
bool sw_lm;
bool sw_fallback_running;
+
+ /** Mediated vring for SW fallback */
+ struct vring m_vring[NFP_VDPA_MAX_QUEUES * 2];
};
int nfp_vdpa_hw_init(struct nfp_vdpa_hw *vdpa_hw, struct rte_pci_device *dev);
int nfp_vdpa_hw_start(struct nfp_vdpa_hw *vdpa_hw, int vid);
+int nfp_vdpa_relay_hw_start(struct nfp_vdpa_hw *vdpa_hw, int vid);
+
void nfp_vdpa_hw_stop(struct nfp_vdpa_hw *vdpa_hw);
void nfp_vdpa_notify_queue(struct nfp_vdpa_hw *vdpa_hw, uint16_t qid);
--
2.39.1