From: Wenjing Qiao <wenjing.qiao@intel.com>
To: yuying.zhang@intel.com, dev@dpdk.org, qi.z.zhang@intel.com,
jingjing.wu@intel.com, beilei.xing@intel.com
Cc: mingxia.liu@intel.com, Wenjing Qiao <wenjing.qiao@intel.com>
Subject: [PATCH v3 4/9] net/cpfl: set up ctrl path
Date: Wed, 6 Sep 2023 09:34:02 +0000
Message-ID: <20230906093407.3635038-5-wenjing.qiao@intel.com>
In-Reply-To: <20230906093407.3635038-1-wenjing.qiao@intel.com>

Set up the control vport and control queues for flow offloading.
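
The control vport is created as a default vport in the single queue
model, with CPFL_TX_CFGQ_NUM Tx and CPFL_RX_CFGQ_NUM Rx config queues.
The config queues are allocated interleaved (even indices Tx, odd
indices Rx), added to the control queue layer, configured over
virtchnl, and enabled when the adapter is initialized; the whole
control path is torn down again on adapter deinit.
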
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Wenjing Qiao <wenjing.qiao@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 267 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_ethdev.h | 14 ++
drivers/net/cpfl/cpfl_vchnl.c | 144 ++++++++++++++++++
3 files changed, 425 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 3c4a6a4724..22f3e72894 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1657,6 +1657,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, uint
return;
}
+ /* Ignore the event if it comes from the control vport. */
+ if (adapter->ctrl_vport.base.vport_id == vc_event->vport_id)
+ return;
+
vport = cpfl_find_vport(adapter, vc_event->vport_id);
if (!vport) {
PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -1852,6 +1856,260 @@ cpfl_dev_alarm_handler(void *param)
rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
}
+static int
+cpfl_stop_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+ int i, ret;
+
+ for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+ ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, false);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to disable Tx config queue.");
+ return ret;
+ }
+ }
+
+ for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+ ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, false);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to disable Rx config queue.");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+cpfl_start_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+ int i, ret;
+
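+ /* Configure the queues over virtchnl first; only then can they be enabled. */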
+ ret = cpfl_config_ctlq_tx(adapter);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to configure Tx config queue.");
+ return ret;
+ }
+
+ ret = cpfl_config_ctlq_rx(adapter);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to configure Rx config queue.");
+ return ret;
+ }
+
+ for (i = 0; i < CPFL_TX_CFGQ_NUM; i++) {
+ ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, false, true);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to enable Tx config queue.");
+ return ret;
+ }
+ }
+
+ for (i = 0; i < CPFL_RX_CFGQ_NUM; i++) {
+ ret = idpf_vc_queue_switch(&adapter->ctrl_vport.base, i, true, true);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to enable Rx config queue.");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void
+cpfl_remove_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+ struct idpf_hw *hw = (struct idpf_hw *)(&adapter->base.hw);
+ struct cpfl_ctlq_create_info *create_cfgq_info;
+ int i;
+
+ create_cfgq_info = adapter->cfgq_info;
+
+ for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+ cpfl_vport_ctlq_remove(hw, adapter->ctlqp[i]);
+ if (create_cfgq_info[i].ring_mem.va)
+ idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+ if (create_cfgq_info[i].buf_mem.va)
+ idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+ }
+}
+
+static int
+cpfl_add_cfgqs(struct cpfl_adapter_ext *adapter)
+{
+ struct idpf_ctlq_info *cfg_cq;
+ int ret = 0;
+ int i = 0;
+
+ for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+ cfg_cq = NULL;
+ ret = cpfl_vport_ctlq_add((struct idpf_hw *)(&adapter->base.hw),
+ &adapter->cfgq_info[i],
+ &cfg_cq);
+ if (ret || !cfg_cq) {
+ PMD_DRV_LOG(ERR, "ctlq add failed for queue id: %d",
+ adapter->cfgq_info[i].id);
+ cpfl_remove_cfgqs(adapter);
+ return ret;
+ }
+ PMD_DRV_LOG(INFO, "added cfgq to hw. queue id: %d",
+ adapter->cfgq_info[i].id);
+ adapter->ctlqp[i] = cfg_cq;
+ }
+
+ return ret;
+}
+
+#define CPFL_CFGQ_RING_LEN 512
+#define CPFL_CFGQ_DESCRIPTOR_SIZE 32
+#define CPFL_CFGQ_BUFFER_SIZE 256
+#define CPFL_CFGQ_RING_SIZE 512
+
+static int
+cpfl_cfgq_setup(struct cpfl_adapter_ext *adapter)
+{
+ struct cpfl_ctlq_create_info *create_cfgq_info;
+ struct cpfl_vport *vport;
+ int i, err;
+ uint32_t ring_size = CPFL_CFGQ_RING_SIZE * sizeof(struct idpf_ctlq_desc);
+ uint32_t buf_size = CPFL_CFGQ_RING_SIZE * CPFL_CFGQ_BUFFER_SIZE;
+
+ vport = &adapter->ctrl_vport;
+ create_cfgq_info = adapter->cfgq_info;
+
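+ /* Tx and Rx config queues are interleaved: even indices are Tx, odd indices are Rx. */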
+ for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+ if (i % 2 == 0) {
+ /* Setup Tx config queue */
+ create_cfgq_info[i].id = vport->base.chunks_info.tx_start_qid + i / 2;
+ create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_TX;
+ create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+ create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+ memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+ create_cfgq_info[i].reg.tail = vport->base.chunks_info.tx_qtail_start +
+ i / 2 * vport->base.chunks_info.tx_qtail_spacing;
+ } else {
+ /* Setup Rx config queue */
+ create_cfgq_info[i].id = vport->base.chunks_info.rx_start_qid + i / 2;
+ create_cfgq_info[i].type = IDPF_CTLQ_TYPE_CONFIG_RX;
+ create_cfgq_info[i].len = CPFL_CFGQ_RING_SIZE;
+ create_cfgq_info[i].buf_size = CPFL_CFGQ_BUFFER_SIZE;
+ memset(&create_cfgq_info[i].reg, 0, sizeof(struct idpf_ctlq_reg));
+ create_cfgq_info[i].reg.tail = vport->base.chunks_info.rx_qtail_start +
+ i / 2 * vport->base.chunks_info.rx_qtail_spacing;
+ if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem,
+ buf_size)) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+ }
+ if (!idpf_alloc_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem,
+ ring_size)) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+ }
+ return 0;
+free_mem:
+ for (i = 0; i < CPFL_CFGQ_NUM; i++) {
+ if (create_cfgq_info[i].ring_mem.va)
+ idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].ring_mem);
+ if (create_cfgq_info[i].buf_mem.va)
+ idpf_free_dma_mem(&adapter->base.hw, &create_cfgq_info[i].buf_mem);
+ }
+ return err;
+}
+
+static int
+cpfl_init_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+ struct cpfl_vport *vport = &adapter->ctrl_vport;
+ struct virtchnl2_create_vport *vport_info =
+ (struct virtchnl2_create_vport *)adapter->ctrl_vport_recv_info;
+ int i;
+
+ vport->itf.adapter = adapter;
+ vport->base.adapter = &adapter->base;
+ vport->base.vport_id = vport_info->vport_id;
+
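+ /* Record queue ids and tail register offsets from the CREATE_VPORT response chunks. */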
+ for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+ if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_TX) {
+ vport->base.chunks_info.tx_start_qid =
+ vport_info->chunks.chunks[i].start_queue_id;
+ vport->base.chunks_info.tx_qtail_start =
+ vport_info->chunks.chunks[i].qtail_reg_start;
+ vport->base.chunks_info.tx_qtail_spacing =
+ vport_info->chunks.chunks[i].qtail_reg_spacing;
+ } else if (vport_info->chunks.chunks[i].type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ vport->base.chunks_info.rx_start_qid =
+ vport_info->chunks.chunks[i].start_queue_id;
+ vport->base.chunks_info.rx_qtail_start =
+ vport_info->chunks.chunks[i].qtail_reg_start;
+ vport->base.chunks_info.rx_qtail_spacing =
+ vport_info->chunks.chunks[i].qtail_reg_spacing;
+ } else {
+ PMD_INIT_LOG(ERR, "Unsupported chunk type");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void
+cpfl_ctrl_path_close(struct cpfl_adapter_ext *adapter)
+{
+ cpfl_stop_cfgqs(adapter);
+ cpfl_remove_cfgqs(adapter);
+ idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+}
+
+static int
+cpfl_ctrl_path_open(struct cpfl_adapter_ext *adapter)
+{
+ int ret;
+
+ ret = cpfl_vc_create_ctrl_vport(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to create control vport");
+ return ret;
+ }
+
+ ret = cpfl_init_ctrl_vport(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init control vport");
+ goto err_init_ctrl_vport;
+ }
+
+ ret = cpfl_cfgq_setup(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup control queues");
+ goto err_cfgq_setup;
+ }
+
+ ret = cpfl_add_cfgqs(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to add control queues");
+ goto err_add_cfgq;
+ }
+
+ ret = cpfl_start_cfgqs(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to start control queues");
+ goto err_start_cfgqs;
+ }
+
+ return 0;
+
+err_start_cfgqs:
+ cpfl_stop_cfgqs(adapter);
+err_add_cfgq:
+ cpfl_remove_cfgqs(adapter);
+err_cfgq_setup:
+err_init_ctrl_vport:
+ idpf_vc_vport_destroy(&adapter->ctrl_vport.base);
+
+ return ret;
+}
+
static struct virtchnl2_get_capabilities req_caps = {
.csum_caps =
VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
@@ -2019,6 +2277,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
goto err_vports_alloc;
}
+ ret = cpfl_ctrl_path_open(adapter);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup control path");
+ goto err_create_ctrl_vport;
+ }
+
adapter->cur_vports = 0;
adapter->cur_vport_nb = 0;
@@ -2026,6 +2290,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext *a
return ret;
+err_create_ctrl_vport:
+ rte_free(adapter->vports);
err_vports_alloc:
rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
cpfl_repr_whitelist_uninit(adapter);
@@ -2260,6 +2526,7 @@ cpfl_find_adapter_ext(struct rte_pci_device *pci_dev)
static void
cpfl_adapter_ext_deinit(struct cpfl_adapter_ext *adapter)
{
+ cpfl_ctrl_path_close(adapter);
rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
cpfl_vport_map_uninit(adapter);
idpf_adapter_deinit(&adapter->base);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 2151605987..40bba8da00 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -22,6 +22,7 @@
#include "cpfl_logs.h"
#include "cpfl_cpchnl.h"
#include "cpfl_representor.h"
+#include "cpfl_controlq.h"
/* Currently, backend supports up to 8 vports */
#define CPFL_MAX_VPORT_NUM 8
@@ -89,6 +90,10 @@
#define CPFL_FLOW_FILE_LEN 100
+#define CPFL_RX_CFGQ_NUM 4
+#define CPFL_TX_CFGQ_NUM 4
+#define CPFL_CFGQ_NUM 8
+
#define CPFL_INVALID_HW_ID UINT16_MAX
#define CPFL_META_CHUNK_LENGTH 1024
#define CPFL_META_LENGTH 32
@@ -204,11 +209,20 @@ struct cpfl_adapter_ext {
rte_spinlock_t repr_lock;
struct rte_hash *repr_whitelist_hash;
+ /* ctrl vport and ctrl queues. */
+ struct cpfl_vport ctrl_vport;
+ uint8_t ctrl_vport_recv_info[IDPF_DFLT_MBX_BUF_SIZE];
+ struct idpf_ctlq_info *ctlqp[CPFL_CFGQ_NUM];
+ struct cpfl_ctlq_create_info cfgq_info[CPFL_CFGQ_NUM];
+
struct cpfl_metadata meta;
};
TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
+int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
+int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
struct cpfl_vport_id *vport_identity,
struct cpchnl2_vport_info *vport_info);
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
index a21a4a451f..932840a972 100644
--- a/drivers/net/cpfl/cpfl_vchnl.c
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -70,3 +70,147 @@ cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
return 0;
}
+
+int
+cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter)
+{
+ struct virtchnl2_create_vport vport_msg;
+ struct idpf_cmd_info args;
+ int err = -1;
+
+ memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
+ vport_msg.vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+ vport_msg.txq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+ vport_msg.rxq_model = rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
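+ /* Single queue model: no Tx completion or Rx buffer queues are needed. */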
+ vport_msg.num_tx_q = CPFL_TX_CFGQ_NUM;
+ vport_msg.num_tx_complq = 0;
+ vport_msg.num_rx_q = CPFL_RX_CFGQ_NUM;
+ vport_msg.num_rx_bufq = 0;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CREATE_VPORT;
+ args.in_args = (uint8_t *)&vport_msg;
+ args.in_args_size = sizeof(vport_msg);
+ args.out_buffer = adapter->base.mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(&adapter->base, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
+ return err;
+ }
+
+ rte_memcpy(adapter->ctrl_vport_recv_info, args.out_buffer,
+ IDPF_DFLT_MBX_BUF_SIZE);
+ return err;
+}
+
+int
+cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter)
+{
+ struct cpfl_vport *vport = &adapter->ctrl_vport;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct virtchnl2_rxq_info *rxq_info;
+ struct idpf_cmd_info args;
+ uint16_t num_qs;
+ int size, err, i;
+
+ if (vport->base.rxq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_DRV_LOG(ERR, "This rxq model isn't supported.");
+ err = -EINVAL;
+ return err;
+ }
+
+ num_qs = CPFL_RX_CFGQ_NUM;
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (!vc_rxqs) {
+ PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->base.vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+
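+ /* Rx config queues sit at the odd indices (2 * i + 1) of the interleaved cfgq arrays. */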
+ for (i = 0; i < num_qs; i++) {
+ rxq_info = &vc_rxqs->qinfo[i];
+ rxq_info->dma_ring_addr = adapter->ctlqp[2 * i + 1]->desc_ring.pa;
+ rxq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_RX;
+ rxq_info->queue_id = adapter->cfgq_info[2 * i + 1].id;
+ rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ rxq_info->data_buffer_size = adapter->cfgq_info[2 * i + 1].buf_size;
+ rxq_info->max_pkt_size = vport->base.max_pkt_len;
+ rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+ rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+ rxq_info->ring_len = adapter->cfgq_info[2 * i + 1].len;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->base.mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(&adapter->base, &args);
+ rte_free(vc_rxqs);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
+int
+cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter)
+{
+ struct cpfl_vport *vport = &adapter->ctrl_vport;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct virtchnl2_txq_info *txq_info;
+ struct idpf_cmd_info args;
+ uint16_t num_qs;
+ int size, err, i;
+
+ if (vport->base.txq_model != VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_DRV_LOG(ERR, "This txq model isn't supported.");
+ err = -EINVAL;
+ return err;
+ }
+
+ num_qs = CPFL_TX_CFGQ_NUM;
+ size = sizeof(*vc_txqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (!vc_txqs) {
+ PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->base.vport_id;
+ vc_txqs->num_qinfo = num_qs;
+
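+ /* Tx config queues sit at the even indices (2 * i) of the interleaved cfgq arrays. */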
+ for (i = 0; i < num_qs; i++) {
+ txq_info = &vc_txqs->qinfo[i];
+ txq_info->dma_ring_addr = adapter->ctlqp[2 * i]->desc_ring.pa;
+ txq_info->type = VIRTCHNL2_QUEUE_TYPE_CONFIG_TX;
+ txq_info->queue_id = adapter->cfgq_info[2 * i].id;
+ txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+ txq_info->ring_len = adapter->cfgq_info[2 * i].len;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->base.mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(&adapter->base, &args);
+ rte_free(vc_txqs);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
--
2.34.1