From: yuying.zhang@intel.com
To: yuying.zhang@intel.com, dev@dpdk.org, qi.z.zhang@intel.com,
jingjing.wu@intel.com, beilei.xing@intel.com
Subject: [PATCH v8 6/9] net/cpfl: add fxp rule module
Date: Wed, 27 Sep 2023 12:54:13 +0000 [thread overview]
Message-ID: <20230927125416.2308974-7-yuying.zhang@intel.com> (raw)
In-Reply-To: <20230927125416.2308974-1-yuying.zhang@intel.com>
From: Yuying Zhang <yuying.zhang@intel.com>
Implement FXP rule creation and destruction.
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 31 ++++
drivers/net/cpfl/cpfl_ethdev.h | 6 +
drivers/net/cpfl/cpfl_fxp_rule.c | 296 +++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_fxp_rule.h | 68 +++++++
drivers/net/cpfl/meson.build | 1 +
5 files changed, 402 insertions(+)
create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a2bc6784d0..762fbddfe6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -16,6 +16,7 @@
#include <ethdev_private.h>
#include "cpfl_rxtx.h"
#include "cpfl_flow.h"
+#include "cpfl_rules.h"
#define CPFL_REPRESENTOR "representor"
#define CPFL_TX_SINGLE_Q "tx_single"
@@ -1127,6 +1128,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
adapter->vports[vport->sw_idx] = NULL;
+ idpf_free_dma_mem(NULL, &cpfl_vport->itf.flow_dma);
rte_free(cpfl_vport);
return 0;
@@ -2466,6 +2468,26 @@ cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
return 0;
}
+int
+cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma, uint32_t size,
+ int batch_size)
+{
+ int i;
+
+ if (!idpf_alloc_dma_mem(NULL, orig_dma, size * (1 + batch_size))) {
+ PMD_INIT_LOG(ERR, "Could not alloc dma memory");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < batch_size; i++) {
+ dma[i].va = (void *)((char *)orig_dma->va + size * (i + 1));
+ dma[i].pa = orig_dma->pa + size * (i + 1);
+ dma[i].size = size;
+ dma[i].zone = NULL;
+ }
+ return 0;
+}
+
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
@@ -2515,6 +2537,15 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
&dev->data->mac_addrs[0]);
+ memset(cpfl_vport->itf.dma, 0, sizeof(cpfl_vport->itf.dma));
+ memset(cpfl_vport->itf.msg, 0, sizeof(cpfl_vport->itf.msg));
+ ret = cpfl_alloc_dma_mem_batch(&cpfl_vport->itf.flow_dma,
+ cpfl_vport->itf.dma,
+ sizeof(union cpfl_rule_cfg_pkt_record),
+ CPFL_FLOW_BATCH_SIZE);
+ if (ret < 0)
+ goto err_mac_addrs;
+
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7f83d170d7..8eeeac9910 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -147,10 +147,14 @@ enum cpfl_itf_type {
TAILQ_HEAD(cpfl_flow_list, rte_flow);
+#define CPFL_FLOW_BATCH_SIZE 490
struct cpfl_itf {
enum cpfl_itf_type type;
struct cpfl_adapter_ext *adapter;
struct cpfl_flow_list flow_list;
+ struct idpf_dma_mem flow_dma;
+ struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
+ struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
void *data;
};
@@ -240,6 +244,8 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
int cpfl_vc_create_ctrl_vport(struct cpfl_adapter_ext *adapter);
int cpfl_config_ctlq_rx(struct cpfl_adapter_ext *adapter);
int cpfl_config_ctlq_tx(struct cpfl_adapter_ext *adapter);
+int cpfl_alloc_dma_mem_batch(struct idpf_dma_mem *orig_dma, struct idpf_dma_mem *dma,
+ uint32_t size, int batch_size);
#define CPFL_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
new file mode 100644
index 0000000000..50fac55432
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.c
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+#include "cpfl_ethdev.h"
+
+#include "cpfl_fxp_rule.h"
+#include "cpfl_logs.h"
+
+#define CTLQ_SEND_RETRIES 100
+#define CTLQ_RECEIVE_RETRIES 100
+
+int
+cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+ struct idpf_ctlq_msg q_msg[])
+{
+ struct idpf_ctlq_msg **msg_ptr_list;
+ u16 clean_count = 0;
+ int num_cleaned = 0;
+ int retries = 0;
+ int ret = 0;
+
+ msg_ptr_list = calloc(num_q_msg, sizeof(struct idpf_ctlq_msg *));
+ if (!msg_ptr_list) {
+ PMD_INIT_LOG(ERR, "no memory for cleaning ctlq");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
+ goto send_err;
+ }
+
+ while (retries <= CTLQ_SEND_RETRIES) {
+ clean_count = num_q_msg - num_cleaned;
+ ret = cpfl_vport_ctlq_clean_sq(cq, &clean_count,
+ &msg_ptr_list[num_cleaned]);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "clean ctlq failed: 0x%4x", ret);
+ goto send_err;
+ }
+
+ num_cleaned += clean_count;
+ retries++;
+ if (num_cleaned >= num_q_msg)
+ break;
+ rte_delay_us_sleep(10);
+ }
+
+ if (retries > CTLQ_SEND_RETRIES) {
+ PMD_INIT_LOG(ERR, "timed out while polling for completions");
+ ret = -1;
+ goto send_err;
+ }
+
+send_err:
+ if (msg_ptr_list)
+ free(msg_ptr_list);
+err:
+ return ret;
+}
+
+static int
+cpfl_process_rx_ctlq_msg(u16 num_q_msg, struct idpf_ctlq_msg *q_msg)
+{
+ u16 i;
+
+ if (!num_q_msg || !q_msg)
+ return -EINVAL;
+
+ for (i = 0; i < num_q_msg; i++) {
+ if (q_msg[i].status == CPFL_CFG_PKT_ERR_OK) {
+ continue;
+ } else if (q_msg[i].status == CPFL_CFG_PKT_ERR_EEXIST &&
+ q_msg[i].opcode == cpfl_ctlq_sem_add_rule) {
+ PMD_INIT_LOG(ERR, "The rule has confliction with already existed one");
+ return -EINVAL;
+ } else if (q_msg[i].status == CPFL_CFG_PKT_ERR_ENOTFND &&
+ q_msg[i].opcode == cpfl_ctlq_sem_del_rule) {
+ PMD_INIT_LOG(ERR, "The rule has already deleted");
+ return -EINVAL;
+ } else {
+ PMD_INIT_LOG(ERR, "Invalid rule");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int
+cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+ struct idpf_ctlq_msg q_msg[])
+{
+ int retries = 0;
+ struct idpf_dma_mem *dma;
+ u16 i;
+ uint16_t buff_cnt;
+ int ret = 0, handle_rule = 0;
+
+ retries = 0;
+ while (retries <= CTLQ_RECEIVE_RETRIES) {
+ rte_delay_us_sleep(10);
+ ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
+
+ if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
+ ret != CPFL_ERR_CTLQ_ERROR) {
+ PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
+ retries++;
+ continue;
+ }
+
+ if (ret == CPFL_ERR_CTLQ_NO_WORK) {
+ retries++;
+ continue;
+ }
+
+ if (ret == CPFL_ERR_CTLQ_EMPTY)
+ break;
+
+ ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "failed to process rx_ctrlq msg");
+ handle_rule = ret;
+ }
+
+ for (i = 0; i < num_q_msg; i++) {
+ if (q_msg[i].data_len > 0)
+ dma = q_msg[i].ctx.indirect.payload;
+ else
+ dma = NULL;
+
+ buff_cnt = dma ? 1 : 0;
+ ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "could not posted recv bufs\n");
+ }
+ break;
+ }
+
+ if (retries > CTLQ_RECEIVE_RETRIES) {
+ PMD_INIT_LOG(ERR, "timed out while polling for receive response");
+ ret = -1;
+ }
+
+ return ret + handle_rule;
+}
+
+static int
+cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+ struct idpf_ctlq_msg *msg)
+{
+ struct cpfl_mod_rule_info *minfo = &rinfo->mod;
+ union cpfl_rule_cfg_pkt_record *blob = NULL;
+ struct cpfl_rule_cfg_data cfg = {0};
+
+ /* prepare rule blob */
+ if (!dma->va) {
+ PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+ return -1;
+ }
+ blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+ memset(blob, 0, sizeof(*blob));
+ memset(&cfg, 0, sizeof(cfg));
+
+ /* fill info for both query and add/update */
+ cpfl_fill_rule_mod_content(minfo->mod_obj_size,
+ minfo->pin_mod_content,
+ minfo->mod_index,
+ &cfg.ext.mod_content);
+
+ /* only fill content for add/update */
+ memcpy(blob->mod_blob, minfo->mod_content,
+ minfo->mod_content_byte_len);
+
+#define NO_HOST_NEEDED 0
+ /* pack message */
+ cpfl_fill_rule_cfg_data_common(cpfl_ctlq_mod_add_update_rule,
+ rinfo->cookie,
+ 0, /* vsi_id not used for mod */
+ rinfo->port_num,
+ NO_HOST_NEEDED,
+ 0, /* time_sel */
+ 0, /* time_sel_val */
+ 0, /* cache_wr_thru */
+ rinfo->resp_req,
+ (u16)sizeof(*blob),
+ (void *)dma,
+ &cfg.common);
+ cpfl_prep_rule_desc(&cfg, msg);
+ return 0;
+}
+
+static int
+cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+ struct idpf_ctlq_msg *msg, bool add)
+{
+ union cpfl_rule_cfg_pkt_record *blob = NULL;
+ enum cpfl_ctlq_rule_cfg_opc opc;
+ struct cpfl_rule_cfg_data cfg;
+ uint16_t cfg_ctrl;
+
+ if (!dma->va) {
+ PMD_INIT_LOG(ERR, "dma mem passed to %s is null\n", __func__);
+ return -1;
+ }
+ blob = (union cpfl_rule_cfg_pkt_record *)dma->va;
+ memset(blob, 0, sizeof(*blob));
+ memset(msg, 0, sizeof(*msg));
+
+ if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+ cfg_ctrl = CPFL_GET_MEV_SEM_RULE_CFG_CTRL(rinfo->sem.prof_id,
+ rinfo->sem.sub_prof_id,
+ rinfo->sem.pin_to_cache,
+ rinfo->sem.fixed_fetch);
+ cpfl_prep_sem_rule_blob(rinfo->sem.key, rinfo->sem.key_byte_len,
+ rinfo->act_bytes, rinfo->act_byte_len,
+ cfg_ctrl, blob);
+ opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
+ } else {
+ PMD_INIT_LOG(ERR, "not support %d rule.", rinfo->type);
+ return -1;
+ }
+
+ cpfl_fill_rule_cfg_data_common(opc,
+ rinfo->cookie,
+ rinfo->vsi,
+ rinfo->port_num,
+ rinfo->host_id,
+ 0, /* time_sel */
+ 0, /* time_sel_val */
+ 0, /* cache_wr_thru */
+ rinfo->resp_req,
+ sizeof(union cpfl_rule_cfg_pkt_record),
+ dma,
+ &cfg.common);
+ cpfl_prep_rule_desc(&cfg, msg);
+ return 0;
+}
+
+static int
+cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
+ struct idpf_ctlq_msg *msg, bool add)
+{
+ int ret = 0;
+
+ if (rinfo->type == CPFL_RULE_TYPE_SEM) {
+ if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
+ ret = -1;
+ } else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
+ if (cpfl_mod_rule_pack(rinfo, dma, msg) < 0)
+ ret = -1;
+ } else {
+ PMD_INIT_LOG(ERR, "Invalid type of rule");
+ ret = -1;
+ }
+
+ return ret;
+}
+
+int
+cpfl_rule_process(struct cpfl_itf *itf,
+ struct idpf_ctlq_info *tx_cq,
+ struct idpf_ctlq_info *rx_cq,
+ struct cpfl_rule_info *rinfo,
+ int rule_num,
+ bool add)
+{
+ struct idpf_hw *hw = &itf->adapter->base.hw;
+ int i;
+ int ret = 0;
+
+ if (rule_num == 0)
+ return 0;
+
+ for (i = 0; i < rule_num; i++) {
+ ret = cpfl_rule_pack(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Could not pack rule");
+ return ret;
+ }
+ }
+ ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to send control message");
+ return ret;
+ }
+ ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to update rule");
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
new file mode 100644
index 0000000000..ed757b80b1
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_fxp_rule.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_FXP_RULE_H_
+#define _CPFL_FXP_RULE_H_
+
+#include "cpfl_rules.h"
+
+#define CPFL_MAX_KEY_LEN 128
+#define CPFL_MAX_RULE_ACTIONS 32
+
+struct cpfl_sem_rule_info {
+ uint16_t prof_id;
+ uint8_t sub_prof_id;
+ uint8_t key[CPFL_MAX_KEY_LEN];
+ uint8_t key_byte_len;
+ uint8_t pin_to_cache;
+ uint8_t fixed_fetch;
+};
+
+#define CPFL_MAX_MOD_CONTENT_LEN 256
+struct cpfl_mod_rule_info {
+ uint8_t mod_content[CPFL_MAX_MOD_CONTENT_LEN];
+ uint8_t mod_content_byte_len;
+ uint32_t mod_index;
+ uint8_t pin_mod_content;
+ uint8_t mod_obj_size;
+};
+
+enum cpfl_rule_type {
+ CPFL_RULE_TYPE_NONE,
+ CPFL_RULE_TYPE_SEM,
+ CPFL_RULE_TYPE_MOD
+};
+
+struct cpfl_rule_info {
+ enum cpfl_rule_type type;
+ uint64_t cookie;
+ uint8_t host_id;
+ uint8_t port_num;
+ uint8_t resp_req;
+ /* TODO: change this to be dynamically allocated/reallocated */
+ uint8_t act_bytes[CPFL_MAX_RULE_ACTIONS * sizeof(union cpfl_action_set)];
+ uint8_t act_byte_len;
+ /* vsi is used for lem and lpm rules */
+ uint16_t vsi;
+ uint8_t clear_mirror_1st_state;
+ /* mod related fields */
+ union {
+ struct cpfl_mod_rule_info mod;
+ struct cpfl_sem_rule_info sem;
+ };
+};
+
+extern struct cpfl_vport_ext *vport;
+
+int cpfl_rule_process(struct cpfl_itf *itf,
+ struct idpf_ctlq_info *tx_cq,
+ struct idpf_ctlq_info *rx_cq,
+ struct cpfl_rule_info *rinfo,
+ int rule_num,
+ bool add);
+int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+ struct idpf_ctlq_msg q_msg[]);
+int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
+ struct idpf_ctlq_msg q_msg[]);
+#endif /*CPFL_FXP_RULE_H*/
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index e2b6621cea..6118a16329 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -45,6 +45,7 @@ if dpdk_conf.has('RTE_HAS_JANSSON')
sources += files(
'cpfl_flow.c',
'cpfl_flow_parser.c',
+ 'cpfl_fxp_rule.c',
)
ext_deps += jansson_dep
endif
--
2.34.1
next prev parent reply other threads:[~2023-09-27 12:55 UTC|newest]
Thread overview: 128+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-08-12 7:55 [PATCH v1 0/5] add rte flow support for cpfl Yuying Zhang
2023-08-12 7:55 ` [PATCH v1 1/5] net/cpfl: setup rte flow skeleton Yuying Zhang
2023-08-25 3:55 ` Xing, Beilei
2023-08-12 7:55 ` [PATCH v1 2/5] common/idpf/base: refine idpf ctlq message structure Yuying Zhang
2023-08-25 5:55 ` Xing, Beilei
2023-08-12 7:55 ` [PATCH v1 3/5] net/cpfl: add cpfl control queue message handle Yuying Zhang
2023-08-25 6:23 ` Xing, Beilei
2023-08-12 7:55 ` [PATCH v1 4/5] net/cpfl: add fxp rule module Yuying Zhang
2023-08-25 7:35 ` Xing, Beilei
2023-08-25 8:42 ` Xing, Beilei
2023-08-12 7:55 ` [PATCH v1 5/5] net/cpfl: add fxp flow engine Yuying Zhang
2023-08-25 9:15 ` Xing, Beilei
2023-09-01 11:31 ` [PATCH v2 0/8] add rte flow support for cpfl Yuying Zhang
2023-09-01 11:31 ` [PATCH v2 1/8] net/cpfl: parse flow parser file in devargs Yuying Zhang
2023-09-01 11:31 ` [PATCH v2 2/8] net/cpfl: add flow json parser Yuying Zhang
2023-09-01 11:31 ` [PATCH v2 3/8] net/cpfl: add FXP low level implementation Yuying Zhang
2023-09-01 11:31 ` [PATCH v2 4/8] net/cpfl: setup ctrl path Yuying Zhang
2023-09-01 11:31 ` [PATCH v2 5/8] net/cpfl: set up rte flow skeleton Yuying Zhang
2023-09-01 11:31 ` [PATCH v2 6/8] net/cpfl: add fxp rule module Yuying Zhang
2023-09-01 11:31 ` [PATCH v2 7/8] net/cpfl: add fxp flow engine Yuying Zhang
2023-09-01 11:31 ` [PATCH v2 8/8] net/cpfl: add flow support for representor Yuying Zhang
2023-09-06 9:33 ` [PATCH v3 0/9] add rte flow support for cpfl Wenjing Qiao
2023-08-15 16:50 ` [PATCH v4 " Zhang, Yuying
2023-08-15 16:50 ` [PATCH v4 1/9] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
2023-09-15 15:11 ` Stephen Hemminger
2023-08-15 16:50 ` [PATCH v4 2/9] net/cpfl: add mod rule parser support for rte flow Zhang, Yuying
2023-08-15 16:50 ` [PATCH v4 3/9] net/cpfl: set up rte flow skeleton Zhang, Yuying
2023-08-15 16:50 ` [PATCH v4 4/9] net/cpfl: add FXP low level implementation Zhang, Yuying
2023-08-15 16:50 ` [PATCH v4 5/9] net/cpfl: add fxp rule module Zhang, Yuying
2023-08-15 16:50 ` [PATCH v4 6/9] net/cpfl: add fxp flow engine Zhang, Yuying
2023-08-15 16:50 ` [PATCH v4 7/9] net/cpfl: add flow support for representor Zhang, Yuying
2023-08-15 16:50 ` [PATCH v4 8/9] app/test-pmd: refine encap content Zhang, Yuying
2023-08-15 16:50 ` [PATCH v4 9/9] net/cpfl: fix incorrect status calculation Zhang, Yuying
2023-09-06 9:33 ` [PATCH v3 1/9] net/cpfl: parse flow parser file in devargs Wenjing Qiao
2023-09-11 0:48 ` Wu, Jingjing
2023-09-06 9:34 ` [PATCH v3 2/9] net/cpfl: add flow json parser Wenjing Qiao
2023-09-08 6:26 ` Liu, Mingxia
2023-09-11 6:24 ` Wu, Jingjing
2023-09-06 9:34 ` [PATCH v3 3/9] net/cpfl: add FXP low level implementation Wenjing Qiao
2023-09-06 9:34 ` [PATCH v3 4/9] net/cpfl: setup ctrl path Wenjing Qiao
2023-09-11 6:30 ` Liu, Mingxia
2023-09-11 6:36 ` Wu, Jingjing
2023-09-06 9:34 ` [PATCH v3 5/9] net/cpfl: set up rte flow skeleton Wenjing Qiao
2023-09-06 9:34 ` [PATCH v3 6/9] net/cpfl: add fxp rule module Wenjing Qiao
2023-09-12 7:40 ` FW: " Liu, Mingxia
2023-09-06 9:34 ` [PATCH v3 7/9] net/cpfl: add fxp flow engine Wenjing Qiao
2023-09-06 9:34 ` [PATCH v3 8/9] net/cpfl: add flow support for representor Wenjing Qiao
2023-09-06 9:34 ` [PATCH v3 9/9] app/test-pmd: refine encap content Wenjing Qiao
2023-09-15 10:00 ` [PATCH v5 0/9] add rte flow support for cpfl Zhang, Yuying
2023-08-22 1:02 ` [PATCH v6 0/8] " Zhang, Yuying
2023-08-22 1:02 ` [PATCH v6 1/8] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
2023-08-22 1:02 ` [PATCH v6 2/8] net/cpfl: add mod rule parser support for rte flow Zhang, Yuying
2023-08-22 1:02 ` [PATCH v6 3/8] net/cpfl: set up rte flow skeleton Zhang, Yuying
2023-08-22 1:02 ` [PATCH v6 4/8] net/cpfl: set up control path Zhang, Yuying
2023-08-22 1:02 ` [PATCH v6 5/8] net/cpfl: add FXP low level implementation Zhang, Yuying
2023-08-22 1:02 ` [PATCH v6 6/8] net/cpfl: add fxp rule module Zhang, Yuying
2023-08-22 1:02 ` [PATCH v6 7/8] net/cpfl: add fxp flow engine Zhang, Yuying
2023-08-22 1:02 ` [PATCH v6 8/8] net/cpfl: add flow support for representor Zhang, Yuying
2023-09-26 18:16 ` [PATCH v7 0/8] add rte flow support for cpfl yuying.zhang
2023-09-26 18:16 ` [PATCH v7 1/8] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
2023-09-26 19:03 ` Stephen Hemminger
2023-09-27 1:21 ` Zhang, Qi Z
2023-09-26 18:16 ` [PATCH v7 2/8] net/cpfl: build action mapping rules from JSON yuying.zhang
2023-09-26 18:16 ` [PATCH v7 3/8] net/cpfl: set up rte flow skeleton yuying.zhang
2023-09-26 18:16 ` [PATCH v7 4/8] net/cpfl: set up control path yuying.zhang
2023-09-26 18:17 ` [PATCH v7 5/8] net/cpfl: add FXP low level implementation yuying.zhang
2023-09-26 18:17 ` [PATCH v7 6/8] net/cpfl: add fxp rule module yuying.zhang
2023-09-28 3:29 ` Zhang, Qi Z
2023-09-26 18:17 ` [PATCH v7 7/8] net/cpfl: add fxp flow engine yuying.zhang
2023-09-26 18:17 ` [PATCH v7 8/8] net/cpfl: add flow support for representor yuying.zhang
2023-09-27 12:54 ` [PATCH v8 0/9] add rte flow support for cpfl yuying.zhang
2023-09-27 12:54 ` [PATCH v8 1/9] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
2023-09-27 12:54 ` [PATCH v8 2/9] net/cpfl: build action mapping rules from JSON yuying.zhang
2023-09-27 12:54 ` [PATCH v8 3/9] net/cpfl: set up rte flow skeleton yuying.zhang
2023-09-27 12:54 ` [PATCH v8 4/9] net/cpfl: set up control path yuying.zhang
2023-09-27 12:54 ` [PATCH v8 5/9] net/cpfl: add FXP low level implementation yuying.zhang
2023-09-27 12:54 ` yuying.zhang [this message]
2023-09-27 12:54 ` [PATCH v8 7/9] net/cpfl: add fxp flow engine yuying.zhang
2023-09-27 12:54 ` [PATCH v8 8/9] net/cpfl: add flow support for representor yuying.zhang
2023-09-27 12:54 ` [PATCH v8 9/9] net/cpfl: add support of to represented port action yuying.zhang
2023-09-28 3:37 ` [PATCH v8 0/9] add rte flow support for cpfl Zhang, Qi Z
2023-09-28 8:44 ` [PATCH v9 " yuying.zhang
2023-09-08 16:05 ` [PATCH v10 " Zhang, Yuying
2023-09-08 16:05 ` [PATCH v10 1/9] net/cpfl: parse flow offloading hint from JSON Zhang, Yuying
2023-09-08 16:05 ` [PATCH v10 2/9] net/cpfl: build action mapping rules " Zhang, Yuying
2023-09-08 16:05 ` [PATCH v10 3/9] net/cpfl: set up flow offloading skeleton Zhang, Yuying
2023-09-08 16:05 ` [PATCH v10 4/9] net/cpfl: set up control path Zhang, Yuying
2023-09-08 16:05 ` [PATCH v10 5/9] net/cpfl: add FXP low level implementation Zhang, Yuying
2023-09-08 16:05 ` [PATCH v10 6/9] net/cpfl: implement FXP rule creation and destroying Zhang, Yuying
2023-09-08 16:05 ` [PATCH v10 7/9] net/cpfl: adapt FXP to flow engine Zhang, Yuying
2023-09-08 16:05 ` [PATCH v10 8/9] net/cpfl: support flow ops on representor Zhang, Yuying
2023-09-08 16:05 ` [PATCH v10 9/9] net/cpfl: support represented port action Zhang, Yuying
2023-09-28 8:44 ` [PATCH v9 1/9] net/cpfl: add json parser for rte flow pattern rules yuying.zhang
2023-09-28 8:44 ` [PATCH v9 2/9] net/cpfl: build action mapping rules from JSON yuying.zhang
2023-09-28 8:44 ` [PATCH v9 3/9] net/cpfl: set up rte flow skeleton yuying.zhang
2023-10-15 13:01 ` Thomas Monjalon
2023-10-16 3:07 ` Zhang, Qi Z
2023-09-28 8:44 ` [PATCH v9 4/9] net/cpfl: set up control path yuying.zhang
2023-09-28 8:44 ` [PATCH v9 5/9] net/cpfl: add FXP low level implementation yuying.zhang
2023-09-28 8:44 ` [PATCH v9 6/9] net/cpfl: add fxp rule module yuying.zhang
2023-09-28 8:44 ` [PATCH v9 7/9] net/cpfl: add fxp flow engine yuying.zhang
2023-09-28 8:44 ` [PATCH v9 8/9] net/cpfl: add flow support for representor yuying.zhang
2023-09-28 8:44 ` [PATCH v9 9/9] net/cpfl: add support of to represented port action yuying.zhang
2023-09-28 12:45 ` [PATCH v9 0/9] add rte flow support for cpfl Zhang, Qi Z
2023-09-28 16:04 ` Stephen Hemminger
2023-10-09 4:00 ` [PATCH v10 " Zhang, Yuying
2023-10-09 4:00 ` [PATCH v10 1/9] net/cpfl: parse flow offloading hint from JSON Zhang, Yuying
2023-10-09 4:00 ` [PATCH v10 2/9] net/cpfl: build action mapping rules " Zhang, Yuying
2023-10-09 4:00 ` [PATCH v10 3/9] net/cpfl: set up flow offloading skeleton Zhang, Yuying
2023-10-09 4:00 ` [PATCH v10 4/9] net/cpfl: set up control path Zhang, Yuying
2023-10-09 4:00 ` [PATCH v10 5/9] net/cpfl: add FXP low level implementation Zhang, Yuying
2023-10-09 4:00 ` [PATCH v10 6/9] net/cpfl: implement FXP rule creation and destroying Zhang, Yuying
2023-10-09 4:00 ` [PATCH v10 7/9] net/cpfl: adapt FXP to flow engine Zhang, Yuying
2023-10-09 4:00 ` [PATCH v10 8/9] net/cpfl: support flow ops on representor Zhang, Yuying
2023-10-09 4:00 ` [PATCH v10 9/9] net/cpfl: support represented port action Zhang, Yuying
2023-10-10 1:31 ` [PATCH v10 0/9] add rte flow support for cpfl Zhang, Qi Z
2023-10-15 11:21 ` [PATCH v9 " Thomas Monjalon
2023-09-15 10:00 ` [PATCH v5 1/9] net/cpfl: add json parser for rte flow pattern rules Zhang, Yuying
2023-09-15 11:14 ` Zhang, Qi Z
2023-09-15 10:00 ` [PATCH v5 2/9] net/cpfl: add mod rule parser support for rte flow Zhang, Yuying
2023-09-15 10:00 ` [PATCH v5 3/9] net/cpfl: set up rte flow skeleton Zhang, Yuying
2023-09-15 10:00 ` [PATCH v5 4/9] net/cpfl: add FXP low level implementation Zhang, Yuying
2023-09-15 11:19 ` Zhang, Qi Z
2023-09-15 10:00 ` [PATCH v5 5/9] net/cpfl: add fxp rule module Zhang, Yuying
2023-09-15 10:00 ` [PATCH v5 6/9] net/cpfl: add fxp flow engine Zhang, Yuying
2023-09-15 10:00 ` [PATCH v5 7/9] net/cpfl: add flow support for representor Zhang, Yuying
2023-09-15 10:00 ` [PATCH v5 8/9] app/test-pmd: refine encap content Zhang, Yuying
2023-09-15 10:00 ` [PATCH v5 9/9] net/cpfl: fix incorrect status calculation Zhang, Yuying
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230927125416.2308974-7-yuying.zhang@intel.com \
--to=yuying.zhang@intel.com \
--cc=beilei.xing@intel.com \
--cc=dev@dpdk.org \
--cc=jingjing.wu@intel.com \
--cc=qi.z.zhang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).