From: Chaoyong He <chaoyong.he@corigine.com>
To: dev@dpdk.org
Cc: oss-drivers@corigine.com, niklas.soderlund@corigine.com,
Chaoyong He <chaoyong.he@corigine.com>
Subject: [PATCH v2 07/25] net/nfp: prepare for IPv4 UDP tunnel decap flow action
Date: Sat, 22 Oct 2022 16:24:11 +0800
Message-ID: <1666427069-10553-8-git-send-email-chaoyong.he@corigine.com>
In-Reply-To: <1666427069-10553-1-git-send-email-chaoyong.he@corigine.com>

Add the related data structures and functions to prepare for
the decap action of IPv4 UDP tunnel.
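
For reference, these helpers back the generic rte_flow tunnel offload
API exposed through the .tunnel_decap_set and .tunnel_action_decap_release
callbacks registered at the end of this patch. A minimal application-side
sketch (the VXLAN tunnel type and port id are illustrative only; in this
patch the decap_set switch still has no per-tunnel cases, so the returned
action list is empty):

    #include <rte_flow.h>

    /* Ask the PMD for its tunnel-specific decap actions, use them
     * when creating the flow rule, then hand them back to the PMD. */
    static int
    tunnel_decap_sketch(uint16_t port_id)
    {
        struct rte_flow_error err;
        struct rte_flow_action *pmd_actions;
        uint32_t num_pmd_actions;
        struct rte_flow_tunnel tunnel = {
            .type = RTE_FLOW_ITEM_TYPE_VXLAN,
            .tun_id = 1,
        };
        int ret;

        ret = rte_flow_tunnel_decap_set(port_id, &tunnel,
                &pmd_actions, &num_pmd_actions, &err);
        if (ret != 0)
            return ret;

        /* ... prepend pmd_actions to the action list passed to
         * rte_flow_create() ... */

        return rte_flow_tunnel_action_decap_release(port_id,
                pmd_actions, num_pmd_actions, &err);
    }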
Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderlund@corigine.com>
---
drivers/net/nfp/flower/nfp_flower_cmsg.c | 118 ++++++++
drivers/net/nfp/flower/nfp_flower_cmsg.h | 94 +++++++
drivers/net/nfp/nfp_flow.c | 461 ++++++++++++++++++++++++++++++-
drivers/net/nfp/nfp_flow.h | 17 ++
4 files changed, 675 insertions(+), 15 deletions(-)
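
Note for reviewers (illustrative, not part of the patch itself): the
helpers added below are expected to chain as follows when a follow-up
patch in this series installs a decap flow. The function names are the
real ones added here, but the caller and its ordering are a sketch
only, mirroring the delete path in nfp_pre_tun_table_check_del():

    /* Hypothetical caller inside nfp_flow.c. */
    static int
    nfp_decap_prepare_sketch(struct nfp_flower_representor *repr,
            struct nfp_fl_rule_metadata *nfp_flow_meta,
            rte_be32_t tun_dst)
    {
        uint16_t nfp_mac_idx = 0;
        int ret;

        /* Reserve (or ref-count) a MAC index in the pre tunnel table. */
        ret = nfp_pre_tun_table_check_add(repr, &nfp_mac_idx);
        if (ret != 0)
            return ret;

        nfp_mac_idx = (nfp_mac_idx << 8) |
                NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT |
                NFP_TUN_PRE_TUN_IDX_BIT;

        /* Bind the representor MAC to that index in the firmware. */
        ret = nfp_flower_cmsg_tun_mac_rule(repr->app_fw_flower,
                &repr->mac_addr, nfp_mac_idx, false);
        if (ret != 0)
            return ret;

        /* Steer matching tunnel packets to the host context. */
        ret = nfp_flower_cmsg_pre_tunnel_rule(repr->app_fw_flower,
                nfp_flow_meta, nfp_mac_idx, false);
        if (ret != 0)
            return ret;

        /* Offload the tunnel destination IP; this re-sends the full
         * ipv4_off_list in a NFP_FLOWER_CMSG_TYPE_TUN_IPS message. */
        return nfp_tun_add_ipv4_off(repr->app_fw_flower, tun_dst);
    }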
diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.c b/drivers/net/nfp/flower/nfp_flower_cmsg.c
index 8983178..f18f3de 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.c
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.c
@@ -304,3 +304,121 @@
return 0;
}
+
+int
+nfp_flower_cmsg_tun_off_v4(struct nfp_app_fw_flower *app_fw_flower)
+{
+ uint16_t cnt;
+ uint32_t count = 0;
+ struct rte_mbuf *mbuf;
+ struct nfp_flow_priv *priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct nfp_flower_cmsg_tun_ipv4_addr *msg;
+
+ mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for v4 tun addr");
+ return -ENOMEM;
+ }
+
+ msg = nfp_flower_cmsg_init(mbuf, NFP_FLOWER_CMSG_TYPE_TUN_IPS, sizeof(*msg));
+
+ priv = app_fw_flower->flow_priv;
+ rte_spinlock_lock(&priv->ipv4_off_lock);
+ LIST_FOREACH(entry, &priv->ipv4_off_list, next) {
+ if (count >= NFP_FL_IPV4_ADDRS_MAX) {
+ rte_spinlock_unlock(&priv->ipv4_off_lock);
+ rte_pktmbuf_free(mbuf);
+ PMD_DRV_LOG(ERR, "IPv4 offload exceeds limit.");
+ return -ERANGE;
+ }
+ msg->ipv4_addr[count] = entry->ipv4_addr;
+ count++;
+ }
+ msg->count = rte_cpu_to_be_32(count);
+ rte_spinlock_unlock(&priv->ipv4_off_lock);
+
+ cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+ if (cnt == 0) {
+ PMD_DRV_LOG(ERR, "Send cmsg through ctrl vnic failed.");
+ rte_pktmbuf_free(mbuf);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+nfp_flower_cmsg_pre_tunnel_rule(struct nfp_app_fw_flower *app_fw_flower,
+ struct nfp_fl_rule_metadata *nfp_flow_meta,
+ uint16_t mac_idx,
+ bool is_del)
+{
+ uint16_t cnt;
+ struct rte_mbuf *mbuf;
+ struct nfp_flower_meta_tci *meta_tci;
+ struct nfp_flower_cmsg_pre_tun_rule *msg;
+
+ mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for pre tunnel rule");
+ return -ENOMEM;
+ }
+
+ msg = nfp_flower_cmsg_init(mbuf, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE, sizeof(*msg));
+
+ meta_tci = (struct nfp_flower_meta_tci *)((char *)nfp_flow_meta +
+ sizeof(struct nfp_fl_rule_metadata));
+ if (meta_tci->tci)
+ msg->vlan_tci = meta_tci->tci;
+ else
+ msg->vlan_tci = 0xffff;
+
+ if (is_del)
+ msg->flags = rte_cpu_to_be_32(NFP_TUN_PRE_TUN_RULE_DEL);
+
+ msg->port_idx = rte_cpu_to_be_16(mac_idx);
+ msg->host_ctx_id = nfp_flow_meta->host_ctx_id;
+
+ cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+ if (cnt == 0) {
+ PMD_DRV_LOG(ERR, "Send cmsg through ctrl vnic failed.");
+ rte_pktmbuf_free(mbuf);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int
+nfp_flower_cmsg_tun_mac_rule(struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_ether_addr *mac,
+ uint16_t mac_idx,
+ bool is_del)
+{
+ uint16_t cnt;
+ struct rte_mbuf *mbuf;
+ struct nfp_flower_cmsg_tun_mac *msg;
+
+ mbuf = rte_pktmbuf_alloc(app_fw_flower->ctrl_pktmbuf_pool);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(DEBUG, "Failed to alloc mbuf for tunnel mac");
+ return -ENOMEM;
+ }
+
+ msg = nfp_flower_cmsg_init(mbuf, NFP_FLOWER_CMSG_TYPE_TUN_MAC, sizeof(*msg));
+
+ msg->count = rte_cpu_to_be_16(1);
+ msg->index = rte_cpu_to_be_16(mac_idx);
+ rte_ether_addr_copy(mac, &msg->addr);
+ if (is_del)
+ msg->flags = rte_cpu_to_be_16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);
+
+ cnt = nfp_flower_ctrl_vnic_xmit(app_fw_flower, mbuf);
+ if (cnt == 0) {
+ PMD_DRV_LOG(ERR, "Send cmsg through ctrl vnic failed.");
+ rte_pktmbuf_free(mbuf);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/nfp/flower/nfp_flower_cmsg.h b/drivers/net/nfp/flower/nfp_flower_cmsg.h
index d1e0562..0933dac 100644
--- a/drivers/net/nfp/flower/nfp_flower_cmsg.h
+++ b/drivers/net/nfp/flower/nfp_flower_cmsg.h
@@ -195,6 +195,91 @@ struct nfp_flower_cmsg_tun_neigh_v6 {
struct nfp_flower_tun_neigh common;
};
+#define NFP_TUN_PRE_TUN_RULE_DEL (1 << 0)
+#define NFP_TUN_PRE_TUN_IDX_BIT (1 << 3)
+#define NFP_TUN_PRE_TUN_IPV6_BIT (1 << 7)
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE
+ * Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +---------------------------------------------------------------+
+ * 0 | FLAGS |
+ * +---------------------------------------------------------------+
+ * 1 | MAC_IDX | VLAN_ID |
+ * +---------------------------------------------------------------+
+ * 2 | HOST_CTX |
+ * +---------------------------------------------------------------+
+ */
+struct nfp_flower_cmsg_pre_tun_rule {
+ rte_be32_t flags;
+ rte_be16_t port_idx;
+ rte_be16_t vlan_tci;
+ rte_be32_t host_ctx_id;
+};
+
+#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG 0x2
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_TUN_MAC
+ * Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * Word +-----------------------+---+-+-+---------------+---------------+
+ * 0 | spare |NBI|D|F| Amount of MAC’s in this msg |
+ * +---------------+-------+---+-+-+---------------+---------------+
+ * 1 | Index 0 | MAC[0] | MAC[1] |
+ * +---------------+---------------+---------------+---------------+
+ * 2 | MAC[2] | MAC[3] | MAC[4] | MAC[5] |
+ * +---------------+---------------+---------------+---------------+
+ * 3 | Index 1 | MAC[0] | MAC[1] |
+ * +---------------+---------------+---------------+---------------+
+ * 4 | MAC[2] | MAC[3] | MAC[4] | MAC[5] |
+ * +---------------+---------------+---------------+---------------+
+ * ...
+ * +---------------+---------------+---------------+---------------+
+ * 2N-1 | Index N | MAC[0] | MAC[1] |
+ * +---------------+---------------+---------------+---------------+
+ * 2N | MAC[2] | MAC[3] | MAC[4] | MAC[5] |
+ * +---------------+---------------+---------------+---------------+
+ *
+ * F: Flush bit. Set if entire table must be flushed. Rest of info in cmsg
+ * will be ignored. Not implemented.
+ * D: Delete bit. Set if entry must be deleted instead of added
+ * NBI: Network Block Interface. Set to 0
+ * The number of MACs per control message is limited only by the packet
+ * buffer size. A 2048B buffer can fit 253 MAC addresses and a 10240B
+ * buffer 1277 MAC addresses.
+ */
+struct nfp_flower_cmsg_tun_mac {
+ rte_be16_t flags;
+ rte_be16_t count; /**< Should always be 1 */
+ rte_be16_t index;
+ struct rte_ether_addr addr;
+};
+
+#define NFP_FL_IPV4_ADDRS_MAX 32
+
+/*
+ * NFP_FLOWER_CMSG_TYPE_TUN_IPS
+ * Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * -----\ 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +---------------------------------------------------------------+
+ * 0 | Number of IP Addresses |
+ * +---------------------------------------------------------------+
+ * 1 | IP Address #1 |
+ * +---------------------------------------------------------------+
+ * 2 | IP Address #2 |
+ * +---------------------------------------------------------------+
+ * | ... |
+ * +---------------------------------------------------------------+
+ * 32 | IP Address #32 |
+ * +---------------------------------------------------------------+
+ */
+struct nfp_flower_cmsg_tun_ipv4_addr {
+ rte_be32_t count;
+ rte_be32_t ipv4_addr[NFP_FL_IPV4_ADDRS_MAX];
+};
+
/*
* NFP_FLOWER_CMSG_TYPE_FLOW_STATS
* Bit 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
@@ -716,5 +801,14 @@ int nfp_flower_cmsg_tun_neigh_v4_rule(struct nfp_app_fw_flower *app_fw_flower,
struct nfp_flower_cmsg_tun_neigh_v4 *payload);
int nfp_flower_cmsg_tun_neigh_v6_rule(struct nfp_app_fw_flower *app_fw_flower,
struct nfp_flower_cmsg_tun_neigh_v6 *payload);
+int nfp_flower_cmsg_tun_off_v4(struct nfp_app_fw_flower *app_fw_flower);
+int nfp_flower_cmsg_pre_tunnel_rule(struct nfp_app_fw_flower *app_fw_flower,
+ struct nfp_fl_rule_metadata *nfp_flow_meta,
+ uint16_t mac_idx,
+ bool is_del);
+int nfp_flower_cmsg_tun_mac_rule(struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_ether_addr *mac,
+ uint16_t mac_idx,
+ bool is_del);
#endif /* _NFP_CMSG_H_ */
diff --git a/drivers/net/nfp/nfp_flow.c b/drivers/net/nfp/nfp_flow.c
index 9ee02b0..c088d24 100644
--- a/drivers/net/nfp/nfp_flow.c
+++ b/drivers/net/nfp/nfp_flow.c
@@ -47,7 +47,8 @@ struct nfp_flow_item_proc {
/* Size in bytes for @p mask_support and @p mask_default. */
const unsigned int mask_sz;
/* Merge a pattern item into a flow rule handle. */
- int (*merge)(struct rte_flow *nfp_flow,
+ int (*merge)(struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
@@ -63,6 +64,12 @@ struct nfp_mask_id_entry {
uint8_t mask_id;
};
+struct nfp_pre_tun_entry {
+ uint16_t mac_index;
+ uint16_t ref_cnt;
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
+} __rte_aligned(32);
+
static inline struct nfp_flow_priv *
nfp_flow_dev_to_priv(struct rte_eth_dev *dev)
{
@@ -406,6 +413,83 @@ struct nfp_mask_id_entry {
return 0;
}
+__rte_unused static int
+nfp_tun_add_ipv4_off(struct nfp_app_fw_flower *app_fw_flower,
+ rte_be32_t ipv4)
+{
+ struct nfp_flow_priv *priv;
+ struct nfp_ipv4_addr_entry *entry;
+ struct nfp_ipv4_addr_entry *tmp_entry;
+
+ priv = app_fw_flower->flow_priv;
+
+ rte_spinlock_lock(&priv->ipv4_off_lock);
+ LIST_FOREACH(entry, &priv->ipv4_off_list, next) {
+ if (entry->ipv4_addr == ipv4) {
+ entry->ref_count++;
+ rte_spinlock_unlock(&priv->ipv4_off_lock);
+ return 0;
+ }
+ }
+ rte_spinlock_unlock(&priv->ipv4_off_lock);
+
+ tmp_entry = rte_zmalloc("nfp_ipv4_off", sizeof(struct nfp_ipv4_addr_entry), 0);
+ if (tmp_entry == NULL) {
+ PMD_DRV_LOG(ERR, "Mem error when offloading IP address.");
+ return -ENOMEM;
+ }
+
+ tmp_entry->ipv4_addr = ipv4;
+ tmp_entry->ref_count = 1;
+
+ rte_spinlock_lock(&priv->ipv4_off_lock);
+ LIST_INSERT_HEAD(&priv->ipv4_off_list, tmp_entry, next);
+ rte_spinlock_unlock(&priv->ipv4_off_lock);
+
+ return nfp_flower_cmsg_tun_off_v4(app_fw_flower);
+}
+
+static int
+nfp_tun_del_ipv4_off(struct nfp_app_fw_flower *app_fw_flower,
+ rte_be32_t ipv4)
+{
+ struct nfp_flow_priv *priv;
+ struct nfp_ipv4_addr_entry *entry;
+
+ priv = app_fw_flower->flow_priv;
+
+ rte_spinlock_lock(&priv->ipv4_off_lock);
+ LIST_FOREACH(entry, &priv->ipv4_off_list, next) {
+ if (entry->ipv4_addr == ipv4) {
+ entry->ref_count--;
+ if (entry->ref_count == 0) {
+ LIST_REMOVE(entry, next);
+ rte_free(entry);
+ rte_spinlock_unlock(&priv->ipv4_off_lock);
+ return nfp_flower_cmsg_tun_off_v4(app_fw_flower);
+ }
+ break;
+ }
+ }
+ rte_spinlock_unlock(&priv->ipv4_off_lock);
+
+ return 0;
+}
+
+static int
+nfp_tun_check_ip_off_del(struct nfp_flower_representor *repr,
+ struct rte_flow *nfp_flow)
+{
+ int ret;
+ struct nfp_flower_ipv4_udp_tun *udp4;
+
+ udp4 = (struct nfp_flower_ipv4_udp_tun *)(nfp_flow->payload.mask_data -
+ sizeof(struct nfp_flower_ipv4_udp_tun));
+ ret = nfp_tun_del_ipv4_off(repr->app_fw_flower, udp4->ipv4.dst);
+
+ return ret;
+}
+
static void
nfp_flower_compile_meta_tci(char *mbuf_off, struct nfp_fl_key_ls *key_layer)
{
@@ -635,6 +719,9 @@ struct nfp_mask_id_entry {
case RTE_FLOW_ACTION_TYPE_COUNT:
PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_COUNT detected");
break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_JUMP detected");
+ break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
PMD_DRV_LOG(DEBUG, "RTE_FLOW_ACTION_TYPE_PORT_ID detected");
key_ls->act_size += sizeof(struct nfp_fl_act_output);
@@ -786,7 +873,8 @@ struct nfp_mask_id_entry {
}
static int
-nfp_flow_merge_eth(__rte_unused struct rte_flow *nfp_flow,
+nfp_flow_merge_eth(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+ __rte_unused struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
@@ -823,7 +911,8 @@ struct nfp_mask_id_entry {
}
static int
-nfp_flow_merge_vlan(struct rte_flow *nfp_flow,
+nfp_flow_merge_vlan(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *nfp_flow,
__rte_unused char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
@@ -853,7 +942,8 @@ struct nfp_mask_id_entry {
}
static int
-nfp_flow_merge_ipv4(struct rte_flow *nfp_flow,
+nfp_flow_merge_ipv4(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
@@ -914,7 +1004,8 @@ struct nfp_mask_id_entry {
}
static int
-nfp_flow_merge_ipv6(struct rte_flow *nfp_flow,
+nfp_flow_merge_ipv6(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
@@ -979,7 +1070,8 @@ struct nfp_mask_id_entry {
}
static int
-nfp_flow_merge_tcp(struct rte_flow *nfp_flow,
+nfp_flow_merge_tcp(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
@@ -1052,7 +1144,8 @@ struct nfp_mask_id_entry {
}
static int
-nfp_flow_merge_udp(struct rte_flow *nfp_flow,
+nfp_flow_merge_udp(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
@@ -1100,7 +1193,8 @@ struct nfp_mask_id_entry {
}
static int
-nfp_flow_merge_sctp(struct rte_flow *nfp_flow,
+nfp_flow_merge_sctp(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
@@ -1142,7 +1236,8 @@ struct nfp_mask_id_entry {
}
static int
-nfp_flow_merge_vxlan(struct rte_flow *nfp_flow,
+nfp_flow_merge_vxlan(__rte_unused struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *nfp_flow,
char **mbuf_off,
const struct rte_flow_item *item,
const struct nfp_flow_item_proc *proc,
@@ -1391,7 +1486,8 @@ struct nfp_mask_id_entry {
}
static int
-nfp_flow_compile_item_proc(const struct rte_flow_item items[],
+nfp_flow_compile_item_proc(struct nfp_flower_representor *repr,
+ const struct rte_flow_item items[],
struct rte_flow *nfp_flow,
char **mbuf_off_exact,
char **mbuf_off_mask,
@@ -1402,6 +1498,7 @@ struct nfp_mask_id_entry {
bool continue_flag = true;
const struct rte_flow_item *item;
const struct nfp_flow_item_proc *proc_list;
+ struct nfp_app_fw_flower *app_fw_flower = repr->app_fw_flower;
proc_list = nfp_flow_item_proc_list;
for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END && continue_flag; ++item) {
@@ -1437,14 +1534,14 @@ struct nfp_mask_id_entry {
break;
}
- ret = proc->merge(nfp_flow, mbuf_off_exact, item,
+ ret = proc->merge(app_fw_flower, nfp_flow, mbuf_off_exact, item,
proc, false, is_outer_layer);
if (ret != 0) {
PMD_DRV_LOG(ERR, "nfp flow item %d exact merge failed", item->type);
break;
}
- ret = proc->merge(nfp_flow, mbuf_off_mask, item,
+ ret = proc->merge(app_fw_flower, nfp_flow, mbuf_off_mask, item,
proc, true, is_outer_layer);
if (ret != 0) {
PMD_DRV_LOG(ERR, "nfp flow item %d mask merge failed", item->type);
@@ -1458,7 +1555,7 @@ struct nfp_mask_id_entry {
}
static int
-nfp_flow_compile_items(__rte_unused struct nfp_flower_representor *representor,
+nfp_flow_compile_items(struct nfp_flower_representor *representor,
const struct rte_flow_item items[],
struct rte_flow *nfp_flow)
{
@@ -1489,7 +1586,7 @@ struct nfp_mask_id_entry {
is_outer_layer = false;
/* Go over items */
- ret = nfp_flow_compile_item_proc(loop_item, nfp_flow,
+ ret = nfp_flow_compile_item_proc(representor, loop_item, nfp_flow,
&mbuf_off_exact, &mbuf_off_mask, is_outer_layer);
if (ret != 0) {
PMD_DRV_LOG(ERR, "nfp flow item compile failed.");
@@ -1498,7 +1595,7 @@ struct nfp_mask_id_entry {
/* Go over inner items */
if (is_tun_flow) {
- ret = nfp_flow_compile_item_proc(items, nfp_flow,
+ ret = nfp_flow_compile_item_proc(representor, items, nfp_flow,
&mbuf_off_exact, &mbuf_off_mask, true);
if (ret != 0) {
PMD_DRV_LOG(ERR, "nfp flow outer item compile failed.");
@@ -1873,6 +1970,59 @@ struct nfp_mask_id_entry {
return nfp_flower_cmsg_tun_neigh_v4_rule(app_fw_flower, &payload);
}
+__rte_unused static int
+nfp_flower_add_tun_neigh_v4_decap(struct nfp_app_fw_flower *app_fw_flower,
+ struct rte_flow *nfp_flow)
+{
+ struct nfp_fl_tun *tmp;
+ struct nfp_fl_tun *tun;
+ struct nfp_flow_priv *priv;
+ struct nfp_flower_ipv4 *ipv4;
+ struct nfp_flower_mac_mpls *eth;
+ struct nfp_flower_in_port *port;
+ struct nfp_flower_meta_tci *meta_tci;
+ struct nfp_flower_cmsg_tun_neigh_v4 payload;
+
+ meta_tci = (struct nfp_flower_meta_tci *)nfp_flow->payload.unmasked_data;
+ port = (struct nfp_flower_in_port *)(meta_tci + 1);
+ eth = (struct nfp_flower_mac_mpls *)(port + 1);
+
+ if (meta_tci->nfp_flow_key_layer & NFP_FLOWER_LAYER_TP)
+ ipv4 = (struct nfp_flower_ipv4 *)((char *)eth +
+ sizeof(struct nfp_flower_mac_mpls) +
+ sizeof(struct nfp_flower_tp_ports));
+ else
+ ipv4 = (struct nfp_flower_ipv4 *)((char *)eth +
+ sizeof(struct nfp_flower_mac_mpls));
+
+ tun = &nfp_flow->tun;
+ tun->payload.v6_flag = 0;
+ tun->payload.dst.dst_ipv4 = ipv4->ipv4_src;
+ tun->payload.src.src_ipv4 = ipv4->ipv4_dst;
+ memcpy(tun->payload.dst_addr, eth->mac_src, RTE_ETHER_ADDR_LEN);
+ memcpy(tun->payload.src_addr, eth->mac_dst, RTE_ETHER_ADDR_LEN);
+
+ tun->ref_cnt = 1;
+ priv = app_fw_flower->flow_priv;
+ LIST_FOREACH(tmp, &priv->nn_list, next) {
+ if (memcmp(&tmp->payload, &tun->payload, sizeof(struct nfp_fl_tun_entry)) == 0) {
+ tmp->ref_cnt++;
+ return 0;
+ }
+ }
+
+ LIST_INSERT_HEAD(&priv->nn_list, tun, next);
+
+ memset(&payload, 0, sizeof(struct nfp_flower_cmsg_tun_neigh_v4));
+ payload.dst_ipv4 = ipv4->ipv4_src;
+ payload.src_ipv4 = ipv4->ipv4_dst;
+ memcpy(payload.common.dst_mac, eth->mac_src, RTE_ETHER_ADDR_LEN);
+ memcpy(payload.common.src_mac, eth->mac_dst, RTE_ETHER_ADDR_LEN);
+ payload.common.port_id = port->in_port;
+
+ return nfp_flower_cmsg_tun_neigh_v4_rule(app_fw_flower, &payload);
+}
+
static int
nfp_flower_del_tun_neigh_v4(struct nfp_app_fw_flower *app_fw_flower,
rte_be32_t ipv4)
@@ -2090,6 +2240,200 @@ struct nfp_mask_id_entry {
actions, vxlan_data, nfp_flow_meta, tun);
}
+static struct nfp_pre_tun_entry *
+nfp_pre_tun_table_search(struct nfp_flow_priv *priv,
+ char *hash_data,
+ uint32_t hash_len)
+{
+ int index;
+ uint32_t hash_key;
+ struct nfp_pre_tun_entry *mac_index;
+
+ hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
+ index = rte_hash_lookup_data(priv->pre_tun_table, &hash_key, (void **)&mac_index);
+ if (index < 0) {
+ PMD_DRV_LOG(DEBUG, "Data NOT found in the hash table");
+ return NULL;
+ }
+
+ return mac_index;
+}
+
+static bool
+nfp_pre_tun_table_add(struct nfp_flow_priv *priv,
+ char *hash_data,
+ uint32_t hash_len)
+{
+ int ret;
+ uint32_t hash_key;
+
+ hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
+ ret = rte_hash_add_key_data(priv->pre_tun_table, &hash_key, hash_data);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Add to pre tunnel table failed");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+nfp_pre_tun_table_delete(struct nfp_flow_priv *priv,
+ char *hash_data,
+ uint32_t hash_len)
+{
+ int ret;
+ uint32_t hash_key;
+
+ hash_key = rte_jhash(hash_data, hash_len, priv->hash_seed);
+ ret = rte_hash_del_key(priv->pre_tun_table, &hash_key);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Delete from pre tunnel table failed");
+ return false;
+ }
+
+ return true;
+}
+
+__rte_unused static int
+nfp_pre_tun_table_check_add(struct nfp_flower_representor *repr,
+ uint16_t *index)
+{
+ uint16_t i;
+ uint32_t entry_size;
+ uint16_t mac_index = 1;
+ struct nfp_flow_priv *priv;
+ struct nfp_pre_tun_entry *entry;
+ struct nfp_pre_tun_entry *find_entry;
+
+ priv = repr->app_fw_flower->flow_priv;
+ if (priv->pre_tun_cnt >= NFP_TUN_PRE_TUN_RULE_LIMIT) {
+ PMD_DRV_LOG(ERR, "Pre tunnel table has full");
+ return -EINVAL;
+ }
+
+ entry_size = sizeof(struct nfp_pre_tun_entry);
+ entry = rte_zmalloc("nfp_pre_tun", entry_size, 0);
+ if (entry == NULL) {
+ PMD_DRV_LOG(ERR, "Memory alloc failed for pre tunnel table");
+ return -ENOMEM;
+ }
+
+ entry->ref_cnt = 1U;
+ memcpy(entry->mac_addr, repr->mac_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
+
+ /* 0 is considered a failed match */
+ for (i = 1; i < NFP_TUN_PRE_TUN_RULE_LIMIT; i++) {
+ if (priv->pre_tun_bitmap[i] == 0)
+ continue;
+ entry->mac_index = i;
+ find_entry = nfp_pre_tun_table_search(priv, (char *)entry, entry_size);
+ if (find_entry != NULL) {
+ find_entry->ref_cnt++;
+ *index = find_entry->mac_index;
+ rte_free(entry);
+ return 0;
+ }
+ }
+
+ for (i = 1; i < NFP_TUN_PRE_TUN_RULE_LIMIT; i++) {
+ if (priv->pre_tun_bitmap[i] == 0) {
+ priv->pre_tun_bitmap[i] = 1U;
+ mac_index = i;
+ break;
+ }
+ }
+
+ entry->mac_index = mac_index;
+ if (!nfp_pre_tun_table_add(priv, (char *)entry, entry_size)) {
+ rte_free(entry);
+ return -EINVAL;
+ }
+
+ *index = entry->mac_index;
+ priv->pre_tun_cnt++;
+ return 0;
+}
+
+static int
+nfp_pre_tun_table_check_del(struct nfp_flower_representor *repr,
+ struct rte_flow *nfp_flow)
+{
+ uint16_t i;
+ int ret = 0;
+ uint32_t entry_size;
+ uint16_t nfp_mac_idx;
+ struct nfp_flow_priv *priv;
+ struct nfp_pre_tun_entry *entry;
+ struct nfp_pre_tun_entry *find_entry = NULL;
+ struct nfp_fl_rule_metadata *nfp_flow_meta;
+
+ priv = repr->app_fw_flower->flow_priv;
+ if (priv->pre_tun_cnt == 1)
+ return 0;
+
+ entry_size = sizeof(struct nfp_pre_tun_entry);
+ entry = rte_zmalloc("nfp_pre_tun", entry_size, 0);
+ if (entry == NULL) {
+ PMD_DRV_LOG(ERR, "Memory alloc failed for pre tunnel table");
+ return -ENOMEM;
+ }
+
+ entry->ref_cnt = 1U;
+ memcpy(entry->mac_addr, repr->mac_addr.addr_bytes, RTE_ETHER_ADDR_LEN);
+
+ /* 0 is considered a failed match */
+ for (i = 1; i < NFP_TUN_PRE_TUN_RULE_LIMIT; i++) {
+ if (priv->pre_tun_bitmap[i] == 0)
+ continue;
+ entry->mac_index = i;
+ find_entry = nfp_pre_tun_table_search(priv, (char *)entry, entry_size);
+ if (find_entry != NULL) {
+ find_entry->ref_cnt--;
+ if (find_entry->ref_cnt != 0)
+ goto free_entry;
+ priv->pre_tun_bitmap[i] = 0;
+ break;
+ }
+ }
+
+ if (find_entry == NULL) {
+ ret = -ENOENT;
+ goto free_entry;
+ }
+
+ nfp_flow_meta = nfp_flow->payload.meta;
+ nfp_mac_idx = (find_entry->mac_index << 8) |
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT |
+ NFP_TUN_PRE_TUN_IDX_BIT;
+ ret = nfp_flower_cmsg_tun_mac_rule(repr->app_fw_flower, &repr->mac_addr,
+ nfp_mac_idx, true);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Send tunnel mac rule failed");
+ ret = -EINVAL;
+ goto free_entry;
+ }
+
+ ret = nfp_flower_cmsg_pre_tunnel_rule(repr->app_fw_flower, nfp_flow_meta,
+ nfp_mac_idx, true);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Send pre tunnel rule failed");
+ ret = -EINVAL;
+ goto free_entry;
+ }
+
+ find_entry->ref_cnt = 1U;
+ if (!nfp_pre_tun_table_delete(priv, (char *)find_entry, entry_size)) {
+ PMD_DRV_LOG(ERR, "Delete entry from pre tunnel table failed");
+ ret = -EINVAL;
+ goto free_entry;
+ }
+
+ rte_free(entry);
+ rte_free(find_entry);
+ priv->pre_tun_cnt--;
+
+ return 0;
+
+free_entry:
+ rte_free(entry);
+
+ return ret;
+}
+
static int
nfp_flow_compile_action(struct nfp_flower_representor *representor,
const struct rte_flow_action actions[],
@@ -2125,6 +2469,9 @@ struct nfp_mask_id_entry {
case RTE_FLOW_ACTION_TYPE_COUNT:
PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_COUNT");
break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_JUMP");
+ break;
case RTE_FLOW_ACTION_TYPE_PORT_ID:
PMD_DRV_LOG(DEBUG, "Process RTE_FLOW_ACTION_TYPE_PORT_ID");
ret = nfp_flow_action_output(position, action, nfp_flow_meta);
@@ -2561,6 +2908,15 @@ struct nfp_mask_id_entry {
/* Delete the entry from nn table */
ret = nfp_flower_del_tun_neigh(app_fw_flower, nfp_flow);
break;
+ case NFP_FLOW_DECAP:
+ /* Delete the entry from nn table */
+ ret = nfp_flower_del_tun_neigh(app_fw_flower, nfp_flow);
+ if (ret != 0)
+ goto exit;
+
+ /* Delete the entry in pre tunnel table */
+ ret = nfp_pre_tun_table_check_del(representor, nfp_flow);
+ break;
default:
PMD_DRV_LOG(ERR, "Invalid nfp flow type %d.", nfp_flow->type);
ret = -EINVAL;
@@ -2570,6 +2926,10 @@ struct nfp_mask_id_entry {
if (ret != 0)
goto exit;
+ /* Delete the ip off */
+ if (nfp_flow_is_tunnel(nfp_flow))
+ nfp_tun_check_ip_off_del(representor, nfp_flow);
+
/* Delete the flow from hardware */
if (nfp_flow->install_flag) {
ret = nfp_flower_cmsg_flow_delete(app_fw_flower, nfp_flow);
@@ -2703,6 +3063,49 @@ struct nfp_mask_id_entry {
return 0;
}
+static int
+nfp_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
+ struct rte_flow_tunnel *tunnel,
+ struct rte_flow_action **pmd_actions,
+ uint32_t *num_of_actions,
+ __rte_unused struct rte_flow_error *err)
+{
+ struct rte_flow_action *nfp_action;
+
+ nfp_action = rte_zmalloc("nfp_tun_action", sizeof(struct rte_flow_action), 0);
+ if (nfp_action == NULL) {
+ PMD_DRV_LOG(ERR, "Alloc memory for nfp tunnel action failed.");
+ return -ENOMEM;
+ }
+
+ switch (tunnel->type) {
+ default:
+ *pmd_actions = NULL;
+ *num_of_actions = 0;
+ rte_free(nfp_action);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nfp_flow_tunnel_action_decap_release(__rte_unused struct rte_eth_dev *dev,
+ struct rte_flow_action *pmd_actions,
+ uint32_t num_of_actions,
+ __rte_unused struct rte_flow_error *err)
+{
+ uint32_t i;
+ struct rte_flow_action *nfp_action;
+
+ for (i = 0; i < num_of_actions; i++) {
+ nfp_action = &pmd_actions[i];
+ rte_free(nfp_action);
+ }
+
+ return 0;
+}
+
static const struct rte_flow_ops nfp_flow_ops = {
.validate = nfp_flow_validate,
.create = nfp_flow_create,
@@ -2711,6 +3114,8 @@ struct nfp_mask_id_entry {
.query = nfp_flow_query,
.tunnel_match = nfp_flow_tunnel_match,
.tunnel_item_release = nfp_flow_tunnel_item_release,
+ .tunnel_decap_set = nfp_flow_tunnel_decap_set,
+ .tunnel_action_decap_release = nfp_flow_tunnel_action_decap_release,
};
int
@@ -2755,6 +3160,15 @@ struct nfp_mask_id_entry {
.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
};
+ struct rte_hash_parameters pre_tun_hash_params = {
+ .name = "pre_tunnel_table",
+ .entries = 32,
+ .hash_func = rte_jhash,
+ .socket_id = rte_socket_id(),
+ .key_len = sizeof(uint32_t),
+ .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
+ };
+
ctx_count = nfp_rtsym_read_le(pf_dev->sym_tbl,
"CONFIG_FC_HOST_CTX_COUNT", &ret);
if (ret < 0) {
@@ -2835,11 +3249,27 @@ struct nfp_mask_id_entry {
goto free_mask_table;
}
+ /* pre tunnel table */
+ priv->pre_tun_cnt = 1;
+ pre_tun_hash_params.hash_func_init_val = priv->hash_seed;
+ priv->pre_tun_table = rte_hash_create(&pre_tun_hash_params);
+ if (priv->pre_tun_table == NULL) {
+ PMD_INIT_LOG(ERR, "Pre tunnel table creation failed");
+ ret = -ENOMEM;
+ goto free_flow_table;
+ }
+
+ /* ipv4 off list */
+ rte_spinlock_init(&priv->ipv4_off_lock);
+ LIST_INIT(&priv->ipv4_off_list);
+
/* neighbor next list */
LIST_INIT(&priv->nn_list);
return 0;
+free_flow_table:
+ rte_hash_free(priv->flow_table);
free_mask_table:
rte_free(priv->mask_table);
free_stats:
@@ -2863,6 +3293,7 @@ struct nfp_mask_id_entry {
app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv);
priv = app_fw_flower->flow_priv;
+ rte_hash_free(priv->pre_tun_table);
rte_hash_free(priv->flow_table);
rte_hash_free(priv->mask_table);
rte_free(priv->stats);
diff --git a/drivers/net/nfp/nfp_flow.h b/drivers/net/nfp/nfp_flow.h
index 892dbc0..f536da2 100644
--- a/drivers/net/nfp/nfp_flow.h
+++ b/drivers/net/nfp/nfp_flow.h
@@ -6,6 +6,7 @@
#ifndef _NFP_FLOW_H_
#define _NFP_FLOW_H_
+#include <sys/queue.h>
#include <rte_bitops.h>
#include <ethdev_driver.h>
@@ -93,6 +94,7 @@ enum nfp_flower_tun_type {
enum nfp_flow_type {
NFP_FLOW_COMMON,
NFP_FLOW_ENCAP,
+ NFP_FLOW_DECAP,
};
struct nfp_fl_key_ls {
@@ -169,6 +171,14 @@ struct nfp_fl_stats {
uint64_t bytes;
};
+struct nfp_ipv4_addr_entry {
+ LIST_ENTRY(nfp_ipv4_addr_entry) next;
+ rte_be32_t ipv4_addr;
+ int ref_count;
+};
+
+#define NFP_TUN_PRE_TUN_RULE_LIMIT 32
+
struct nfp_flow_priv {
uint32_t hash_seed; /**< Hash seed for hash tables in this structure. */
uint64_t flower_version; /**< Flow version, always increase. */
@@ -184,6 +194,13 @@ struct nfp_flow_priv {
struct nfp_fl_stats_id stats_ids; /**< The stats id ring. */
struct nfp_fl_stats *stats; /**< Store stats of flow. */
rte_spinlock_t stats_lock; /** < Lock the update of 'stats' field. */
+ /* pre tunnel rule */
+ uint16_t pre_tun_cnt; /**< The number of pre tunnel rules */
+ uint8_t pre_tun_bitmap[NFP_TUN_PRE_TUN_RULE_LIMIT]; /**< Bitmap of pre tunnel rules */
+ struct rte_hash *pre_tun_table; /**< Hash table to store pre tunnel rules */
+ /* IPv4 off */
+ LIST_HEAD(, nfp_ipv4_addr_entry) ipv4_off_list; /**< Store ipv4 off */
+ rte_spinlock_t ipv4_off_lock; /**< Lock the ipv4 off list */
/* neighbor next */
LIST_HEAD(, nfp_fl_tun)nn_list; /**< Store nn entry */
};
--
1.8.3.1