* [dpdk-dev] [PATCH 01/37] net/txgbe: add ntuple filter init and uninit
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 02/37] net/txgbe: support ntuple filter add and delete Jiawen Wu
` (36 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add ntuple filter init and uninit.
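The filters are tracked in software with a TAILQ list plus a bitmap of
used hardware indices; TXGBE_5TUPLE_ARRAY_SIZE rounds the 128 filter
slots up to whole 32-bit words. Below is a minimal standalone sketch of
that bookkeeping (illustration only, not driver code; all names here
are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_FTQF_FILTERS 128            /* TXGBE_MAX_FTQF_FILTERS */
    #define NBBY 8                          /* bits per byte */
    #define MASK_WORDS \
        ((MAX_FTQF_FILTERS + sizeof(uint32_t) * NBBY - 1) / \
         (sizeof(uint32_t) * NBBY))         /* 128 / 32 = 4 words */

    int main(void)
    {
        uint32_t mask[MASK_WORDS] = { 0 };
        unsigned int i = 37;                /* example filter index */

        /* filter index i maps to word i/32, bit i%32 */
        mask[i / 32] |= 1u << (i % 32);
        printf("filter %u -> word %u, bit %u\n", i, i / 32, i % 32);
        return 0;
    }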
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/base/txgbe_type.h | 1 +
drivers/net/txgbe/txgbe_ethdev.c | 28 ++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ethdev.h | 33 +++++++++++++++++++++++++++++
3 files changed, 62 insertions(+)
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index b322a2cac..69aa8993a 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -15,6 +15,7 @@
#define TXGBE_FRAME_SIZE_DFT (1518) /* Default frame size, +FCS */
#define TXGBE_NUM_POOL (64)
#define TXGBE_PBTXSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
+#define TXGBE_MAX_FTQF_FILTERS 128
#define TXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
#define TXGBE_MAX_UP 8
#define TXGBE_MAX_QP (128)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 189caf2e9..08f19a2ef 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -470,6 +470,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
const struct rte_memzone *mz;
@@ -677,6 +678,13 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
/* enable support intr */
txgbe_enable_intr(eth_dev);
+ /* initialize filter info */
+ memset(filter_info, 0,
+ sizeof(struct txgbe_filter_info));
+
+ /* initialize 5tuple filter list */
+ TAILQ_INIT(&filter_info->fivetuple_list);
+
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
@@ -696,6 +704,23 @@ eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
+ struct txgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple,
+ entries);
+ rte_free(p_5tuple);
+ }
+ memset(filter_info->fivetuple_mask, 0,
+ sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
+
+ return 0;
+}
+
static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
@@ -1774,6 +1799,9 @@ txgbe_dev_close(struct rte_eth_dev *dev)
rte_free(dev->data->hash_mac_addrs);
dev->data->hash_mac_addrs = NULL;
+ /* Remove all ntuple filters of the device */
+ txgbe_ntuple_filter_uninit(dev);
+
return ret;
}
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 2c3680218..60687bc49 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -110,6 +110,36 @@ struct txgbe_vf_info {
uint16_t mac_count;
};
+TAILQ_HEAD(txgbe_5tuple_filter_list, txgbe_5tuple_filter);
+
+struct txgbe_5tuple_filter_info {
+ uint32_t dst_ip;
+ uint32_t src_ip;
+ uint16_t dst_port;
+ uint16_t src_port;
+ enum txgbe_5tuple_protocol proto; /* l4 protocol. */
+ uint8_t priority; /* seven levels (001b-111b), 111b is highest,
+ * used when more than one filter matches.
+ */
+ uint8_t dst_ip_mask:1, /* if mask is 1b, do not compare dst ip. */
+ src_ip_mask:1, /* if mask is 1b, do not compare src ip. */
+ dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
+ src_port_mask:1, /* if mask is 1b, do not compare src port. */
+ proto_mask:1; /* if mask is 1b, do not compare protocol. */
+};
+
+/* 5tuple filter structure */
+struct txgbe_5tuple_filter {
+ TAILQ_ENTRY(txgbe_5tuple_filter) entries;
+ uint16_t index; /* the index of 5tuple filter */
+ struct txgbe_5tuple_filter_info filter_info;
+ uint16_t queue; /* rx queue assigned to */
+};
+
+#define TXGBE_5TUPLE_ARRAY_SIZE \
+ (RTE_ALIGN(TXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
+ (sizeof(uint32_t) * NBBY))
+
struct txgbe_ethertype_filter {
uint16_t ethertype;
uint32_t etqf;
@@ -128,6 +158,9 @@ struct txgbe_filter_info {
uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
/* store used ethertype filters*/
struct txgbe_ethertype_filter ethertype_filters[TXGBE_ETF_ID_MAX];
+ /* Bit mask for every used 5tuple filter */
+ uint32_t fivetuple_mask[TXGBE_5TUPLE_ARRAY_SIZE];
+ struct txgbe_5tuple_filter_list fivetuple_list;
};
/* The configuration of bandwidth */
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 02/37] net/txgbe: support ntuple filter add and delete
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 01/37] net/txgbe: add ntuple filter init and uninit Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 03/37] net/txgbe: add ntuple parse rule Jiawen Wu
` (35 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Support add and delete operations on ntuple filter.
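For reference, a hedged usage sketch of the new internal entry point
(normally reached via the rte_flow API added later in this series; the
call site, queue number and addresses are illustrative assumptions, not
part of this patch). In struct rte_eth_ntuple_filter a field mask of
UINT32_MAX/UINT16_MAX/UINT8_MAX means "compare this field" and 0 means
"ignore it":

    struct rte_eth_ntuple_filter f = {
        .flags = RTE_5TUPLE_FLAGS,
        .dst_ip = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
        .dst_ip_mask = UINT32_MAX,          /* compare dst IP */
        .src_ip_mask = 0,                   /* ignore src IP */
        .dst_port = rte_cpu_to_be_16(80),
        .dst_port_mask = UINT16_MAX,        /* compare dst port */
        .src_port_mask = 0,                 /* ignore src port */
        .proto = IPPROTO_UDP,
        .proto_mask = UINT8_MAX,            /* compare protocol */
        .priority = 1,
        .queue = 3,
    };
    int ret;

    ret = txgbe_add_del_ntuple_filter(dev, &f, true);   /* add */
    ret = txgbe_add_del_ntuple_filter(dev, &f, false);  /* delete */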
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 310 +++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ethdev.h | 6 +
2 files changed, 316 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 08f19a2ef..fe32194a7 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -109,6 +109,8 @@ static void txgbe_dev_interrupt_handler(void *param);
static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);
+static int txgbe_filter_restore(struct rte_eth_dev *dev);
+
#define TXGBE_SET_HWSTRIP(h, q) do {\
uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
@@ -1611,6 +1613,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
/* resume enabled intr since hw reset */
txgbe_enable_intr(dev);
+ txgbe_filter_restore(dev);
/*
* Update link status right before return, because it may
@@ -3693,6 +3696,293 @@ txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
return 0;
}
+static inline enum txgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+ if (protocol_value == IPPROTO_TCP)
+ return TXGBE_5TF_PROT_TCP;
+ else if (protocol_value == IPPROTO_UDP)
+ return TXGBE_5TF_PROT_UDP;
+ else if (protocol_value == IPPROTO_SCTP)
+ return TXGBE_5TF_PROT_SCTP;
+ else
+ return TXGBE_5TF_PROT_NONE;
+}
+
+/* inject a 5-tuple filter to HW */
+static inline void
+txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+ struct txgbe_5tuple_filter *filter)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int i;
+ uint32_t ftqf, sdpqf;
+ uint32_t l34timir = 0;
+ uint32_t mask = TXGBE_5TFCTL0_MASK;
+
+ i = filter->index;
+ sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
+ sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
+
+ ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
+ ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
+ if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+ mask &= ~TXGBE_5TFCTL0_MSADDR;
+ if (filter->filter_info.dst_ip_mask == 0)
+ mask &= ~TXGBE_5TFCTL0_MDADDR;
+ if (filter->filter_info.src_port_mask == 0)
+ mask &= ~TXGBE_5TFCTL0_MSPORT;
+ if (filter->filter_info.dst_port_mask == 0)
+ mask &= ~TXGBE_5TFCTL0_MDPORT;
+ if (filter->filter_info.proto_mask == 0)
+ mask &= ~TXGBE_5TFCTL0_MPROTO;
+ ftqf |= mask;
+ ftqf |= TXGBE_5TFCTL0_MPOOL;
+ ftqf |= TXGBE_5TFCTL0_ENA;
+
+ wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
+ wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
+ wr32(hw, TXGBE_5TFPORT(i), sdpqf);
+ wr32(hw, TXGBE_5TFCTL0(i), ftqf);
+
+ l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
+ wr32(hw, TXGBE_5TFCTL1(i), l34timir);
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index allocated to the filter.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+ struct txgbe_5tuple_filter *filter)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ int i, idx, shift;
+
+ /*
+ * look for an unused 5tuple filter index,
+ * and insert the filter to list.
+ */
+ for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
+ idx = i / (sizeof(uint32_t) * NBBY);
+ shift = i % (sizeof(uint32_t) * NBBY);
+ if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
+ filter_info->fivetuple_mask[idx] |= 1 << shift;
+ filter->index = i;
+ TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+ filter,
+ entries);
+ break;
+ }
+ }
+ if (i >= TXGBE_MAX_FTQF_FILTERS) {
+ PMD_DRV_LOG(ERR, "5tuple filters are full.");
+ return -ENOSYS;
+ }
+
+ txgbe_inject_5tuple_filter(dev, filter);
+
+ return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be removed.
+ */
+static void
+txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+ struct txgbe_5tuple_filter *filter)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ uint16_t index = filter->index;
+
+ filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
+ ~(1 << (index % (sizeof(uint32_t) * NBBY)));
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+ rte_free(filter);
+
+ wr32(hw, TXGBE_5TFDADDR(index), 0);
+ wr32(hw, TXGBE_5TFSADDR(index), 0);
+ wr32(hw, TXGBE_5TFPORT(index), 0);
+ wr32(hw, TXGBE_5TFCTL0(index), 0);
+ wr32(hw, TXGBE_5TFCTL1(index), 0);
+}
+
+static inline struct txgbe_5tuple_filter *
+txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
+ struct txgbe_5tuple_filter_info *key)
+{
+ struct txgbe_5tuple_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct txgbe_5tuple_filter_info)) == 0) {
+ return it;
+ }
+ }
+ return NULL;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter
+ * to struct txgbe_5tuple_filter_info
+ */
+static inline int
+ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+ struct txgbe_5tuple_filter_info *filter_info)
+{
+ if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
+ filter->priority > TXGBE_5TUPLE_MAX_PRI ||
+ filter->priority < TXGBE_5TUPLE_MIN_PRI)
+ return -EINVAL;
+
+ switch (filter->dst_ip_mask) {
+ case UINT32_MAX:
+ filter_info->dst_ip_mask = 0;
+ filter_info->dst_ip = filter->dst_ip;
+ break;
+ case 0:
+ filter_info->dst_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_ip_mask) {
+ case UINT32_MAX:
+ filter_info->src_ip_mask = 0;
+ filter_info->src_ip = filter->src_ip;
+ break;
+ case 0:
+ filter_info->src_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->dst_port_mask) {
+ case UINT16_MAX:
+ filter_info->dst_port_mask = 0;
+ filter_info->dst_port = filter->dst_port;
+ break;
+ case 0:
+ filter_info->dst_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_port_mask) {
+ case UINT16_MAX:
+ filter_info->src_port_mask = 0;
+ filter_info->src_port = filter->src_port;
+ break;
+ case 0:
+ filter_info->src_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->proto_mask) {
+ case UINT8_MAX:
+ filter_info->proto_mask = 0;
+ filter_info->proto =
+ convert_protocol_type(filter->proto);
+ break;
+ case 0:
+ filter_info->proto_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ filter_info->priority = (uint8_t)filter->priority;
+ return 0;
+}
+
+/*
+ * add or delete a ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add filter, if false, remove filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter,
+ bool add)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ struct txgbe_5tuple_filter_info filter_5tuple;
+ struct txgbe_5tuple_filter *filter;
+ int ret;
+
+ if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+ return -EINVAL;
+ }
+
+ memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter != NULL && add) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ return -EEXIST;
+ }
+ if (filter == NULL && !add) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ if (add) {
+ filter = rte_zmalloc("txgbe_5tuple_filter",
+ sizeof(struct txgbe_5tuple_filter), 0);
+ if (filter == NULL)
+ return -ENOMEM;
+ rte_memcpy(&filter->filter_info,
+ &filter_5tuple,
+ sizeof(struct txgbe_5tuple_filter_info));
+ filter->queue = ntuple_filter->queue;
+ ret = txgbe_add_5tuple_filter(dev, filter);
+ if (ret < 0) {
+ rte_free(filter);
+ return ret;
+ }
+ } else {
+ txgbe_remove_5tuple_filter(dev, filter);
+ }
+
+ return 0;
+}
+
static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
u8 **mc_addr_ptr, u32 *vmdq)
@@ -4214,6 +4504,26 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
return 0;
}
+/* restore n-tuple filter */
+static inline void
+txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ struct txgbe_5tuple_filter *node;
+
+ TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+ txgbe_inject_5tuple_filter(dev, node);
+ }
+}
+
+static int
+txgbe_filter_restore(struct rte_eth_dev *dev)
+{
+ txgbe_ntuple_filter_restore(dev);
+
+ return 0;
+}
+
static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure = txgbe_dev_configure,
.dev_infos_get = txgbe_dev_info_get,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 60687bc49..caccf342d 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -39,6 +39,9 @@
#define TXGBE_MAX_QUEUE_NUM_PER_VF 8
+#define TXGBE_5TUPLE_MAX_PRI 7
+#define TXGBE_5TUPLE_MIN_PRI 1
+
#define TXGBE_RSS_OFFLOAD_ALL ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
@@ -311,6 +314,9 @@ int txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
bool txgbe_rss_update_sp(enum txgbe_mac_type mac_type);
+int txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter,
+ bool add);
void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 03/37] net/txgbe: add ntuple parse rule
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 01/37] net/txgbe: add ntuple filter init and uninit Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 02/37] net/txgbe: support ntuple filter add and delete Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 04/37] net/txgbe: support ntuple filter remove operation Jiawen Wu
` (34 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support to parse flow for ntuple filter.
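A hedged sketch of a flow this parser accepts, following the pattern
example documented in the code below (port_id, the queue index and the
addresses are illustrative assumptions):

    struct rte_flow_item_ipv4 ip_spec = { .hdr = {
        .src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
        .dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
        .next_proto_id = IPPROTO_UDP,
    } };
    struct rte_flow_item_ipv4 ip_mask = { .hdr = {
        .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
        .next_proto_id = UINT8_MAX,
    } };
    struct rte_flow_item_udp udp_spec = { .hdr = {
        .src_port = rte_cpu_to_be_16(80),
        .dst_port = rte_cpu_to_be_16(80),
    } };
    struct rte_flow_item_udp udp_mask = { .hdr = {
        .src_port = UINT16_MAX, .dst_port = UINT16_MAX,
    } };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },     /* spec/mask NULL */
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &ip_spec, .mask = &ip_mask },
        { .type = RTE_FLOW_ITEM_TYPE_UDP,
          .spec = &udp_spec, .mask = &udp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_queue queue = { .index = 3 };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
    struct rte_flow_error err;
    struct rte_flow *flow = rte_flow_create(port_id, &attr,
                                            pattern, actions, &err);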
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/meson.build | 1 +
drivers/net/txgbe/txgbe_flow.c | 536 +++++++++++++++++++++++++++++++++
2 files changed, 537 insertions(+)
create mode 100644 drivers/net/txgbe/txgbe_flow.c
diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build
index 345dffaf6..45379175d 100644
--- a/drivers/net/txgbe/meson.build
+++ b/drivers/net/txgbe/meson.build
@@ -6,6 +6,7 @@ objs = [base_objs]
sources = files(
'txgbe_ethdev.c',
+ 'txgbe_flow.c',
'txgbe_ptypes.c',
'txgbe_pf.c',
'txgbe_rxtx.c',
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
new file mode 100644
index 000000000..6f8be3b7f
--- /dev/null
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -0,0 +1,536 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "txgbe_logs.h"
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+#include "txgbe_rxtx.h"
+
+#define TXGBE_MIN_N_TUPLE_PRIO 1
+#define TXGBE_MAX_N_TUPLE_PRIO 7
+
+/**
+ * An endless loop will never happen with the assumptions below:
+ * 1. there is at least one non-void item (END)
+ * 2. cur is before END.
+ */
+static inline
+const struct rte_flow_item *next_no_void_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ cur ? cur + 1 : &pattern[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
+
+static inline
+const struct rte_flow_action *next_no_void_action(
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action *cur)
+{
+ const struct rte_flow_action *next =
+ cur ? cur + 1 : &actions[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
+
+/**
+ * Please be aware there's an assumption for all the parsers.
+ * rte_flow_item is using big endian, rte_flow_attr and
+ * rte_flow_action are using CPU order.
+ * Because the pattern is used to describe the packets,
+ * normally the packets should use network order.
+ */
+
+/**
+ * Parse the rule to see if it is an n-tuple rule,
+ * and get the n-tuple filter info along the way.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * next_proto_id 17 0xFF
+ * UDP/TCP/ src_port 80 0xFFFF
+ * SCTP dst_port 80 0xFFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ struct rte_flow_item_eth eth_null;
+ struct rte_flow_item_vlan vlan_null;
+
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
+ memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+
+ /* the first not void item can be MAC or IPv4 */
+ item = next_no_void_pattern(pattern, NULL);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if ((item->spec || item->mask) &&
+ (memcmp(eth_spec, &eth_null,
+ sizeof(struct rte_flow_item_eth)) ||
+ memcmp(eth_mask, &eth_null,
+ sizeof(struct rte_flow_item_eth)))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 or Vlan */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* the content should be NULL */
+ if ((item->spec || item->mask) &&
+ (memcmp(vlan_spec, &vlan_null,
+ sizeof(struct rte_flow_item_vlan)) ||
+ memcmp(vlan_mask, &vlan_null,
+ sizeof(struct rte_flow_item_vlan)))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->mask) {
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ ipv4_mask = item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ if ((ipv4_mask->hdr.src_addr != 0 &&
+ ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.dst_addr != 0 &&
+ ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
+ ipv4_mask->hdr.next_proto_id != 0)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+ }
+
+ /* check if the next not void item is TCP or UDP */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END &&
+ (!item->spec && !item->mask)) {
+ goto action;
+ }
+
+ /* get the TCP/UDP/SCTP info */
+ if (item->type != RTE_FLOW_ITEM_TYPE_END &&
+ (!item->spec || !item->mask)) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ tcp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ if ((tcp_mask->hdr.src_port != 0 &&
+ tcp_mask->hdr.src_port != UINT16_MAX) ||
+ (tcp_mask->hdr.dst_port != 0 &&
+ tcp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ tcp_spec = item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ udp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ if ((udp_mask->hdr.src_port != 0 &&
+ udp_mask->hdr.src_port != UINT16_MAX) ||
+ (udp_mask->hdr.dst_port != 0 &&
+ udp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ sctp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ } else {
+ goto action;
+ }
+
+ /* check if the next not void item is END */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+action:
+
+ /**
+ * n-tuple only supports forwarding,
+ * check if the first not void action is QUEUE.
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ item, "Not supported action.");
+ return -rte_errno;
+ }
+ filter->queue =
+ ((const struct rte_flow_action_queue *)act->conf)->index;
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+ filter->priority = (uint16_t)attr->priority;
+ if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
+ attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
+ filter->priority = 1;
+
+ return 0;
+}
+
+/* a txgbe-specific function because the flags field is specific */
+static int
+txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* txgbe doesn't support tcp flags */
+ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* txgbe doesn't support many priorities */
+ if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
+ filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Priority not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= dev->data->nb_rx_queues)
+ return -rte_errno;
+
+ /* fixed value for txgbe */
+ filter->flags = RTE_5TUPLE_FLAGS;
+ return 0;
+}
+
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 04/37] net/txgbe: support ntuple filter remove operation
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (2 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 03/37] net/txgbe: add ntuple parse rule Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 05/37] net/txgbe: support ethertype filter add and delete Jiawen Wu
` (33 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Support remove operation on ntuple filter.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 11 +++++++++++
drivers/net/txgbe/txgbe_ethdev.h | 2 ++
2 files changed, 13 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index fe32194a7..1804802e5 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -4524,6 +4524,17 @@ txgbe_filter_restore(struct rte_eth_dev *dev)
return 0;
}
+/* remove all the n-tuple filters */
+void
+txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ struct txgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ txgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure = txgbe_dev_configure,
.dev_infos_get = txgbe_dev_info_get,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index caccf342d..c82f0c832 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -337,6 +337,8 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
+void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
+
int txgbe_vt_check(struct txgbe_hw *hw);
int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 05/37] net/txgbe: support ethertype filter add and delete
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (3 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 04/37] net/txgbe: support ntuple filter remove operation Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 06/37] net/txgbe: add ethertype parse rule Jiawen Wu
` (32 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Support add and delete operations on ethertype filter.
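A hedged usage sketch of the new internal entry point (the call site,
EtherType and queue are illustrative; IPv4/IPv6 EtherTypes, MAC compare
and the drop flag are rejected by this driver):

    struct rte_eth_ethertype_filter ef = {
        .ether_type = 0x88CC,   /* example: LLDP */
        .flags = 0,             /* no MAC compare, no drop */
        .queue = 0,
    };
    int ret;

    ret = txgbe_add_del_ethertype_filter(dev, &ef, true);   /* add */
    ret = txgbe_add_del_ethertype_filter(dev, &ef, false);  /* delete */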
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 111 +++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ethdev.h | 19 ++++++
2 files changed, 130 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 1804802e5..b479c9152 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -3983,6 +3983,77 @@ txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
return 0;
}
+int
+txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ uint32_t etqf = 0;
+ uint32_t etqs = 0;
+ int ret;
+ struct txgbe_ethertype_filter ethertype_filter;
+
+ if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+ PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+ " ethertype filter.", filter->ether_type);
+ return -EINVAL;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+ return -EINVAL;
+ }
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ PMD_DRV_LOG(ERR, "drop option is unsupported.");
+ return -EINVAL;
+ }
+
+ ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret >= 0 && add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+ filter->ether_type);
+ return -EEXIST;
+ }
+ if (ret < 0 && !add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ if (add) {
+ etqf = TXGBE_ETFLT_ENA;
+ etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
+ etqs |= TXGBE_ETCLS_QPID(filter->queue);
+ etqs |= TXGBE_ETCLS_QENA;
+
+ ethertype_filter.ethertype = filter->ether_type;
+ ethertype_filter.etqf = etqf;
+ ethertype_filter.etqs = etqs;
+ ethertype_filter.conf = FALSE;
+ ret = txgbe_ethertype_filter_insert(filter_info,
+ &ethertype_filter);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype filters are full.");
+ return -ENOSPC;
+ }
+ } else {
+ ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+ if (ret < 0)
+ return -ENOSYS;
+ }
+ wr32(hw, TXGBE_ETFLT(ret), etqf);
+ wr32(hw, TXGBE_ETCLS(ret), etqs);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
u8 **mc_addr_ptr, u32 *vmdq)
@@ -4516,10 +4587,30 @@ txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
}
}
+/* restore ethernet type filter */
+static inline void
+txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ int i;
+
+ for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ wr32(hw, TXGBE_ETFLT(i),
+ filter_info->ethertype_filters[i].etqf);
+ wr32(hw, TXGBE_ETCLS(i),
+ filter_info->ethertype_filters[i].etqs);
+ txgbe_flush(hw);
+ }
+ }
+}
+
static int
txgbe_filter_restore(struct rte_eth_dev *dev)
{
txgbe_ntuple_filter_restore(dev);
+ txgbe_ethertype_filter_restore(dev);
return 0;
}
@@ -4535,6 +4626,26 @@ txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
txgbe_remove_5tuple_filter(dev, p_5tuple);
}
+/* remove all the ether type filters */
+void
+txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ int i;
+
+ for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+ if (filter_info->ethertype_mask & (1 << i) &&
+ !filter_info->ethertype_filters[i].conf) {
+ (void)txgbe_ethertype_filter_remove(filter_info,
+ (uint8_t)i);
+ wr32(hw, TXGBE_ETFLT(i), 0);
+ wr32(hw, TXGBE_ETCLS(i), 0);
+ txgbe_flush(hw);
+ }
+ }
+}
+
static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure = txgbe_dev_configure,
.dev_infos_get = txgbe_dev_info_get,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index c82f0c832..d89af2150 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -317,6 +317,10 @@ bool txgbe_rss_update_sp(enum txgbe_mac_type mac_type);
int txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter,
bool add);
+int txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+
void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
@@ -337,6 +341,7 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
+void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
int txgbe_vt_check(struct txgbe_hw *hw);
@@ -382,6 +387,20 @@ txgbe_ethertype_filter_insert(struct txgbe_filter_info *filter_info,
return (i < TXGBE_ETF_ID_MAX ? i : -1);
}
+static inline int
+txgbe_ethertype_filter_remove(struct txgbe_filter_info *filter_info,
+ uint8_t idx)
+{
+ if (idx >= TXGBE_ETF_ID_MAX)
+ return -1;
+ filter_info->ethertype_mask &= ~(1 << idx);
+ filter_info->ethertype_filters[idx].ethertype = 0;
+ filter_info->ethertype_filters[idx].etqf = 0;
+ filter_info->ethertype_filters[idx].etqs = 0;
+ filter_info->ethertype_filters[idx].conf = FALSE;
+ return idx;
+}
+
/* High threshold controlling when to start sending XOFF frames. */
#define TXGBE_FC_XOFF_HITH 128 /*KB*/
/* Low threshold controlling when to start sending XON frames. */
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 06/37] net/txgbe: add ethertype parse rule
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (4 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 05/37] net/txgbe: support ethertype filter add and delete Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 07/37] net/txgbe: support syn filter add and delete Jiawen Wu
` (31 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support to parse flow for ethertype filter.
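A hedged sketch of a flow this parser accepts (port_id and the
EtherType are illustrative assumptions; the attribute priority and
group must be 0):

    struct rte_flow_item_eth eth_spec = {
        .type = rte_cpu_to_be_16(0x88CC),   /* example EtherType */
    };
    struct rte_flow_item_eth eth_mask = {
        .type = rte_cpu_to_be_16(0xFFFF),   /* full EtherType mask */
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH,
          .spec = &eth_spec, .mask = &eth_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_queue queue = { .index = 0 };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_attr attr = { .ingress = 1 };
    struct rte_flow_error err;
    struct rte_flow *flow = rte_flow_create(port_id, &attr,
                                            pattern, actions, &err);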
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_flow.c | 250 +++++++++++++++++++++++++++++++++
1 file changed, 250 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 6f8be3b7f..fc2505ddc 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -534,3 +534,253 @@ txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Parse the rule to see if it is an ethertype rule,
+ * and get the ethertype filter info along the way.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH type 0x0807 0xFFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ item = next_no_void_pattern(pattern, NULL);
+ /* The first non-void item should be MAC. */
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+ (!rte_is_zero_ether_addr(&eth_mask->dst) &&
+ !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ether address mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ /* Check if the next non-void item is END. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter.");
+ return -rte_errno;
+ }
+
+ /* Parse action */
+
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* Parse attr */
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = cons_parse_ethertype_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* txgbe doesn't support MAC address. */
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= dev->data->nb_rx_queues) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue index much too big");
+ return -rte_errno;
+ }
+
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "IPv4/IPv6 not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "mac compare is unsupported");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "drop option is unsupported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 07/37] net/txgbe: support syn filter add and delete
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (5 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 06/37] net/txgbe: add ethertype parse rule Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 08/37] net/txgbe: add syn filter parse rule Jiawen Wu
` (30 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Support add and delete operations on syn filter.
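A hedged usage sketch of the new internal entry point (the call site
and queue are illustrative; only one SYN filter can be enabled per port
at a time):

    struct rte_eth_syn_filter sf = {
        .hig_pri = 0,   /* normal priority vs other filters */
        .queue = 1,     /* rx queue for TCP SYN packets */
    };
    int ret;

    ret = txgbe_syn_filter_set(dev, &sf, true);   /* add */
    ret = txgbe_syn_filter_set(dev, &sf, false);  /* delete */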
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 70 ++++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ethdev.h | 6 +++
2 files changed, 76 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index b479c9152..e2599e429 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -3696,6 +3696,44 @@ txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
return 0;
}
+int
+txgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ uint32_t syn_info;
+ uint32_t synqf;
+
+ if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ syn_info = filter_info->syn_info;
+
+ if (add) {
+ if (syn_info & TXGBE_SYNCLS_ENA)
+ return -EINVAL;
+ synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
+ synqf |= TXGBE_SYNCLS_ENA;
+
+ if (filter->hig_pri)
+ synqf |= TXGBE_SYNCLS_HIPRIO;
+ else
+ synqf &= ~TXGBE_SYNCLS_HIPRIO;
+ } else {
+ synqf = rd32(hw, TXGBE_SYNCLS);
+ if (!(syn_info & TXGBE_SYNCLS_ENA))
+ return -ENOENT;
+ synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
+ }
+
+ filter_info->syn_info = synqf;
+ wr32(hw, TXGBE_SYNCLS, synqf);
+ txgbe_flush(hw);
+ return 0;
+}
+
static inline enum txgbe_5tuple_protocol
convert_protocol_type(uint8_t protocol_value)
{
@@ -4606,11 +4644,28 @@ txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
}
}
+/* restore SYN filter */
+static inline void
+txgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+ uint32_t synqf;
+
+ synqf = filter_info->syn_info;
+
+ if (synqf & TXGBE_SYNCLS_ENA) {
+ wr32(hw, TXGBE_SYNCLS, synqf);
+ txgbe_flush(hw);
+ }
+}
+
static int
txgbe_filter_restore(struct rte_eth_dev *dev)
{
txgbe_ntuple_filter_restore(dev);
txgbe_ethertype_filter_restore(dev);
+ txgbe_syn_filter_restore(dev);
return 0;
}
@@ -4646,6 +4701,21 @@ txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
}
}
+/* remove the SYN filter */
+void
+txgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+ if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
+ filter_info->syn_info = 0;
+
+ wr32(hw, TXGBE_SYNCLS, 0);
+ txgbe_flush(hw);
+ }
+}
+
static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure = txgbe_dev_configure,
.dev_infos_get = txgbe_dev_info_get,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index d89af2150..bb53dd74a 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -164,6 +164,8 @@ struct txgbe_filter_info {
/* Bit mask for every used 5tuple filter */
uint32_t fivetuple_mask[TXGBE_5TUPLE_ARRAY_SIZE];
struct txgbe_5tuple_filter_list fivetuple_list;
+ /* store the SYN filter info */
+ uint32_t syn_info;
};
/* The configuration of bandwidth */
@@ -320,6 +322,9 @@ int txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
int txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter,
bool add);
+int txgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
@@ -343,6 +348,7 @@ uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
+void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
int txgbe_vt_check(struct txgbe_hw *hw);
int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 08/37] net/txgbe: add syn filter parse rule
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (6 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 07/37] net/txgbe: support syn filter add and delete Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 09/37] net/txgbe: add L2 tunnel filter init and uninit Jiawen Wu
` (29 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support to parse flow for syn filter.
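A hedged sketch of a flow this parser accepts (port_id and the queue
are illustrative assumptions; the ETH and IPV4 items must carry no
spec/mask, and the TCP mask must cover exactly the SYN flag):

    struct rte_flow_item_tcp tcp_spec = {
        .hdr = { .tcp_flags = RTE_TCP_SYN_FLAG },
    };
    struct rte_flow_item_tcp tcp_mask = {
        .hdr = { .tcp_flags = RTE_TCP_SYN_FLAG },
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_TCP,
          .spec = &tcp_spec, .mask = &tcp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_queue queue = { .index = 1 };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_attr attr = { .ingress = 1 };
    struct rte_flow_error err;
    struct rte_flow *flow = rte_flow_create(port_id, &attr,
                                            pattern, actions, &err);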
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_flow.c | 256 +++++++++++++++++++++++++++++++++
1 file changed, 256 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index fc2505ddc..7110e594b 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -784,3 +784,259 @@ txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Parse the rule to see if it is a TCP SYN rule,
+ * and get the TCP SYN filter info along the way.
+ * pattern:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * TCP tcp_flags 0x02 0xFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_action_queue *act_q;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+
+ /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+ item = next_no_void_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* if the item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN address mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is IPv4 or IPv6 */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* if the item is IP, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is TCP */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. Only support SYN. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+ if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
+ tcp_mask->hdr.src_port ||
+ tcp_mask->hdr.dst_port ||
+ tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the first not void action is QUEUE. */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Support 2 priorities, the lowest or highest. */
+ if (!attr->priority) {
+ filter->hig_pri = 0;
+ } else if (attr->priority == (uint32_t)~0U) {
+ filter->hig_pri = 1;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+txgbe_parse_syn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = cons_parse_syn_filter(attr, pattern,
+ actions, filter, error);
+
+ if (filter->queue >= dev->data->nb_rx_queues) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "queue index out of range");
+ return -rte_errno;
+ }
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 09/37] net/txgbe: add L2 tunnel filter init and uninit
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (7 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 08/37] net/txgbe: add syn filter parse rule Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 10/37] net/txgbe: config L2 tunnel filter with e-tag Jiawen Wu
` (28 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add L2 tunnel filter init and uninit.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 65 ++++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ethdev.h | 32 ++++++++++++++++
2 files changed, 97 insertions(+)
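The filter store added here pairs an rte_hash (key to slot index) with a flat pointer array indexed by that slot, so a lookup costs one hash probe plus one array read. A minimal sketch of the pattern, using hypothetical local names:

/* insert: rte_hash_add_key() returns the slot the key occupies */
int pos = rte_hash_add_key(hash_handle, &key);
if (pos >= 0)
	hash_map[pos] = filter;

/* lookup: the same slot index fetches the filter pointer in O(1) */
pos = rte_hash_lookup(hash_handle, &key);
filter = (pos >= 0) ? hash_map[pos] : NULL;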
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index e2599e429..79f1f9535 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -88,6 +88,8 @@ static const struct reg_info *txgbe_regs_others[] = {
txgbe_regs_diagnostic,
NULL};
+static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
+static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
@@ -687,6 +689,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
+ /* initialize l2 tunnel filter list & hash */
+ txgbe_l2_tn_filter_init(eth_dev);
+
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
@@ -723,6 +728,63 @@ static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
+ struct txgbe_l2_tn_filter *l2_tn_filter;
+
+ if (l2_tn_info->hash_map)
+ rte_free(l2_tn_info->hash_map);
+ if (l2_tn_info->hash_handle)
+ rte_hash_free(l2_tn_info->hash_handle);
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
+ l2_tn_filter,
+ entries);
+ rte_free(l2_tn_filter);
+ }
+
+ return 0;
+}
+
+static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
+ char l2_tn_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters l2_tn_hash_params = {
+ .name = l2_tn_hash_name,
+ .entries = TXGBE_MAX_L2_TN_FILTER_NUM,
+ .key_len = sizeof(struct txgbe_l2_tn_key),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&l2_tn_info->l2_tn_list);
+ snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
+ "l2_tn_%s", TDEV_NAME(eth_dev));
+ l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
+ if (!l2_tn_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
+ return -EINVAL;
+ }
+ l2_tn_info->hash_map = rte_zmalloc("txgbe",
+ sizeof(struct txgbe_l2_tn_filter *) *
+ TXGBE_MAX_L2_TN_FILTER_NUM,
+ 0);
+ if (!l2_tn_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for L2 TN hash map!");
+ return -ENOMEM;
+ }
+ l2_tn_info->e_tag_en = FALSE;
+ l2_tn_info->e_tag_fwd_en = FALSE;
+ l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
+
+ return 0;
+}
+
static int
eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
@@ -1802,6 +1864,9 @@ txgbe_dev_close(struct rte_eth_dev *dev)
rte_free(dev->data->hash_mac_addrs);
dev->data->hash_mac_addrs = NULL;
+ /* remove all the L2 tunnel filters & hash */
+ txgbe_l2_tn_filter_uninit(dev);
+
/* Remove all ntuple filters of the device */
txgbe_ntuple_filter_uninit(dev);
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index bb53dd74a..bd4ddab5a 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -9,7 +9,10 @@
#include "base/txgbe.h"
#include "txgbe_ptypes.h"
+#include <rte_flow.h>
#include <rte_time.h>
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
/* need update link, bit flag */
#define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -56,6 +59,8 @@
#define TXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define TXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+#define TXGBE_MAX_L2_TN_FILTER_NUM 128
+
/* structure for interrupt relative data */
struct txgbe_interrupt {
uint32_t flags;
@@ -168,6 +173,28 @@ struct txgbe_filter_info {
uint32_t syn_info;
};
+struct txgbe_l2_tn_key {
+ enum rte_eth_tunnel_type l2_tn_type;
+ uint32_t tn_id;
+};
+
+struct txgbe_l2_tn_filter {
+ TAILQ_ENTRY(txgbe_l2_tn_filter) entries;
+ struct txgbe_l2_tn_key key;
+ uint32_t pool;
+};
+
+TAILQ_HEAD(txgbe_l2_tn_filter_list, txgbe_l2_tn_filter);
+
+struct txgbe_l2_tn_info {
+ struct txgbe_l2_tn_filter_list l2_tn_list;
+ struct txgbe_l2_tn_filter **hash_map;
+ struct rte_hash *hash_handle;
+ bool e_tag_en; /* e-tag enabled */
+ bool e_tag_fwd_en; /* e-tag based forwarding enabled */
+ uint16_t e_tag_ether_type; /* ether type for e-tag */
+};
+
/* The configuration of bandwidth */
struct txgbe_bw_conf {
uint8_t tc_num; /* Number of TCs. */
@@ -188,6 +215,7 @@ struct txgbe_adapter {
struct txgbe_vf_info *vfdata;
struct txgbe_uta_info uta_info;
struct txgbe_filter_info filter;
+ struct txgbe_l2_tn_info l2_tn;
struct txgbe_bw_conf bw_conf;
bool rx_bulk_alloc_allowed;
struct rte_timecounter systime_tc;
@@ -233,6 +261,10 @@ struct txgbe_adapter {
#define TXGBE_DEV_FILTER(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->filter)
+
+#define TXGBE_DEV_L2_TN(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->l2_tn)
+
#define TXGBE_DEV_BW_CONF(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->bw_conf)
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 10/37] net/txgbe: config L2 tunnel filter with e-tag
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (8 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 09/37] net/txgbe: add L2 tunnel filter init and uninit Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 11/37] net/txgbe: support L2 tunnel filter add and delete Jiawen Wu
` (27 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Configure L2 tunnel filter with e-tag.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 63 ++++++++++++++++++++++++++++++++
1 file changed, 63 insertions(+)
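The three l2_tn_info fields consumed here all default to off at this point in the series; later patches set them when an E-tag filter or forwarding rule is installed, and txgbe_l2_tunnel_conf() then replays the state into hardware on every dev_start. A sketch of the state being restored (RTE_ETHER_TYPE_ETAG is 0x893F):

l2_tn_info->e_tag_en = TRUE;          /* mirrored into TXGBE_PORTCTL_ETAG */
l2_tn_info->e_tag_fwd_en = TRUE;      /* selects e-tag pool steering in TXGBE_POOLCTL */
l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;  /* written to TXGBE_EXTAG */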
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 79f1f9535..0be894b7b 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -112,6 +112,7 @@ static void txgbe_dev_interrupt_delayed_handler(void *param);
static void txgbe_configure_msix(struct rte_eth_dev *dev);
static int txgbe_filter_restore(struct rte_eth_dev *dev);
+static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
#define TXGBE_SET_HWSTRIP(h, q) do {\
uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
@@ -1675,6 +1676,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
/* resume enabled intr since hw reset */
txgbe_enable_intr(dev);
+ txgbe_l2_tunnel_conf(dev);
txgbe_filter_restore(dev);
/*
@@ -4678,6 +4680,52 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
return 0;
}
+/* Update e-tag ether type */
+static int
+txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
+ uint16_t ether_type)
+{
+ uint32_t etag_etype;
+
+ etag_etype = rd32(hw, TXGBE_EXTAG);
+ etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
+ etag_etype |= ether_type;
+ wr32(hw, TXGBE_EXTAG, etag_etype);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+/* Enable e-tag tunnel */
+static int
+txgbe_e_tag_enable(struct txgbe_hw *hw)
+{
+ uint32_t port_ctl;
+
+ port_ctl = rd32(hw, TXGBE_PORTCTL);
+ port_ctl |= TXGBE_PORTCTL_ETAG;
+ wr32(hw, TXGBE_PORTCTL, port_ctl);
+ txgbe_flush(hw);
+
+ return 0;
+}
+
+static int
+txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
+{
+ int ret = 0;
+ uint32_t ctrl;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ ctrl = rd32(hw, TXGBE_POOLCTL);
+ ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
+ if (en)
+ ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
+ wr32(hw, TXGBE_POOLCTL, ctrl);
+
+ return ret;
+}
+
/* restore n-tuple filter */
static inline void
txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
@@ -4735,6 +4783,21 @@ txgbe_filter_restore(struct rte_eth_dev *dev)
return 0;
}
+static void
+txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (l2_tn_info->e_tag_en)
+ (void)txgbe_e_tag_enable(hw);
+
+ if (l2_tn_info->e_tag_fwd_en)
+ (void)txgbe_e_tag_forwarding_en_dis(dev, 1);
+
+ (void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
/* remove all the n-tuple filters */
void
txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 11/37] net/txgbe: support L2 tunnel filter add and delete
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (9 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 10/37] net/txgbe: config L2 tunnel filter with e-tag Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 12/37] net/txgbe: add L2 tunnel filter parse rule Jiawen Wu
` (26 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Support L2 tunnel filter add and delete operations.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 230 +++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ethdev.h | 18 +++
2 files changed, 248 insertions(+)
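A caller-side sketch of the new API; the field values are illustrative, and the pool is a VF id for a VF-directed rule or pci_dev->max_vfs for the PF:

struct txgbe_l2_tunnel_conf conf = {
	.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
	.tunnel_id = 0x1309,	/* 14-bit GRP + e_cid_base tag */
	.pool = 1,		/* destination pool */
};
int ret = txgbe_dev_l2_tunnel_filter_add(dev, &conf, FALSE);
if (ret == 0)
	ret = txgbe_dev_l2_tunnel_filter_del(dev, &conf);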
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 0be894b7b..94fa03182 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -4710,6 +4710,219 @@ txgbe_e_tag_enable(struct txgbe_hw *hw)
return 0;
}
+static int
+txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
+ struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t i, rar_entries;
+ uint32_t rar_low, rar_high;
+
+ rar_entries = hw->mac.num_rar_entries;
+
+ for (i = 1; i < rar_entries; i++) {
+ wr32(hw, TXGBE_ETHADDRIDX, i);
+ rar_high = rd32(hw, TXGBE_ETHADDRH);
+ rar_low = rd32(hw, TXGBE_ETHADDRL);
+ if ((rar_high & TXGBE_ETHADDRH_VLD) &&
+ (rar_high & TXGBE_ETHADDRH_ETAG) &&
+ (TXGBE_ETHADDRL_ETAG(rar_low) ==
+ l2_tunnel->tunnel_id)) {
+ wr32(hw, TXGBE_ETHADDRL, 0);
+ wr32(hw, TXGBE_ETHADDRH, 0);
+
+ txgbe_clear_vmdq(hw, i, BIT_MASK32);
+
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int
+txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
+ struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t i, rar_entries;
+ uint32_t rar_low, rar_high;
+
+ /* One entry for one tunnel. Try to remove potential existing entry. */
+ txgbe_e_tag_filter_del(dev, l2_tunnel);
+
+ rar_entries = hw->mac.num_rar_entries;
+
+ for (i = 1; i < rar_entries; i++) {
+ wr32(hw, TXGBE_ETHADDRIDX, i);
+ rar_high = rd32(hw, TXGBE_ETHADDRH);
+ if (rar_high & TXGBE_ETHADDRH_VLD)
+ continue;
+
+ txgbe_set_vmdq(hw, i, l2_tunnel->pool);
+ rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
+ rar_low = l2_tunnel->tunnel_id;
+
+ wr32(hw, TXGBE_ETHADDRL, rar_low);
+ wr32(hw, TXGBE_ETHADDRH, rar_high);
+
+ return ret;
+ }
+
+ PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rules is full."
+ " Please remove a rule before adding a new one.");
+ return -EINVAL;
+}
+
+static inline struct txgbe_l2_tn_filter *
+txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
+ struct txgbe_l2_tn_key *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return l2_tn_info->hash_map[ret];
+}
+
+static inline int
+txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+ struct txgbe_l2_tn_filter *l2_tn_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(l2_tn_info->hash_handle,
+ &l2_tn_filter->key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert L2 tunnel filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+ TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+ return 0;
+}
+
+static inline int
+txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+ struct txgbe_l2_tn_key *key)
+{
+ int ret;
+ struct txgbe_l2_tn_filter *l2_tn_filter;
+
+ ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "No such L2 tunnel filter to delete %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_filter = l2_tn_info->hash_map[ret];
+ l2_tn_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+ rte_free(l2_tn_filter);
+
+ return 0;
+}
+
+/* Add l2 tunnel filter */
+int
+txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+ struct txgbe_l2_tunnel_conf *l2_tunnel,
+ bool restore)
+{
+ int ret;
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ struct txgbe_l2_tn_key key;
+ struct txgbe_l2_tn_filter *node;
+
+ if (!restore) {
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+
+ node = txgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+ if (node) {
+ PMD_DRV_LOG(ERR,
+ "The L2 tunnel filter already exists!");
+ return -EINVAL;
+ }
+
+ node = rte_zmalloc("txgbe_l2_tn",
+ sizeof(struct txgbe_l2_tn_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+
+ rte_memcpy(&node->key,
+ &key,
+ sizeof(struct txgbe_l2_tn_key));
+ node->pool = l2_tunnel->pool;
+ ret = txgbe_insert_l2_tn_filter(l2_tn_info, node);
+ if (ret < 0) {
+ rte_free(node);
+ return ret;
+ }
+ }
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = txgbe_e_tag_filter_add(dev, l2_tunnel);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!restore && ret < 0)
+ (void)txgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
+ return ret;
+}
+
+/* Delete l2 tunnel filter */
+int
+txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+ struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+ int ret;
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ struct txgbe_l2_tn_key key;
+
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+ ret = txgbe_remove_l2_tn_filter(l2_tn_info, &key);
+ if (ret < 0)
+ return ret;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = txgbe_e_tag_filter_del(dev, l2_tunnel);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static int
txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
{
@@ -4773,12 +4986,29 @@ txgbe_syn_filter_restore(struct rte_eth_dev *dev)
}
}
+/* restore L2 tunnel filter */
+static inline void
+txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ struct txgbe_l2_tn_filter *node;
+ struct txgbe_l2_tunnel_conf l2_tn_conf;
+
+ TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+ l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = node->key.tn_id;
+ l2_tn_conf.pool = node->pool;
+ (void)txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+ }
+}
+
static int
txgbe_filter_restore(struct rte_eth_dev *dev)
{
txgbe_ntuple_filter_restore(dev);
txgbe_ethertype_filter_restore(dev);
txgbe_syn_filter_restore(dev);
+ txgbe_l2_tn_filter_restore(dev);
return 0;
}
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index bd4ddab5a..577fc616c 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -358,6 +358,24 @@ int txgbe_syn_filter_set(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter,
bool add);
+/**
+ * L2 tunnel configuration.
+ */
+struct txgbe_l2_tunnel_conf {
+ enum rte_eth_tunnel_type l2_tunnel_type;
+ uint16_t ether_type; /* ether type in l2 header */
+ uint32_t tunnel_id; /* port tag id for e-tag */
+ uint16_t vf_id; /* VF id for tag insertion */
+ uint32_t pool; /* destination pool for tag based forwarding */
+};
+
+int
+txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+ struct txgbe_l2_tunnel_conf *l2_tunnel,
+ bool restore);
+int
+txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+ struct txgbe_l2_tunnel_conf *l2_tunnel);
void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 12/37] net/txgbe: add L2 tunnel filter parse rule
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (10 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 11/37] net/txgbe: support L2 tunnel filter add and delete Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 13/37] net/txgbe: add FDIR filter init and uninit Jiawen Wu
` (25 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support to parse flow rules for the L2 tunnel filter.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_flow.c | 202 +++++++++++++++++++++++++++++++++
1 file changed, 202 insertions(+)
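Matching the doc comment in the patch, an E_TAG pattern item for grp 0x1 and e_cid_base 0x309 would be populated as below (a sketch; the GRP field occupies bits 13:12 of the big-endian rsvd_grp_ecid_b word, e_cid_base bits 11:0):

struct rte_flow_item_e_tag e_tag_spec = {
	.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
};
struct rte_flow_item_e_tag e_tag_mask = {
	.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),	/* GRP + e_cid_base */
};
struct rte_flow_item item = {
	.type = RTE_FLOW_ITEM_TYPE_E_TAG,
	.spec = &e_tag_spec,
	.mask = &e_tag_mask,
};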
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 7110e594b..8589e3328 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -15,6 +15,8 @@
#include <rte_common.h>
#include <rte_cycles.h>
+#include <rte_bus_pci.h>
+#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
@@ -1040,3 +1042,203 @@ txgbe_parse_syn_filter(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Parse the rule to see if it is an L2 tunnel rule,
+ * and fill in the L2 tunnel filter info along the way.
+ * Only E-tag is supported now.
+ * pattern:
+ * The first not void item can be E_TAG.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be VF or PF.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM		Spec		Mask
+ * E_TAG	grp		0x1	0x3
+ *		e_cid_base	0x309	0xFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct txgbe_l2_tunnel_conf *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_e_tag *e_tag_spec;
+ const struct rte_flow_item_e_tag *e_tag_mask;
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_vf *act_vf;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* The first not void item should be e-tag. */
+ item = next_no_void_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
+ memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ if (!item->spec || !item->mask) {
+ memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ e_tag_spec = item->spec;
+ e_tag_mask = item->mask;
+
+ /* Only care about GRP and E cid base. */
+ if (e_tag_mask->epcp_edei_in_ecid_b ||
+ e_tag_mask->in_ecid_e ||
+ e_tag_mask->ecid_e ||
+ e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
+ memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+ /**
+ * grp and e_cid_base are bit fields and only use 14 bits.
+ * e-tag id is taken as little endian by HW.
+ */
+ filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
+
+ /* check if the next not void item is END */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* check if the first not void action is VF or PF. */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
+ act->type != RTE_FLOW_ACTION_TYPE_PF) {
+ memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
+ act_vf = (const struct rte_flow_action_vf *)act->conf;
+ filter->pool = act_vf->id;
+ } else {
+ filter->pool = pci_dev->max_vfs;
+ }
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct txgbe_l2_tunnel_conf *l2_tn_filter,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ ret = cons_parse_l2_tn_filter(dev, attr, pattern,
+ actions, l2_tn_filter, error);
+
+ return ret;
+}
+
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 13/37] net/txgbe: add FDIR filter init and uninit.
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (11 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 12/37] net/txgbe: add L2 tunnel filter parse rule Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 14/37] net/txgbe: configure FDIR filter Jiawen Wu
` (24 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add flow director filter init and uninit operations.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/base/txgbe_type.h | 29 +++++++++++++
drivers/net/txgbe/txgbe_ethdev.c | 63 +++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ethdev.h | 51 +++++++++++++++++++++++
3 files changed, 143 insertions(+)
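Because the hash table created below uses sizeof(struct txgbe_atr_input) as its key length and keys are compared bytewise, a filter key must be zeroed in full before its fields are filled in; a stray padding byte would make two logically identical filters hash or compare differently. A sketch:

struct txgbe_atr_input key;

memset(&key, 0, sizeof(key));		/* every byte is part of the key */
key.src_port = rte_cpu_to_be_16(1000);	/* illustrative values */
key.dst_port = rte_cpu_to_be_16(80);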
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 69aa8993a..b9d31ab83 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -67,6 +67,35 @@ enum {
#define TXGBE_ATR_HASH_MASK 0x7fff
+/* Flow Director ATR input struct. */
+struct txgbe_atr_input {
+ /*
+ * Byte layout in order, all values with MSB first:
+ *
+ * vm_pool - 1 byte
+ * flow_type - 1 byte
+ * pkt_type - 2 bytes
+ * dst_ip - 16 bytes
+ * src_ip - 16 bytes
+ * src_port - 2 bytes
+ * dst_port - 2 bytes
+ * flex_bytes - 2 bytes
+ * bkt_hash - 2 bytes
+ */
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 pkt_type;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 bkt_hash;
+};
+
enum txgbe_eeprom_type {
txgbe_eeprom_unknown = 0,
txgbe_eeprom_spi,
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 94fa03182..4f4e51fe1 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -88,6 +88,8 @@ static const struct reg_info *txgbe_regs_others[] = {
txgbe_regs_diagnostic,
NULL};
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
@@ -690,6 +692,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
+ /* initialize flow director filter list & hash */
+ txgbe_fdir_filter_init(eth_dev);
+
/* initialize l2 tunnel filter list & hash */
txgbe_l2_tn_filter_init(eth_dev);
@@ -729,6 +734,26 @@ static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+ struct txgbe_fdir_filter *fdir_filter;
+
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_handle)
+ rte_hash_free(fdir_info->hash_handle);
+
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ return 0;
+}
+
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
{
struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
@@ -749,6 +774,41 @@ static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = TXGBE_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct txgbe_atr_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", TDEV_NAME(eth_dev));
+ fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("txgbe",
+ sizeof(struct txgbe_fdir_filter *) *
+ TXGBE_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ return -ENOMEM;
+ }
+ fdir_info->mask_added = FALSE;
+
+ return 0;
+}
+
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
{
struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
@@ -1866,6 +1926,9 @@ txgbe_dev_close(struct rte_eth_dev *dev)
rte_free(dev->data->hash_mac_addrs);
dev->data->hash_mac_addrs = NULL;
+ /* remove all the fdir filters & hash */
+ txgbe_fdir_filter_uninit(dev);
+
/* remove all the L2 tunnel filters & hash */
txgbe_l2_tn_filter_uninit(dev);
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 577fc616c..059b0164b 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -59,8 +59,55 @@
#define TXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define TXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+#define TXGBE_MAX_FDIR_FILTER_NUM (1024 * 32)
#define TXGBE_MAX_L2_TN_FILTER_NUM 128
+/*
+ * Information about the fdir mode.
+ */
+struct txgbe_hw_fdir_mask {
+ uint16_t vlan_tci_mask;
+ uint32_t src_ipv4_mask;
+ uint32_t dst_ipv4_mask;
+ uint16_t src_ipv6_mask;
+ uint16_t dst_ipv6_mask;
+ uint16_t src_port_mask;
+ uint16_t dst_port_mask;
+ uint16_t flex_bytes_mask;
+ uint8_t mac_addr_byte_mask;
+ uint32_t tunnel_id_mask;
+ uint8_t tunnel_type_mask;
+};
+
+struct txgbe_fdir_filter {
+ TAILQ_ENTRY(txgbe_fdir_filter) entries;
+ struct txgbe_atr_input input; /* key of fdir filter */
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t fdirhash; /* hash value for fdir */
+ uint8_t queue; /* assigned rx queue */
+};
+
+/* list of fdir filters */
+TAILQ_HEAD(txgbe_fdir_filter_list, txgbe_fdir_filter);
+
+struct txgbe_hw_fdir_info {
+ struct txgbe_hw_fdir_mask mask;
+ uint8_t flex_bytes_offset;
+ uint16_t collision;
+ uint16_t free;
+ uint16_t maxhash;
+ uint8_t maxlen;
+ uint64_t add;
+ uint64_t remove;
+ uint64_t f_add;
+ uint64_t f_remove;
+ struct txgbe_fdir_filter_list fdir_list; /* filter list */
+ /* store the pointers of the filters, index is the hash value. */
+ struct txgbe_fdir_filter **hash_map;
+ struct rte_hash *hash_handle; /* cuckoo hash handler */
+ bool mask_added; /* If already got mask from consistent filter */
+};
+
/* structure for interrupt relative data */
struct txgbe_interrupt {
uint32_t flags;
@@ -206,6 +253,7 @@ struct txgbe_bw_conf {
struct txgbe_adapter {
struct txgbe_hw hw;
struct txgbe_hw_stats stats;
+ struct txgbe_hw_fdir_info fdir;
struct txgbe_interrupt intr;
struct txgbe_stat_mappings stat_mappings;
struct txgbe_vfta shadow_vfta;
@@ -238,6 +286,9 @@ struct txgbe_adapter {
#define TXGBE_DEV_INTR(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->intr)
+#define TXGBE_DEV_FDIR(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->fdir)
+
#define TXGBE_DEV_STAT_MAPPINGS(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->stat_mappings)
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 14/37] net/txgbe: configure FDIR filter
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (12 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 13/37] net/txgbe: add FDIR filter init and uninit Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 15/37] net/txgbe: support FDIR add and delete operations Jiawen Wu
` (23 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Configure the flow director filter when it is enabled.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/base/txgbe_type.h | 6 +
drivers/net/txgbe/meson.build | 1 +
drivers/net/txgbe/txgbe_ethdev.c | 6 +
drivers/net/txgbe/txgbe_ethdev.h | 6 +
drivers/net/txgbe/txgbe_fdir.c | 407 ++++++++++++++++++++++++++++
5 files changed, 426 insertions(+)
create mode 100644 drivers/net/txgbe/txgbe_fdir.c
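The configure path below is driven entirely by dev_conf.fdir_conf, i.e. by what the application hands to rte_eth_dev_configure(). A minimal application-side sketch with illustrative values:

struct rte_eth_conf port_conf = { 0 };

port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
port_conf.fdir_conf.pballoc = RTE_FDIR_PBALLOC_64K;	/* 8k - 1 filters */
port_conf.fdir_conf.status = RTE_FDIR_REPORT_STATUS;
port_conf.fdir_conf.drop_queue = 127;
rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);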
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index b9d31ab83..633692cd7 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -21,6 +21,8 @@
#define TXGBE_MAX_QP (128)
#define TXGBE_MAX_UTA 128
+#define TXGBE_FDIR_INIT_DONE_POLL 10
+
#define TXGBE_ALIGN 128 /* as intel did */
#include "txgbe_status.h"
@@ -65,6 +67,10 @@ enum {
#define TXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
#define TXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
+/* Software ATR hash keys */
+#define TXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define TXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+
#define TXGBE_ATR_HASH_MASK 0x7fff
/* Flow Director ATR input struct. */
diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build
index 45379175d..bb1683631 100644
--- a/drivers/net/txgbe/meson.build
+++ b/drivers/net/txgbe/meson.build
@@ -6,6 +6,7 @@ objs = [base_objs]
sources = files(
'txgbe_ethdev.c',
+ 'txgbe_fdir.c',
'txgbe_flow.c',
'txgbe_ptypes.c',
'txgbe_pf.c',
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 4f4e51fe1..df2efe80f 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -1633,6 +1633,12 @@ txgbe_dev_start(struct rte_eth_dev *dev)
txgbe_configure_port(dev);
txgbe_configure_dcb(dev);
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ err = txgbe_fdir_configure(dev);
+ if (err)
+ goto error;
+ }
+
/* Restore vf rate limit */
if (vfinfo != NULL) {
for (vf = 0; vf < pci_dev->max_vfs; vf++)
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 059b0164b..5a5d9c4d3 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -13,6 +13,7 @@
#include <rte_time.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
+#include <rte_ethdev.h>
/* need update link, bit flag */
#define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -430,6 +431,11 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
+/*
+ * Flow director function prototypes
+ */
+int txgbe_fdir_configure(struct rte_eth_dev *dev);
+int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
void txgbe_configure_pb(struct rte_eth_dev *dev);
void txgbe_configure_port(struct rte_eth_dev *dev);
void txgbe_configure_dcb(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
new file mode 100644
index 000000000..df6125d4a
--- /dev/null
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+
+#include "txgbe_logs.h"
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+
+#define TXGBE_DEFAULT_FLEXBYTES_OFFSET 12 /*default flexbytes offset in bytes*/
+#define TXGBE_MAX_FLX_SOURCE_OFF 62
+
+#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
+ uint8_t ipv6_addr[16]; \
+ uint8_t i; \
+ rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
+ (ipv6m) = 0; \
+ for (i = 0; i < sizeof(ipv6_addr); i++) { \
+ if (ipv6_addr[i] == UINT8_MAX) \
+ (ipv6m) |= 1 << i; \
+ else if (ipv6_addr[i] != 0) { \
+ PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
+ return -EINVAL; \
+ } \
+ } \
+} while (0)
+
+#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
+ uint8_t ipv6_addr[16]; \
+ uint8_t i; \
+ for (i = 0; i < sizeof(ipv6_addr); i++) { \
+ if ((ipv6m) & (1 << i)) \
+ ipv6_addr[i] = UINT8_MAX; \
+ else \
+ ipv6_addr[i] = 0; \
+ } \
+ rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
+} while (0)
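These two macros compress a 16-byte IPv6 mask into one bit per byte and expand it back; each byte must be all-ones or all-zero, anything else fails with -EINVAL. A sketch of the round trip (it must sit in a function returning int, since IPV6_ADDR_TO_MASK can return):

uint8_t m6[16] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
uint16_t compact = 0;

IPV6_ADDR_TO_MASK(m6, compact);	/* compact == 0x00FF: bit i <=> byte i */
IPV6_MASK_TO_ADDR(compact, m6);	/* expands back to the same 16 bytes */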
+
+/**
+ * Initialize Flow Director control registers
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+static int
+txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl)
+{
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Prime the keys for hashing */
+ wr32(hw, TXGBE_FDIRBKTHKEY, TXGBE_ATR_BUCKET_HASH_KEY);
+ wr32(hw, TXGBE_FDIRSIGHKEY, TXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
+ */
+ fdirctrl |= TXGBE_FDIRCTL_MAXLEN(0xA) |
+ TXGBE_FDIRCTL_FULLTHR(4);
+
+ /*
+ * Poll init-done after we write the register. Estimated times:
+ * 10G: PBALLOC = 11b, timing is 60us
+ * 1G: PBALLOC = 11b, timing is 600us
+ * 100M: PBALLOC = 11b, timing is 6ms
+ *
+ * Multiply these timings by 4 if under full Rx load
+ *
+ * So we'll poll for TXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+ * 1 msec per poll time. If we're at line rate and drop to 100M, then
+ * this might not finish in our poll time, but we can live with that
+ * for now.
+ */
+ wr32(hw, TXGBE_FDIRCTL, fdirctrl);
+ txgbe_flush(hw);
+ for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (rd32(hw, TXGBE_FDIRCTL) & TXGBE_FDIRCTL_INITDONE)
+ break;
+ msec_delay(1);
+ }
+
+ if (i >= TXGBE_FDIR_INIT_DONE_POLL) {
+ PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * Set appropriate bits in fdirctrl for: variable reporting levels, moving
+ * flexbytes matching field, and drop queue (only for perfect matching mode).
+ */
+static inline int
+configure_fdir_flags(const struct rte_fdir_conf *conf,
+ uint32_t *fdirctrl, uint32_t *flex)
+{
+ *fdirctrl = 0;
+ *flex = 0;
+
+ switch (conf->pballoc) {
+ case RTE_FDIR_PBALLOC_64K:
+ /* 8k - 1 signature filters */
+ *fdirctrl |= TXGBE_FDIRCTL_BUF_64K;
+ break;
+ case RTE_FDIR_PBALLOC_128K:
+ /* 16k - 1 signature filters */
+ *fdirctrl |= TXGBE_FDIRCTL_BUF_128K;
+ break;
+ case RTE_FDIR_PBALLOC_256K:
+ /* 32k - 1 signature filters */
+ *fdirctrl |= TXGBE_FDIRCTL_BUF_256K;
+ break;
+ default:
+ /* bad value */
+ PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
+ return -EINVAL;
+ }
+
+ /* status flags: write hash & swindex in the rx descriptor */
+ switch (conf->status) {
+ case RTE_FDIR_NO_REPORT_STATUS:
+ /* do nothing, default mode */
+ break;
+ case RTE_FDIR_REPORT_STATUS:
+ /* report status when the packet matches a fdir rule */
+ *fdirctrl |= TXGBE_FDIRCTL_REPORT_MATCH;
+ break;
+ case RTE_FDIR_REPORT_STATUS_ALWAYS:
+ /* always report status */
+ *fdirctrl |= TXGBE_FDIRCTL_REPORT_ALWAYS;
+ break;
+ default:
+ /* bad value */
+ PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
+ return -EINVAL;
+ }
+
+ *flex |= TXGBE_FDIRFLEXCFG_BASE_MAC;
+ *flex |= TXGBE_FDIRFLEXCFG_OFST(TXGBE_DEFAULT_FLEXBYTES_OFFSET / 2);
+
+ switch (conf->mode) {
+ case RTE_FDIR_MODE_SIGNATURE:
+ break;
+ case RTE_FDIR_MODE_PERFECT:
+ *fdirctrl |= TXGBE_FDIRCTL_PERFECT;
+ *fdirctrl |= TXGBE_FDIRCTL_DROPQP(conf->drop_queue);
+ break;
+ default:
+ /* bad value */
+ PMD_INIT_LOG(ERR, "Invalid fdir_conf->mode value");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline uint32_t
+reverse_fdir_bmks(uint16_t hi_dword, uint16_t lo_dword)
+{
+ uint32_t mask = hi_dword << 16;
+
+ mask |= lo_dword;
+ mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+ mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+ mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+ return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
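Note that the four swap stages reverse the bit order within each 16-bit half independently; there is no final half-word swap, so the destination-port mask stays in the upper half and the source-port mask in the lower half, each bit-reversed. For example:

/* bit 0 of each half moves to bit 15 of that half */
uint32_t r = reverse_fdir_bmks(0x0001, 0x0001);	/* r == 0x80008000 */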
+
+int
+txgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+ /*
+ * Mask VM pool and DIPv6 since they are currently not supported.
+ * The FLEX byte mask will be set in flex_conf.
+ */
+ uint32_t fdirm = TXGBE_FDIRMSK_POOL;
+ uint32_t fdirtcpm; /* TCP source and destination port masks. */
+ uint32_t fdiripv6m; /* IPv6 source and destination masks. */
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (mode != RTE_FDIR_MODE_SIGNATURE &&
+ mode != RTE_FDIR_MODE_PERFECT) {
+ PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+ return -ENOTSUP;
+ }
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ */
+ if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0) {
+ /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+ fdirm |= TXGBE_FDIRMSK_L4P;
+ }
+
+ /* TBD: don't support encapsulation yet */
+ wr32(hw, TXGBE_FDIRMSK, fdirm);
+
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = reverse_fdir_bmks(rte_be_to_cpu_16(info->mask.dst_port_mask),
+ rte_be_to_cpu_16(info->mask.src_port_mask));
+
+ /* write all the same so that UDP, TCP and SCTP use the same mask
+ * (little-endian)
+ */
+ wr32(hw, TXGBE_FDIRTCPMSK, ~fdirtcpm);
+ wr32(hw, TXGBE_FDIRUDPMSK, ~fdirtcpm);
+ wr32(hw, TXGBE_FDIRSCTPMSK, ~fdirtcpm);
+
+ /* Store source and destination IPv4 masks (big-endian) */
+ wr32(hw, TXGBE_FDIRSIP4MSK, ~info->mask.src_ipv4_mask);
+ wr32(hw, TXGBE_FDIRDIP4MSK, ~info->mask.dst_ipv4_mask);
+
+ if (mode == RTE_FDIR_MODE_SIGNATURE) {
+ /*
+ * Store source and destination IPv6 masks (bit reversed)
+ */
+ fdiripv6m = TXGBE_FDIRIP6MSK_DST(info->mask.dst_ipv6_mask) |
+ TXGBE_FDIRIP6MSK_SRC(info->mask.src_ipv6_mask);
+
+ wr32(hw, TXGBE_FDIRIP6MSK, ~fdiripv6m);
+ }
+
+ return 0;
+}
+
+static int
+txgbe_fdir_store_input_mask(struct rte_eth_dev *dev)
+{
+ struct rte_eth_fdir_masks *input_mask =
+ &dev->data->dev_conf.fdir_conf.mask;
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ uint16_t dst_ipv6m = 0;
+ uint16_t src_ipv6m = 0;
+
+ if (mode != RTE_FDIR_MODE_SIGNATURE &&
+ mode != RTE_FDIR_MODE_PERFECT) {
+ PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+ return -ENOTSUP;
+ }
+
+ memset(&info->mask, 0, sizeof(struct txgbe_hw_fdir_mask));
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+ info->mask.src_port_mask = input_mask->src_port_mask;
+ info->mask.dst_port_mask = input_mask->dst_port_mask;
+ info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+ info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
+ info->mask.src_ipv6_mask = src_ipv6m;
+ info->mask.dst_ipv6_mask = dst_ipv6m;
+
+ return 0;
+}
+
+/*
+ * txgbe_set_fdir_flex_conf - check that the flex payload and mask
+ * configuration arguments are valid, and program them into the hardware
+ */
+static int
+txgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, uint32_t flex)
+{
+ const struct rte_eth_fdir_flex_conf *conf =
+ &dev->data->dev_conf.fdir_conf.flex_conf;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ const struct rte_eth_flex_payload_cfg *flex_cfg;
+ const struct rte_eth_fdir_flex_mask *flex_mask;
+ uint16_t flexbytes = 0;
+ uint16_t i;
+
+ if (conf == NULL) {
+ PMD_DRV_LOG(ERR, "NULL pointer.");
+ return -EINVAL;
+ }
+
+ flex |= TXGBE_FDIRFLEXCFG_DIA;
+
+ for (i = 0; i < conf->nb_payloads; i++) {
+ flex_cfg = &conf->flex_set[i];
+ if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
+ PMD_DRV_LOG(ERR, "unsupported payload type.");
+ return -EINVAL;
+ }
+ if (((flex_cfg->src_offset[0] & 0x1) == 0) &&
+ (flex_cfg->src_offset[1] == flex_cfg->src_offset[0] + 1) &&
+ flex_cfg->src_offset[0] <= TXGBE_MAX_FLX_SOURCE_OFF) {
+ flex &= ~TXGBE_FDIRFLEXCFG_OFST_MASK;
+ flex |=
+ TXGBE_FDIRFLEXCFG_OFST(flex_cfg->src_offset[0] / 2);
+ } else {
+ PMD_DRV_LOG(ERR, "invalid flexbytes arguments.");
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < conf->nb_flexmasks; i++) {
+ flex_mask = &conf->flex_mask[i];
+ if (flex_mask->flow_type != RTE_ETH_FLOW_UNKNOWN) {
+ PMD_DRV_LOG(ERR, "flexmask should be set globally.");
+ return -EINVAL;
+ }
+ flexbytes = (uint16_t)(((flex_mask->mask[1] << 8) & 0xFF00) |
+ ((flex_mask->mask[0]) & 0xFF));
+ if (flexbytes == UINT16_MAX) {
+ flex &= ~TXGBE_FDIRFLEXCFG_DIA;
+ } else if (flexbytes != 0) {
+ /* only an all-ones mask clears TXGBE_FDIRFLEXCFG_DIA; partial masks are invalid */
+ PMD_DRV_LOG(ERR, " invalid flexbytes mask arguments.");
+ return -EINVAL;
+ }
+ }
+
+ info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
+ info->flex_bytes_offset = (uint8_t)(TXGBD_FDIRFLEXCFG_OFST(flex) * 2);
+
+ for (i = 0; i < 64; i++) {
+ uint32_t flexreg;
+ flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4));
+ flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4));
+ flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4);
+ wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg);
+ }
+ return 0;
+}
+
+int
+txgbe_fdir_configure(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int err;
+ uint32_t fdirctrl, flex, pbsize;
+ int i;
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* only signature and perfect matching modes are supported */
+ if (mode != RTE_FDIR_MODE_SIGNATURE &&
+ mode != RTE_FDIR_MODE_PERFECT)
+ return -ENOSYS;
+
+ err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf,
+ &fdirctrl, &flex);
+ if (err)
+ return err;
+
+ /*
+ * Before enabling Flow Director, the Rx Packet Buffer size
+ * must be reduced. The new value is the current size minus
+ * flow director memory usage size.
+ */
+ pbsize = rd32(hw, TXGBE_PBRXSIZE(0));
+ pbsize -= TXGBD_FDIRCTL_BUF_BYTE(fdirctrl);
+ wr32(hw, TXGBE_PBRXSIZE(0), pbsize);
+
+ /*
+ * The defaults in the HW for RX PB 1-7 are not zero and so should be
+ * initialized to zero for non DCB mode otherwise actual total RX PB
+ * would be bigger than programmed and filter space would run into
+ * the PB 0 region.
+ */
+ for (i = 1; i < 8; i++)
+ wr32(hw, TXGBE_PBRXSIZE(i), 0);
+
+ err = txgbe_fdir_store_input_mask(dev);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on setting FD mask");
+ return err;
+ }
+
+ err = txgbe_fdir_set_input_mask(dev);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on setting FD mask");
+ return err;
+ }
+
+ err = txgbe_set_fdir_flex_conf(dev, flex);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
+ return err;
+ }
+
+ err = txgbe_fdir_enable(hw, fdirctrl);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, " Error on enabling FD.");
+ return err;
+ }
+ return 0;
+}
+
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 15/37] net/txgbe: support FDIR add and delete operations
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (13 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 14/37] net/txgbe: configure FDIR filter Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 16/37] net/txgbe: add FDIR parse normal rule Jiawen Wu
` (22 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Support add and delete operations on flow director.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/base/txgbe_type.h | 11 +
drivers/net/txgbe/txgbe_ethdev.h | 16 +
drivers/net/txgbe/txgbe_fdir.c | 472 +++++++++++++++++++++++++++-
3 files changed, 498 insertions(+), 1 deletion(-)
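A caller-side sketch of the programming entry point added here; the values are illustrative, and fully populated rules are what the parse patches later in the series produce:

struct txgbe_fdir_rule rule;

memset(&rule, 0, sizeof(rule));
rule.mode = RTE_FDIR_MODE_PERFECT;
rule.b_spec = TRUE;			/* input, fdirflags, queue are valid */
rule.soft_id = 1;
rule.queue = 2;
rule.input.src_port = rte_cpu_to_be_16(1000);
int ret = txgbe_fdir_filter_program(dev, &rule, FALSE, FALSE);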
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 633692cd7..160d5253a 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -22,6 +22,7 @@
#define TXGBE_MAX_UTA 128
#define TXGBE_FDIR_INIT_DONE_POLL 10
+#define TXGBE_FDIRCMD_CMD_POLL 10
#define TXGBE_ALIGN 128 /* as intel did */
@@ -71,7 +72,17 @@ enum {
#define TXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
#define TXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+/* Software ATR input stream values and masks */
#define TXGBE_ATR_HASH_MASK 0x7fff
+#define TXGBE_ATR_L3TYPE_MASK 0x4
+#define TXGBE_ATR_L3TYPE_IPV4 0x0
+#define TXGBE_ATR_L3TYPE_IPV6 0x4
+#define TXGBE_ATR_L4TYPE_MASK 0x3
+#define TXGBE_ATR_L4TYPE_UDP 0x1
+#define TXGBE_ATR_L4TYPE_TCP 0x2
+#define TXGBE_ATR_L4TYPE_SCTP 0x3
+#define TXGBE_ATR_TUNNEL_MASK 0x10
+#define TXGBE_ATR_TUNNEL_ANY 0x10
/* Flow Director ATR input struct. */
struct txgbe_atr_input {
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 5a5d9c4d3..5e7fd0a86 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -91,6 +91,18 @@ struct txgbe_fdir_filter {
/* list of fdir filters */
TAILQ_HEAD(txgbe_fdir_filter_list, txgbe_fdir_filter);
+struct txgbe_fdir_rule {
+ struct txgbe_hw_fdir_mask mask;
+ struct txgbe_atr_input input; /* key of fdir filter */
+ bool b_spec; /* If TRUE, input, fdirflags, queue have meaning. */
+ bool b_mask; /* If TRUE, mask has meaning. */
+ enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t soft_id; /* a unique value for this rule */
+ uint8_t queue; /* assigned rx queue */
+ uint8_t flex_bytes_offset;
+};
+
struct txgbe_hw_fdir_info {
struct txgbe_hw_fdir_mask mask;
uint8_t flex_bytes_offset;
@@ -436,6 +448,10 @@ void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
*/
int txgbe_fdir_configure(struct rte_eth_dev *dev);
int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int txgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct txgbe_fdir_rule *rule,
+ bool del, bool update);
+
void txgbe_configure_pb(struct rte_eth_dev *dev);
void txgbe_configure_port(struct rte_eth_dev *dev);
void txgbe_configure_dcb(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index df6125d4a..d38e21e9e 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -7,7 +7,7 @@
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>
-
+#include <rte_malloc.h>
#include "txgbe_logs.h"
#include "base/txgbe.h"
@@ -15,6 +15,7 @@
#define TXGBE_DEFAULT_FLEXBYTES_OFFSET 12 /*default flexbytes offset in bytes*/
#define TXGBE_MAX_FLX_SOURCE_OFF 62
+#define TXGBE_FDIRCMD_CMD_INTERVAL_US 10
#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
uint8_t ipv6_addr[16]; \
@@ -405,3 +406,472 @@ txgbe_fdir_configure(struct rte_eth_dev *dev)
return 0;
}
+/*
+ * Note that the bkt_hash field in the txgbe_atr_input structure is never set.
+ *
+ * Compute the hashes for SW ATR
+ * @atr_input: input bitstream to compute the hash on
+ * @key: 32-bit hash key
+ **/
+static uint32_t
+txgbe_atr_compute_hash(struct txgbe_atr_input *atr_input,
+ uint32_t key)
+{
+ /*
+ * The algorithm is as follows:
+ * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
+ * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
+ * and A[n] x B[n] is bitwise AND between same length strings
+ *
+ * K[n] is 16 bits, defined as:
+ * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
+ * for n modulo 32 < 15, K[n] =
+ * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
+ *
+ * S[n] is 16 bits, defined as:
+ * for n >= 15, S[n] = S[n:n - 15]
+ * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
+ *
+ * To simplify for programming, the algorithm is implemented
+ * in software this way:
+ *
+ * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+ *
+ * for (i = 0; i < 352; i+=32)
+ * hi_hash_dword[31:0] ^= Stream[(i+31):i];
+ *
+ * lo_hash_dword[15:0] ^= Stream[15:0];
+ * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
+ * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
+ *
+ * hi_hash_dword[31:0] ^= Stream[351:320];
+ *
+ * if (key[0])
+ * hash[15:0] ^= Stream[15:0];
+ *
+ * for (i = 0; i < 16; i++) {
+ * if (key[i])
+ * hash[15:0] ^= lo_hash_dword[(i+15):i];
+ * if (key[i + 16])
+ * hash[15:0] ^= hi_hash_dword[(i+15):i];
+ * }
+ *
+ */
+ __be32 *dword_stream = (__be32 *)atr_input;
+ __be32 common_hash_dword = 0;
+ u32 hi_hash_dword, lo_hash_dword, flow_pool_ptid;
+ u32 hash_result = 0;
+ u8 i;
+
+ /* record the flow/pool/packet type bits as they are a key part of the hash */
+ flow_pool_ptid = be_to_cpu32(dword_stream[0]);
+
+ /* generate common hash dword */
+ for (i = 1; i <= 10; i++)
+ common_hash_dword ^= dword_stream[i];
+
+ hi_hash_dword = be_to_cpu32(common_hash_dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply (Flow ID/VM Pool/Packet Type) bits to hash words */
+ hi_hash_dword ^= flow_pool_ptid ^ (flow_pool_ptid >> 16);
+
+ /* Process bits 0 and 16 */
+ if (key & 0x0001)
+ hash_result ^= lo_hash_dword;
+ if (key & 0x00010000)
+ hash_result ^= hi_hash_dword;
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the vlan until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_pool_ptid ^ (flow_pool_ptid << 16);
+
+ /* process the remaining 30 bits in the key 2 bits at a time */
+ for (i = 15; i; i--) {
+ if (key & (0x0001 << i))
+ hash_result ^= lo_hash_dword >> i;
+ if (key & (0x00010000 << i))
+ hash_result ^= hi_hash_dword >> i;
+ }
+
+ return hash_result;
+}
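To make the algorithm comment above concrete, the same XOR-fold plus
key-bit selection can be written in isolation. The sketch below mirrors the
driver logic over a plain 11-dword stream, with byte-order conversions
omitted; it is an illustration under those assumptions, not driver code.

	static uint32_t xor_fold_hash(const uint32_t stream[11], uint32_t key)
	{
		uint32_t hi = 0, lo, hash = 0;
		int i;

		/* fold dwords 1..10 into one dword; dword 0 is mixed in later */
		for (i = 1; i <= 10; i++)
			hi ^= stream[i];

		/* the low dword is the word-swapped copy of the fold */
		lo = (hi >> 16) | (hi << 16);

		/* mix the flow/pool/ptype dword into the high copy */
		hi ^= stream[0] ^ (stream[0] >> 16);

		/* key bits 0 and 16 select the unshifted copies */
		if (key & 0x0001)
			hash ^= lo;
		if (key & 0x00010000)
			hash ^= hi;

		/* delayed for the low copy, since key bit 0 is already done */
		lo ^= stream[0] ^ (stream[0] << 16);

		/* each remaining set key bit selects a shifted copy */
		for (i = 15; i; i--) {
			if (key & (0x0001u << i))
				hash ^= lo >> i;
			if (key & (0x00010000u << i))
				hash ^= hi >> i;
		}

		return hash; /* callers mask this down to the bits they need */
	}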
+
+static uint32_t
+atr_compute_perfect_hash(struct txgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc)
+{
+ uint32_t bucket_hash;
+
+ bucket_hash = txgbe_atr_compute_hash(input,
+ TXGBE_ATR_BUCKET_HASH_KEY);
+ if (pballoc == RTE_FDIR_PBALLOC_256K)
+ bucket_hash &= PERFECT_BUCKET_256KB_HASH_MASK;
+ else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ bucket_hash &= PERFECT_BUCKET_128KB_HASH_MASK;
+ else
+ bucket_hash &= PERFECT_BUCKET_64KB_HASH_MASK;
+
+ return TXGBE_FDIRPIHASH_BKT(bucket_hash);
+}
+
+/**
+ * txgbe_fdir_check_cmd_complete - poll to check whether FDIRPICMD is complete
+ * @hw: pointer to hardware structure
+ */
+static inline int
+txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, uint32_t *fdircmd)
+{
+ int i;
+
+ for (i = 0; i < TXGBE_FDIRCMD_CMD_POLL; i++) {
+ *fdircmd = rd32(hw, TXGBE_FDIRPICMD);
+ if (!(*fdircmd & TXGBE_FDIRPICMD_OP_MASK))
+ return 0;
+ rte_delay_us(TXGBE_FDIRCMD_CMD_INTERVAL_US);
+ }
+
+ return -ETIMEDOUT;
+}
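As a worked example of the bound here: each iteration sleeps
TXGBE_FDIRCMD_CMD_INTERVAL_US (10 us), so the worst-case wait before
-ETIMEDOUT is TXGBE_FDIRCMD_CMD_POLL x 10 us; with a poll count of 10 that
would be roughly 100 us. The poll count is defined elsewhere in the driver,
so the exact figure is an assumption.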
+
+/*
+ * Calculate the hash value needed for signature-match filters. In the FreeBSD
+ * driver, this is done by the optimised function
+ * txgbe_atr_compute_sig_hash_raptor(). However, that function cannot be used
+ * here because it does not support calculating a hash for an IPv6 filter.
+ */
+static uint32_t
+atr_compute_signature_hash(struct txgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc)
+{
+ uint32_t bucket_hash, sig_hash;
+
+ bucket_hash = txgbe_atr_compute_hash(input,
+ TXGBE_ATR_BUCKET_HASH_KEY);
+ if (pballoc == RTE_FDIR_PBALLOC_256K)
+ bucket_hash &= SIG_BUCKET_256KB_HASH_MASK;
+ else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ bucket_hash &= SIG_BUCKET_128KB_HASH_MASK;
+ else
+ bucket_hash &= SIG_BUCKET_64KB_HASH_MASK;
+
+ sig_hash = txgbe_atr_compute_hash(input,
+ TXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ return TXGBE_FDIRPIHASH_SIG(sig_hash) |
+ TXGBE_FDIRPIHASH_BKT(bucket_hash);
+}
+
+/**
+ * This version adds the ability to set extra flags in the FDIRPICMD
+ * register, and adds IPv6 support. The hash value is pre-calculated by the
+ * caller, since the pballoc value is needed to compute it.
+ */
+static int
+fdir_write_perfect_filter(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input, uint8_t queue,
+ uint32_t fdircmd, uint32_t fdirhash,
+ enum rte_fdir_mode mode)
+{
+ uint32_t fdirport, fdirflex;
+ int err = 0;
+
+ UNREFERENCED_PARAMETER(mode);
+
+ /* record the IPv4 address (converted to little-endian) */
+ wr32(hw, TXGBE_FDIRPISIP4, be_to_le32(input->src_ip[0]));
+ wr32(hw, TXGBE_FDIRPIDIP4, be_to_le32(input->dst_ip[0]));
+
+ /* record source and destination port (little-endian) */
+ fdirport = TXGBE_FDIRPIPORT_DST(be_to_le16(input->dst_port));
+ fdirport |= TXGBE_FDIRPIPORT_SRC(be_to_le16(input->src_port));
+ wr32(hw, TXGBE_FDIRPIPORT, fdirport);
+
+ /* record pkt_type (little-endian) and flex_bytes(big-endian) */
+ fdirflex = TXGBE_FDIRPIFLEX_FLEX(be_to_npu16(input->flex_bytes));
+ fdirflex |= TXGBE_FDIRPIFLEX_PTYPE(be_to_le16(input->pkt_type));
+ wr32(hw, TXGBE_FDIRPIFLEX, fdirflex);
+
+ /* configure FDIRHASH register */
+ fdirhash |= TXGBE_FDIRPIHASH_VLD;
+ wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ txgbe_flush(hw);
+
+ /* configure FDIRPICMD register */
+ fdircmd |= TXGBE_FDIRPICMD_OP_ADD |
+ TXGBE_FDIRPICMD_UPD |
+ TXGBE_FDIRPICMD_LAST |
+ TXGBE_FDIRPICMD_QPENA;
+ fdircmd |= TXGBE_FDIRPICMD_FT(input->flow_type);
+ fdircmd |= TXGBE_FDIRPICMD_QP(queue);
+ fdircmd |= TXGBE_FDIRPICMD_POOL(input->vm_pool);
+
+ wr32(hw, TXGBE_FDIRPICMD, fdircmd);
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
+
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
+
+ return err;
+}
+
+/**
+ * This function supports setting extra fields in the FDIRPICMD register, and
+ * removes the code that was verifying the flow_type field. According to the
+ * documentation, a flow type of 00 (i.e. not TCP, UDP, or SCTP) is not
+ * supported; however, it appears to work in practice.
+ * Adds a signature hash filter
+ * @hw: pointer to hardware structure
+ * @input: unique input dword
+ * @queue: queue index to direct traffic to
+ * @fdircmd: any extra flags to set in fdircmd register
+ * @fdirhash: pre-calculated hash value for the filter
+ **/
+static int
+fdir_add_signature_filter(struct txgbe_hw *hw,
+ struct txgbe_atr_input *input, uint8_t queue, uint32_t fdircmd,
+ uint32_t fdirhash)
+{
+ int err = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* configure FDIRPICMD register */
+ fdircmd |= TXGBE_FDIRPICMD_OP_ADD |
+ TXGBE_FDIRPICMD_UPD |
+ TXGBE_FDIRPICMD_LAST |
+ TXGBE_FDIRPICMD_QPENA;
+ fdircmd |= TXGBE_FDIRPICMD_FT(input->flow_type);
+ fdircmd |= TXGBE_FDIRPICMD_QP(queue);
+
+ fdirhash |= TXGBE_FDIRPIHASH_VLD;
+ wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
+ wr32(hw, TXGBE_FDIRPICMD, fdircmd);
+
+ PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
+
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Timeout writing flow director filter.");
+
+ return err;
+}
+
+/*
+ * This is modified to take in the hash as a parameter so that
+ * it can be used for removing signature and perfect filters.
+ */
+static int
+fdir_erase_filter_raptor(struct txgbe_hw *hw, uint32_t fdirhash)
+{
+ uint32_t fdircmd = 0;
+ int err = 0;
+
+ wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
+
+ /* flush hash to HW */
+ txgbe_flush(hw);
+
+ /* Query if filter is present */
+ wr32(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_OP_QRY);
+
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0) {
+ PMD_INIT_LOG(ERR, "Timeout querying for flow director filter.");
+ return err;
+ }
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & TXGBE_FDIRPICMD_VLD) {
+ wr32(hw, TXGBE_FDIRPIHASH, fdirhash);
+ txgbe_flush(hw);
+ wr32(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_OP_REM);
+ }
+
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err < 0)
+ PMD_INIT_LOG(ERR, "Timeout erasing flow director filter.");
+
+ return err;
+}
+
+static inline struct txgbe_fdir_filter *
+txgbe_fdir_filter_lookup(struct txgbe_hw_fdir_info *fdir_info,
+ struct txgbe_atr_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+static inline int
+txgbe_insert_fdir_filter(struct txgbe_hw_fdir_info *fdir_info,
+ struct txgbe_fdir_filter *fdir_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_handle, &fdir_filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ fdir_info->hash_map[ret] = fdir_filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
+
+ return 0;
+}
+
+static inline int
+txgbe_remove_fdir_filter(struct txgbe_hw_fdir_info *fdir_info,
+ struct txgbe_atr_input *input)
+{
+ int ret;
+ struct txgbe_fdir_filter *fdir_filter;
+
+ ret = rte_hash_del_key(fdir_info->hash_handle, input);
+ if (ret < 0)
+ return ret;
+
+ fdir_filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
+ rte_free(fdir_filter);
+
+ return 0;
+}
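These three helpers assume that fdir_info->hash_handle and hash_map were set
up elsewhere in the driver (the init path is in an earlier patch of this
series). A hedged sketch of the kind of setup they rely on follows; the table
name and entry count are assumptions, and the keys are the raw
txgbe_atr_input bytes so that lookups compare exactly the bytes the helpers
store.

	#include <rte_hash.h>
	#include <rte_hash_crc.h>

	/* Hypothetical init of the lookup table the helpers rely on. */
	static struct rte_hash *example_fdir_hash_create(void)
	{
		struct rte_hash_parameters params = {
			.name = "txgbe_fdir_example",  /* assumed name */
			.entries = 1024,               /* assumed capacity */
			.key_len = sizeof(struct txgbe_atr_input),
			.hash_func = rte_hash_crc,
			.hash_func_init_val = 0,
			.socket_id = SOCKET_ID_ANY,
		};

		return rte_hash_create(&params);
	}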
+
+int
+txgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct txgbe_fdir_rule *rule,
+ bool del,
+ bool update)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t fdirhash;
+ uint8_t queue;
+ bool is_perfect = FALSE;
+ int err;
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ struct txgbe_fdir_filter *node;
+
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
+ return -ENOTSUP;
+
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT)
+ is_perfect = TRUE;
+
+ if (is_perfect) {
+ if (rule->input.flow_type & TXGBE_ATR_L3TYPE_IPV6) {
+ PMD_DRV_LOG(ERR, "IPv6 is not supported in"
+ " perfect mode!");
+ return -ENOTSUP;
+ }
+ fdirhash = atr_compute_perfect_hash(&rule->input,
+ dev->data->dev_conf.fdir_conf.pballoc);
+ fdirhash |= TXGBE_FDIRPIHASH_IDX(rule->soft_id);
+ } else {
+ fdirhash = atr_compute_signature_hash(&rule->input,
+ dev->data->dev_conf.fdir_conf.pballoc);
+ }
+
+ if (del) {
+ err = txgbe_remove_fdir_filter(info, &rule->input);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR,
+ "No such fdir filter to delete %d!", err);
+ return err;
+ }
+
+ err = fdir_erase_filter_raptor(hw, fdirhash);
+ if (err < 0)
+ PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
+ else
+ PMD_DRV_LOG(DEBUG, "Success to delete FDIR filter!");
+ return err;
+ }
+
+ /* add or update an fdir filter */
+ if (rule->fdirflags & TXGBE_FDIRPICMD_DROP) {
+ if (!is_perfect) {
+ PMD_DRV_LOG(ERR, "Drop option is not supported in"
+ " signature mode.");
+ return -EINVAL;
+ }
+ queue = dev->data->dev_conf.fdir_conf.drop_queue;
+ } else if (rule->queue < TXGBE_MAX_RX_QUEUE_NUM) {
+ queue = rule->queue;
+ } else {
+ return -EINVAL;
+ }
+
+ node = txgbe_fdir_filter_lookup(info, &rule->input);
+ if (node) {
+ if (!update) {
+ PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
+ return -EINVAL;
+ }
+ node->fdirflags = rule->fdirflags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+ } else {
+ node = rte_zmalloc("txgbe_fdir",
+ sizeof(struct txgbe_fdir_filter), 0);
+ if (!node)
+ return -ENOMEM;
+ rte_memcpy(&node->input, &rule->input,
+ sizeof(struct txgbe_atr_input));
+ node->fdirflags = rule->fdirflags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+
+ err = txgbe_insert_fdir_filter(info, node);
+ if (err < 0) {
+ rte_free(node);
+ return err;
+ }
+ }
+
+ if (is_perfect)
+ err = fdir_write_perfect_filter(hw, &node->input,
+ node->queue, node->fdirflags,
+ node->fdirhash, fdir_mode);
+ else
+ err = fdir_add_signature_filter(hw, &node->input,
+ node->queue, node->fdirflags,
+ node->fdirhash);
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
+ txgbe_remove_fdir_filter(info, &rule->input);
+ } else {
+ PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
+ }
+
+ return err;
+}
+
--
2.18.4
* [dpdk-dev] [PATCH 16/37] net/txgbe: add FDIR parse normal rule
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (14 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 15/37] net/txgbe: support FDIR add and delete operations Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 17/37] net/txgbe: add FDIR parse tunnel rule Jiawen Wu
` (21 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support to parse flow for fdir filter.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
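For illustration only (not part of the patch), here is a rule of the shape
this parser accepts: a perfect-match IPv4/UDP filter steering to queue 1, on
a port assumed to be configured with fdir_conf.mode = RTE_FDIR_MODE_PERFECT.
All match values are arbitrary.

	#include <rte_byteorder.h>
	#include <rte_errno.h>
	#include <rte_flow.h>
	#include <rte_ip.h>

	static int example_create_fdir_flow(uint16_t port_id,
					    struct rte_flow_error *err)
	{
		struct rte_flow_item_ipv4 ip_spec = {
			.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
			.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
		};
		struct rte_flow_item_ipv4 ip_mask = {
			.hdr.src_addr = RTE_BE32(0xFFFFFFFF),
			.hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
		};
		struct rte_flow_item_udp udp_spec = {
			.hdr.src_port = RTE_BE16(80),
			.hdr.dst_port = RTE_BE16(80),
		};
		struct rte_flow_item_udp udp_mask = {
			.hdr.src_port = RTE_BE16(0xFFFF),
			.hdr.dst_port = RTE_BE16(0xFFFF),
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH }, /* stack only */
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
			  .spec = &ip_spec, .mask = &ip_mask },
			{ .type = RTE_FLOW_ITEM_TYPE_UDP,
			  .spec = &udp_spec, .mask = &udp_mask },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action_queue queue = { .index = 1 };
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};
		struct rte_flow_attr attr = { .ingress = 1 };

		return rte_flow_create(port_id, &attr, pattern, actions, err) ?
				0 : -rte_errno;
	}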
---
drivers/net/txgbe/base/txgbe_type.h | 10 +
drivers/net/txgbe/txgbe_flow.c | 856 ++++++++++++++++++++++++++++
2 files changed, 866 insertions(+)
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 160d5253a..a73f66d39 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -83,6 +83,16 @@ enum {
#define TXGBE_ATR_L4TYPE_SCTP 0x3
#define TXGBE_ATR_TUNNEL_MASK 0x10
#define TXGBE_ATR_TUNNEL_ANY 0x10
+enum txgbe_atr_flow_type {
+ TXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ TXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ TXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ TXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ TXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ TXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ TXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ TXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+};
/* Flow Director ATR input struct. */
struct txgbe_atr_input {
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 8589e3328..ba1be9f12 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -27,6 +27,7 @@
#define TXGBE_MIN_N_TUPLE_PRIO 1
#define TXGBE_MAX_N_TUPLE_PRIO 7
+#define TXGBE_MAX_FLX_SOURCE_OFF 62
/**
* Endless loop will never happen with below assumption
@@ -1242,3 +1243,858 @@ txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
return ret;
}
+/* Parse to get the attr and action info of flow director rule. */
+static int
+txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct txgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark;
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* check if the first not void action is QUEUE or DROP. */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ rule->queue = act_q->index;
+ } else { /* drop */
+ /* signature mode does not support drop action. */
+ if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+ rule->fdirflags = TXGBE_FDIRPICMD_DROP;
+ }
+
+ /* check if the next not void item is MARK */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+ act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rule->soft_id = 0;
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark = (const struct rte_flow_action_mark *)act->conf;
+ rule->soft_id = mark->id;
+ act = next_no_void_action(actions, act);
+ }
+
+ /* check if the next not void item is END */
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* search next no void pattern and skip fuzzy */
+static inline
+const struct rte_flow_item *next_no_fuzzy_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ next_no_void_pattern(pattern, cur);
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
+ return next;
+ next = next_no_void_pattern(pattern, next);
+ }
+}
+
+static inline uint8_t signature_match(const struct rte_flow_item pattern[])
+{
+ const struct rte_flow_item_fuzzy *spec, *last, *mask;
+ const struct rte_flow_item *item;
+ uint32_t sh, lh, mh;
+ int i = 0;
+
+ while (1) {
+ item = pattern + i;
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ break;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
+ spec = item->spec;
+ last = item->last;
+ mask = item->mask;
+
+ if (!spec || !mask)
+ return 0;
+
+ sh = spec->thresh;
+
+ if (!last)
+ lh = sh;
+ else
+ lh = last->thresh;
+
+ mh = mask->thresh;
+ sh = sh & mh;
+ lh = lh & mh;
+
+ if (!sh || sh > lh)
+ return 0;
+
+ return 1;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
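So, as an illustration, a caller requests signature mode by placing a FUZZY
item anywhere in the pattern before END. A sketch of such an item follows;
the threshold value is arbitrary, since any non-zero value that survives the
mask selects signature mode.

	/* Hypothetical FUZZY item requesting signature-match mode. */
	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xFFFFFFFF };
	struct rte_flow_item fuzzy_item = {
		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
		.spec = &fuzzy_spec,
		.mask = &fuzzy_mask, /* last omitted: lh defaults to sh */
	};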
+
+/**
+ * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
+ * and fill in the flow director filter info along the way.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
+ * The next not void item could be UDP or TCP or SCTP (optional)
+ * The next not void item could be RAW (for flexbyte, optional)
+ * The next not void item must be END.
+ * A Fuzzy Match pattern can appear at any place before END.
+ * Fuzzy Match is optional for IPV4 but is required for IPV6
+ * MAC VLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be MAC VLAN.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP/SCTP pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * UDP/TCP/SCTP src_port 80 0xFFFF
+ * dst_port 80 0xFFFF
+ * FLEX relative 0 0x1
+ * search 0 0x1
+ * reserved 0 0
+ * offset 12 0xFFFFFFFF
+ * limit 0 0xFFFF
+ * length 2 0xFFFF
+ * pattern[0] 0x86 0xFF
+ * pattern[1] 0xDD 0xFF
+ * END
+ * MAC VLAN pattern example:
+ * ITEM Spec Mask
+ * ETH dst_addr
+ * {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
+ * 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * Item->last should be NULL.
+ */
+static int
+txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct txgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec;
+ const struct rte_flow_item_ipv6 *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_item_raw *raw_mask;
+ const struct rte_flow_item_raw *raw_spec;
+ u32 ptype = 0;
+ uint8_t j;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /**
+ * Some fields may not be provided. Set spec to 0 and mask to the default
+ * value, so nothing needs to be done later for the fields not provided.
+ */
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
+ rule->mask.vlan_tci_mask = 0;
+ rule->mask.flex_bytes_mask = 0;
+
+ /**
+ * The first not void item should be
+ * MAC or IPv4 or TCP or UDP or SCTP.
+ */
+ item = next_no_fuzzy_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ if (signature_match(pattern))
+ rule->mode = RTE_FDIR_MODE_SIGNATURE;
+ else
+ rule->mode = RTE_FDIR_MODE_PERFECT;
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /**
+ * Only support vlan and dst MAC address,
+ * others should be masked.
+ */
+ if (item->spec && !item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ if (item->mask) {
+ rule->b_mask = TRUE;
+ eth_mask = item->mask;
+
+ /* Ether type should be masked. */
+ if (eth_mask->type ||
+ rule->mode == RTE_FDIR_MODE_SIGNATURE) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* If ethernet has meaning, it means MAC VLAN mode. */
+ rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
+ /**
+ * src MAC address must be masked,
+ * and don't support dst MAC address mask.
+ */
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j] ||
+ eth_mask->dst.addr_bytes[j] != 0xFF) {
+ memset(rule, 0,
+ sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* When there is no VLAN, it is treated as a full mask. */
+ rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+ }
+ /* If both spec and mask are NULL,
+ * it means don't care about ETH.
+ * Do nothing.
+ */
+
+ /**
+ * Check if the next not void item is vlan or ipv4.
+ * IPv6 is not supported.
+ */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ } else {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+ }
+
+ /* Get the IPV4 info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst addresses,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ ipv4_mask = item->mask;
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.next_proto_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
+ rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv4_spec = item->spec;
+ rule->input.dst_ip[0] =
+ ipv4_spec->hdr.dst_addr;
+ rule->input.src_ip[0] =
+ ipv4_spec->hdr.src_addr;
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the IPV6 info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV6];
+
+ /**
+ * 1. must be signature-match mode
+ * 2. "last" is not supported
+ * 3. mask must not be NULL
+ */
+ if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
+ item->last ||
+ !item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ rule->b_mask = TRUE;
+ ipv6_mask = item->mask;
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check src addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
+ rule->mask.src_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.src_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* check dst addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
+ rule->mask.dst_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv6_spec = item->spec;
+ rte_memcpy(rule->input.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(rule->input.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ tcp_mask = item->mask;
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = tcp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ tcp_spec = item->spec;
+ rule->input.src_port =
+ tcp_spec->hdr.src_port;
+ rule->input.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the UDP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ udp_mask = item->mask;
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = udp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ udp_spec = item->spec;
+ rule->input.src_port =
+ udp_spec->hdr.src_port;
+ rule->input.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the SCTP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
+ ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ sctp_mask = item->mask;
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = sctp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ sctp_spec = item->spec;
+ rule->input.src_port =
+ sctp_spec->hdr.src_port;
+ rule->input.dst_port =
+ sctp_spec->hdr.dst_port;
+ }
+ /* other fields, and even the SCTP ports, are not supported */
+ sctp_mask = item->mask;
+ if (sctp_mask &&
+ (sctp_mask->hdr.src_port ||
+ sctp_mask->hdr.dst_port ||
+ sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum)) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the flex byte info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+ /* Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* spec and mask should not be null */
+ if (!item->mask || !item->spec) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ raw_mask = item->mask;
+
+ /* check mask */
+ if (raw_mask->relative != 0x1 ||
+ raw_mask->search != 0x1 ||
+ raw_mask->reserved != 0x0 ||
+ (uint32_t)raw_mask->offset != 0xffffffff ||
+ raw_mask->limit != 0xffff ||
+ raw_mask->length != 0xffff) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ raw_spec = item->spec;
+
+ /* check spec */
+ if (raw_spec->relative != 0 ||
+ raw_spec->search != 0 ||
+ raw_spec->reserved != 0 ||
+ raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
+ raw_spec->offset % 2 ||
+ raw_spec->limit != 0 ||
+ raw_spec->length != 2 ||
+ /* pattern can't be 0xffff */
+ (raw_spec->pattern[0] == 0xff &&
+ raw_spec->pattern[1] == 0xff)) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check pattern mask */
+ if (raw_mask->pattern[0] != 0xff ||
+ raw_mask->pattern[1] != 0xff) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mask.flex_bytes_mask = 0xffff;
+ rule->input.flex_bytes =
+ (((uint16_t)raw_spec->pattern[1]) << 8) |
+ raw_spec->pattern[0];
+ rule->flex_bytes_offset = raw_spec->offset;
+ }
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ /* check if the next not void item is END */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));
+
+ return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
+
+static int
+txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct txgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
+ actions, rule, error);
+ if (!ret)
+ goto step_next;
+
+step_next:
+
+ if (hw->mac.type == txgbe_mac_raptor &&
+ rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
+ (rule->input.src_port != 0 || rule->input.dst_port != 0))
+ return -ENOTSUP;
+
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
+ return -ENOTSUP;
+
+ if (rule->queue >= dev->data->nb_rx_queues)
+ return -ENOTSUP;
+
+ return ret;
+}
+
--
2.18.4
* [dpdk-dev] [PATCH 17/37] net/txgbe: add FDIR parse tunnel rule
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (15 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 16/37] net/txgbe: add FDIR parse normal rule Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:07 ` [dpdk-dev] [PATCH 18/37] net/txgbe: add FDIR restore operation Jiawen Wu
` (20 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support to parse tunnel flow for fdir filter.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
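For illustration only (not part of the patch), here is a VXLAN pattern of
the shape this parser accepts: the outer ETH/IPv4/UDP/VXLAN items describe
only the protocol stack, and the inner ETH carries the destination MAC to
match. The MAC value is arbitrary, the port is assumed to be configured with
fdir_conf.mode = RTE_FDIR_MODE_PERFECT_TUNNEL, and the actions (QUEUE or
DROP, then END) are the same as for normal rules and are omitted here.

	#include <rte_flow.h>

	/* Inner destination MAC to match; per-byte full mask. */
	static struct rte_flow_item_eth inner_eth_spec = {
		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
	};
	static struct rte_flow_item_eth inner_eth_mask = {
		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	};

	static const struct rte_flow_item tunnel_pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },   /* outer, stack only */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* outer, stack only */
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },   /* outer, stack only */
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,     /* inner MAC match */
		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* VLAN is don't-care */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};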
---
drivers/net/txgbe/base/txgbe_type.h | 8 +
drivers/net/txgbe/txgbe_flow.c | 290 ++++++++++++++++++++++++++++
2 files changed, 298 insertions(+)
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index a73f66d39..22efcef78 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -92,6 +92,14 @@ enum txgbe_atr_flow_type {
TXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
TXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
TXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
};
/* Flow Director ATR input struct. */
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index ba1be9f12..b7d0e08a9 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2064,6 +2064,291 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
+/**
+ * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
+ * and fill in the flow director filter info along the way.
+ * VxLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/ IPV6.
+ * The third not void item must be UDP.
+ * The fourth not void item must be VXLAN.
+ * The next not void item must be END.
+ * NVGRE PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/ IPV6.
+ * The third not void item must be NVGRE.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * VxLAN pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * UDP NULL NULL
+ * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * NVGRE pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * NVGRE protocol 0x6558 0xFFFF
+ * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct txgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *eth_mask;
+ uint32_t j;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /**
+ * Some fields may not be provided. Set spec to 0 and mask to the default
+ * value, so nothing needs to be done later for the fields not provided.
+ */
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
+ rule->mask.vlan_tci_mask = 0;
+
+ /**
+ * The first not void item should be
+ * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
+ */
+ item = next_no_void_pattern(pattern, NULL);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
+
+ /* Skip MAC. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is IPv4 or IPv6. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is UDP or NVGRE. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip UDP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is VxLAN. */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* check if the next not void item is MAC */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /**
+ * Only support vlan and dst MAC address,
+ * others should be masked.
+ */
+
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ eth_mask = item->mask;
+
+ /* Ether type should be masked. */
+ if (eth_mask->type) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* src MAC address should be masked. */
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j]) {
+ memset(rule, 0,
+ sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+ rule->mask.mac_addr_byte_mask = 0;
+ for (j = 0; j < ETH_ADDR_LEN; j++) {
+ /* It's a per byte mask. */
+ if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+ rule->mask.mac_addr_byte_mask |= 0x1 << j;
+ } else if (eth_mask->dst.addr_bytes[j]) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* When there is no VLAN, it is treated as a full mask. */
+ rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+
+ /**
+ * Check if the next not void item is vlan or ipv4.
+ * IPv6 is not supported.
+ */
+ item = next_no_void_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /**
+ * If the VLAN TCI is 0, it means the VLAN is a don't-care.
+ * Do nothing.
+ */
+
+ return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
+
static int
txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -2081,6 +2366,11 @@ txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
if (!ret)
goto step_next;
+ ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
+ actions, rule, error);
+ if (ret)
+ return ret;
+
step_next:
if (hw->mac.type == txgbe_mac_raptor &&
--
2.18.4
* [dpdk-dev] [PATCH 18/37] net/txgbe: add FDIR restore operation
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (16 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 17/37] net/txgbe: add FDIR parse tunnel rule Jiawen Wu
@ 2020-11-03 10:07 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 19/37] net/txgbe: add RSS filter parse rule Jiawen Wu
` (19 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:07 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add restore operation on FDIR filter.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 1 +
drivers/net/txgbe/txgbe_ethdev.h | 2 ++
drivers/net/txgbe/txgbe_fdir.c | 34 ++++++++++++++++++++++++++++++++
3 files changed, 37 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index df2efe80f..a17d7b190 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -5077,6 +5077,7 @@ txgbe_filter_restore(struct rte_eth_dev *dev)
txgbe_ntuple_filter_restore(dev);
txgbe_ethertype_filter_restore(dev);
txgbe_syn_filter_restore(dev);
+ txgbe_fdir_filter_restore(dev);
txgbe_l2_tn_filter_restore(dev);
return 0;
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 5e7fd0a86..88c7d8191 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -469,6 +469,8 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
+void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+
void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index d38e21e9e..2faf7fd84 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -875,3 +875,37 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev,
return err;
}
+/* restore flow director filter */
+void
+txgbe_fdir_filter_restore(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+ struct txgbe_fdir_filter *node;
+ bool is_perfect = FALSE;
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ is_perfect = TRUE;
+
+ if (is_perfect) {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_write_perfect_filter(hw,
+ &node->input,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash,
+ fdir_mode);
+ }
+ } else {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_add_signature_filter(hw,
+ &node->input,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash);
+ }
+ }
+}
+
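The hook added to txgbe_filter_restore() in the ethdev.c hunk above means
the software list maintained by txgbe_fdir_filter_program() is replayed into
hardware whenever the filter set is restored, presumably from the device
start path, so FDIR rules survive a stop/start cycle without user
intervention.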
--
2.18.4
* [dpdk-dev] [PATCH 19/37] net/txgbe: add RSS filter parse rule
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (17 preceding siblings ...)
2020-11-03 10:07 ` [dpdk-dev] [PATCH 18/37] net/txgbe: add FDIR restore operation Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 20/37] net/txgbe: add RSS filter restore operation Jiawen Wu
` (18 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support to parse flow for RSS filter.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
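For illustration only (not part of the patch), here is an RSS action of the
shape this parser accepts: default hash function, level 0, no explicit key
(so the device default key is kept), and two queues. The hash types and
queue indices are arbitrary.

	#include <rte_flow.h>

	static const uint16_t rss_queues[] = { 0, 1 };

	static const struct rte_flow_action_rss rss_action_conf = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 0,          /* outermost encapsulation only */
		.types = ETH_RSS_IP, /* hash on IP addresses */
		.key_len = 0,        /* keep the device's default key */
		.queue_num = 2,
		.queue = rss_queues,
	};

	static const struct rte_flow_action rss_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS,
		  .conf = &rss_action_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};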
---
drivers/net/txgbe/txgbe_ethdev.h | 8 +++
drivers/net/txgbe/txgbe_flow.c | 114 +++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_rxtx.c | 20 ++++++
3 files changed, 142 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 88c7d8191..924c57faf 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -121,6 +121,12 @@ struct txgbe_hw_fdir_info {
bool mask_added; /* If already got mask from consistent filter */
};
+struct txgbe_rte_flow_rss_conf {
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+ uint8_t key[TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
+ uint16_t queue[TXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
+};
+
/* structure for interrupt relative data */
struct txgbe_interrupt {
uint32_t flags;
@@ -480,6 +486,8 @@ int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t tx_rate);
+int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
static inline int
txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info,
uint16_t ethertype)
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index b7d0e08a9..0d90ef33a 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2388,3 +2388,117 @@ txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
return ret;
}
+static int
+txgbe_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct txgbe_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ uint16_t n;
+
+ /**
+ * RSS only supports forwarding,
+ * so check that the first not void action is RSS.
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rss = (const struct rte_flow_action_rss *)act->conf;
+
+ if (!rss || !rss->queue_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->queue_num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (txgbe_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 00214b48f..ab812dbff 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -4633,3 +4633,23 @@ txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
+int
+txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
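For reference, here is a minimal application-side sketch of an RSS rule that satisfies the constraints checked by this parser: default hash function, encapsulation level 0, an empty (or exactly 40-byte) key, and a bounded queue list. The port id, queue ids, and RSS types below are illustrative assumptions, and the rule only takes effect once the create operation is wired up later in this series.

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
rss_rule_sketch(uint16_t port_id, struct rte_flow_error *err)
{
	static uint16_t queues[4] = { 0, 1, 2, 3 };
	struct rte_flow_attr attr = { .ingress = 1 };	/* ingress only */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,	/* only default is accepted */
		.level = 0,				/* outer headers only */
		.types = ETH_RSS_IP,
		.key_len = 0,				/* 0, or exactly 40 bytes */
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}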
* [dpdk-dev] [PATCH 20/37] net/txgbe: add RSS filter restore operation
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (18 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 19/37] net/txgbe: add RSS filter parse rule Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 21/37] net/txgbe: add filter list init and uninit Jiawen Wu
` (17 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add restore operation for the RSS filter.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 12 +++++
drivers/net/txgbe/txgbe_ethdev.h | 7 +++
drivers/net/txgbe/txgbe_rxtx.c | 76 ++++++++++++++++++++++++++++++++
3 files changed, 95 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index a17d7b190..5ed96479e 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -5071,6 +5071,17 @@ txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
}
}
+/* restore rss filter */
+static inline void
+txgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+ if (filter_info->rss_info.conf.queue_num)
+ txgbe_config_rss_filter(dev,
+ &filter_info->rss_info, TRUE);
+}
+
static int
txgbe_filter_restore(struct rte_eth_dev *dev)
{
@@ -5079,6 +5090,7 @@ txgbe_filter_restore(struct rte_eth_dev *dev)
txgbe_syn_filter_restore(dev);
txgbe_fdir_filter_restore(dev);
txgbe_l2_tn_filter_restore(dev);
+ txgbe_rss_filter_restore(dev);
return 0;
}
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 924c57faf..df7a14506 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -237,6 +237,8 @@ struct txgbe_filter_info {
struct txgbe_5tuple_filter_list fivetuple_list;
/* store the SYN filter info */
uint32_t syn_info;
+ /* store the rss filter info */
+ struct txgbe_rte_flow_rss_conf rss_info;
};
struct txgbe_l2_tn_key {
@@ -488,6 +490,11 @@ int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t tx_rate);
int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
const struct rte_flow_action_rss *in);
+int txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
+int txgbe_config_rss_filter(struct rte_eth_dev *dev,
+ struct txgbe_rte_flow_rss_conf *conf, bool add);
+
static inline int
txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info,
uint16_t ethertype)
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index ab812dbff..babda3a79 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -4653,3 +4653,79 @@ txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
return 0;
}
+int
+txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
+int
+txgbe_config_rss_filter(struct rte_eth_dev *dev,
+ struct txgbe_rte_flow_rss_conf *conf, bool add)
+{
+ struct txgbe_hw *hw;
+ uint32_t reta;
+ uint16_t i;
+ uint16_t j;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+ PMD_INIT_FUNC_TRACE();
+ hw = TXGBE_DEV_HW(dev);
+
+ if (!add) {
+ if (txgbe_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
+ txgbe_rss_disable(dev);
+ memset(&filter_info->rss_info, 0,
+ sizeof(struct txgbe_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (filter_info->rss_info.conf.queue_num)
+ return -EINVAL;
+ /* Fill in redirection table
+ * The byte-swap is needed because NIC registers are in
+ * little-endian order.
+ */
+ reta = 0;
+ for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+ if (j == conf->conf.queue_num)
+ j = 0;
+ reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
+ if ((i & 3) == 3)
+ wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+ }
+
+ /* Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ if ((rss_conf.rss_hf & TXGBE_RSS_OFFLOAD_ALL) == 0) {
+ txgbe_rss_disable(dev);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = rss_intel_key; /* Default hash key */
+ txgbe_dev_rss_hash_update(dev, &rss_conf);
+
+ if (txgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
+
+ return 0;
+}
+
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
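The redirection-table loop above packs four 8-bit queue indices into each 32-bit RSSTBL register, lowest entry in the least significant byte; that is why each new entry is inserted at bits 31:24 after a right shift. A standalone sketch of the same packing, with an illustrative queue list and the 128-entry table size used above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t queue[] = { 0, 1, 2, 3 };	/* queues from the RSS action */
	uint32_t reta = 0;
	unsigned int i, j;

	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == 4)	/* wrap around the configured queue list */
			j = 0;
		/* shift older entries toward the LSB, insert at bits 31:24 */
		reta = (reta >> 8) | ((uint32_t)(queue[j] & 0xFF) << 24);
		if ((i & 3) == 3)	/* one register holds four entries */
			printf("RSSTBL[%2u] = 0x%08x\n", i >> 2, reta);
	}
	return 0;
}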
* [dpdk-dev] [PATCH 21/37] net/txgbe: add filter list init and uninit
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (19 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 20/37] net/txgbe: add RSS filter restore operation Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 22/37] net/txgbe: add flow API Jiawen Wu
` (16 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add filter list init and uninit.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 6 ++
drivers/net/txgbe/txgbe_ethdev.h | 8 ++
drivers/net/txgbe/txgbe_flow.c | 127 +++++++++++++++++++++++++++++++
3 files changed, 141 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 5ed96479e..45a785fec 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -698,6 +698,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
/* initialize l2 tunnel filter list & hash */
txgbe_l2_tn_filter_init(eth_dev);
+ /* initialize flow filter lists */
+ txgbe_filterlist_init();
+
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
@@ -1941,6 +1944,9 @@ txgbe_dev_close(struct rte_eth_dev *dev)
/* Remove all ntuple filters of the device */
txgbe_ntuple_filter_uninit(dev);
+ /* clear all the filters list */
+ txgbe_filterlist_flush();
+
return ret;
}
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index df7a14506..e12324404 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -263,6 +263,11 @@ struct txgbe_l2_tn_info {
uint16_t e_tag_ether_type; /* ether type for e-tag */
};
+struct rte_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
/* The configuration of bandwidth */
struct txgbe_bw_conf {
uint8_t tc_num; /* Number of TCs. */
@@ -448,6 +453,9 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
int
txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
struct txgbe_l2_tunnel_conf *l2_tunnel);
+void txgbe_filterlist_init(void);
+void txgbe_filterlist_flush(void);
+
void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 0d90ef33a..77e025863 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -16,6 +16,7 @@
#include <rte_cycles.h>
#include <rte_bus_pci.h>
+#include <rte_malloc.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
@@ -29,6 +30,58 @@
#define TXGBE_MAX_N_TUPLE_PRIO 7
#define TXGBE_MAX_FLX_SOURCE_OFF 62
+/* ntuple filter list structure */
+struct txgbe_ntuple_filter_ele {
+ TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct txgbe_ethertype_filter_ele {
+ TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct txgbe_eth_syn_filter_ele {
+ TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct txgbe_fdir_rule_ele {
+ TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
+ struct txgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct txgbe_eth_l2_tunnel_conf_ele {
+ TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
+ struct txgbe_l2_tunnel_conf filter_info;
+};
+/* rss filter list structure */
+struct txgbe_rss_conf_ele {
+ TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
+ struct txgbe_rte_flow_rss_conf filter_info;
+};
+/* txgbe_flow memory list structure */
+struct txgbe_flow_mem {
+ TAILQ_ENTRY(txgbe_flow_mem) entries;
+ struct rte_flow *flow;
+};
+
+TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
+TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
+TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
+TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
+TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
+TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);
+
+static struct txgbe_ntuple_filter_list filter_ntuple_list;
+static struct txgbe_ethertype_filter_list filter_ethertype_list;
+static struct txgbe_syn_filter_list filter_syn_list;
+static struct txgbe_fdir_rule_filter_list filter_fdir_list;
+static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct txgbe_rss_filter_list filter_rss_list;
+static struct txgbe_flow_mem_list txgbe_flow_list;
+
/**
* Endless loop will never happen with below assumption
* 1. there is at least one no-void item(END)
@@ -2502,3 +2555,77 @@ txgbe_parse_rss_filter(struct rte_eth_dev *dev,
return 0;
}
+void
+txgbe_filterlist_init(void)
+{
+ TAILQ_INIT(&filter_ntuple_list);
+ TAILQ_INIT(&filter_ethertype_list);
+ TAILQ_INIT(&filter_syn_list);
+ TAILQ_INIT(&filter_fdir_list);
+ TAILQ_INIT(&filter_l2_tunnel_list);
+ TAILQ_INIT(&filter_rss_list);
+ TAILQ_INIT(&txgbe_flow_list);
+}
+
+void
+txgbe_filterlist_flush(void)
+{
+ struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+ struct txgbe_rss_conf_ele *rss_filter_ptr;
+
+ while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr,
+ entries);
+ rte_free(ntuple_filter_ptr);
+ }
+
+ while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr,
+ entries);
+ rte_free(ethertype_filter_ptr);
+ }
+
+ while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ rte_free(syn_filter_ptr);
+ }
+
+ while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr,
+ entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+
+ while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr,
+ entries);
+ rte_free(fdir_rule_ptr);
+ }
+
+ while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr,
+ entries);
+ rte_free(rss_filter_ptr);
+ }
+
+ while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
+ TAILQ_REMOVE(&txgbe_flow_list,
+ txgbe_flow_mem_ptr,
+ entries);
+ rte_free(txgbe_flow_mem_ptr->flow);
+ rte_free(txgbe_flow_mem_ptr);
+ }
+}
+
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 22/37] net/txgbe: add flow API
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (20 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 21/37] net/txgbe: add filter list init and uninit Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 23/37] net/txgbe: add flow API create function Jiawen Wu
` (15 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add flow API with generic flow operations, starting with validate.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
doc/guides/nics/features/txgbe.ini | 1 +
doc/guides/nics/txgbe.rst | 1 +
drivers/net/txgbe/txgbe_ethdev.c | 25 ++++++++++++
drivers/net/txgbe/txgbe_ethdev.h | 2 +
drivers/net/txgbe/txgbe_flow.c | 61 ++++++++++++++++++++++++++++++
5 files changed, 90 insertions(+)
diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 7c457fede..9db2ccde0 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -26,6 +26,7 @@ SR-IOV = Y
DCB = Y
VLAN filter = Y
Flow control = Y
+Flow API = Y
Rate limitation = Y
Traffic mirroring = Y
CRC offload = P
diff --git a/doc/guides/nics/txgbe.rst b/doc/guides/nics/txgbe.rst
index cd293698b..5a7299964 100644
--- a/doc/guides/nics/txgbe.rst
+++ b/doc/guides/nics/txgbe.rst
@@ -29,6 +29,7 @@ Features
- IEEE 1588
- FW version
- LRO
+- Generic flow API
Prerequisites
-------------
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 45a785fec..cc061e0d6 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -4234,6 +4234,30 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
return 0;
}
+static int
+txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &txgbe_flow_ops;
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
u8 **mc_addr_ptr, u32 *vmdq)
@@ -5218,6 +5242,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
.reta_query = txgbe_dev_rss_reta_query,
.rss_hash_update = txgbe_dev_rss_hash_update,
.rss_hash_conf_get = txgbe_dev_rss_hash_conf_get,
+ .filter_ctrl = txgbe_dev_filter_ctrl,
.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
.rxq_info_get = txgbe_rxq_info_get,
.txq_info_get = txgbe_txq_info_get,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index e12324404..2b5c0e35d 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -487,6 +487,8 @@ uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+extern const struct rte_flow_ops txgbe_flow_ops;
+
void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 77e025863..884a8545f 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2629,3 +2629,64 @@ txgbe_filterlist_flush(void)
}
}
+/**
+ * Check if the flow rule is supported by txgbe.
+ * It only checks the format. It does not guarantee that the rule can be
+ * programmed into the HW, since there may not be enough room for it.
+ */
+static int
+txgbe_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct txgbe_l2_tunnel_conf l2_tn_filter;
+ struct txgbe_fdir_rule fdir_rule;
+ struct txgbe_rte_flow_rss_conf rss_conf;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, ðertype_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = txgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
+ ret = txgbe_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret)
+ return 0;
+
+ memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+ ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ ret = txgbe_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
+
+ return ret;
+}
+
+const struct rte_flow_ops txgbe_flow_ops = {
+ .validate = txgbe_flow_validate,
+};
+
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
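A sketch of how an application exercises the new validate operation; per the comment above, a zero return only means the rule format is accepted, not that the HW has room for it. The attr/pattern/actions triple is built as in the earlier RSS sketch:

#include <stdio.h>
#include <rte_flow.h>

static int
check_rule(uint16_t port_id, const struct rte_flow_attr *attr,
	   const struct rte_flow_item pattern[],
	   const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	int rc = rte_flow_validate(port_id, attr, pattern, actions, &err);

	if (rc != 0)
		fprintf(stderr, "rule rejected: %s\n",
			err.message ? err.message : "(no message)");
	return rc;
}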
* [dpdk-dev] [PATCH 23/37] net/txgbe: add flow API create function
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (21 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 22/37] net/txgbe: add flow API Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 24/37] net/txgbe: add flow API destroy function Jiawen Wu
` (14 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support for the flow API create operation.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.h | 2 +
drivers/net/txgbe/txgbe_fdir.c | 27 ++++
drivers/net/txgbe/txgbe_flow.c | 257 +++++++++++++++++++++++++++++++
3 files changed, 286 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 2b5c0e35d..f439a201b 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -464,6 +464,8 @@ void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
*/
int txgbe_fdir_configure(struct rte_eth_dev *dev);
int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+ uint16_t offset);
int txgbe_fdir_filter_program(struct rte_eth_dev *dev,
struct txgbe_fdir_rule *rule,
bool del, bool update);
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 2faf7fd84..2342cf681 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -270,6 +270,33 @@ txgbe_fdir_store_input_mask(struct rte_eth_dev *dev)
return 0;
}
+int
+txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+ uint16_t offset)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ uint32_t flexreg, flex;
+ flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4));
+ flex = TXGBE_FDIRFLEXCFG_BASE_MAC;
+ flex |= TXGBE_FDIRFLEXCFG_OFST(offset / 2);
+ flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4));
+ flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4);
+ wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg);
+ }
+
+ txgbe_flush(hw);
+ for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (rd32(hw, TXGBE_FDIRCTL) &
+ TXGBE_FDIRCTL_INITDONE)
+ break;
+ msec_delay(1);
+ }
+ return 0;
+}
+
/*
* txgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
* arguments are valid
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 884a8545f..4141352bf 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2629,6 +2629,262 @@ txgbe_filterlist_flush(void)
}
}
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the first filter it hits.
+ * So, the order of the parsers matters.
+ */
+static struct rte_flow *
+txgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct txgbe_fdir_rule fdir_rule;
+ struct txgbe_l2_tunnel_conf l2_tn_filter;
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+ struct txgbe_rte_flow_rss_conf rss_conf;
+ struct rte_flow *flow = NULL;
+ struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct txgbe_rss_conf_ele *rss_filter_ptr;
+ struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+ uint8_t first_mask = FALSE;
+
+ flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return NULL;
+ }
+ txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
+ sizeof(struct txgbe_flow_mem), 0);
+ if (!txgbe_flow_mem_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ rte_free(flow);
+ return NULL;
+ }
+ txgbe_flow_mem_ptr->flow = flow;
+ TAILQ_INSERT_TAIL(&txgbe_flow_list,
+ txgbe_flow_mem_ptr, entries);
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+
+ if (!ret) {
+ ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+ if (!ret) {
+ ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
+ sizeof(struct txgbe_ntuple_filter_ele), 0);
+ if (!ntuple_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
+ TAILQ_INSERT_TAIL(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ flow->rule = ntuple_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, ðertype_filter, error);
+ if (!ret) {
+ ret = txgbe_add_del_ethertype_filter(dev,
+ ðertype_filter, TRUE);
+ if (!ret) {
+ ethertype_filter_ptr =
+ rte_zmalloc("txgbe_ethertype_filter",
+ sizeof(struct txgbe_ethertype_filter_ele), 0);
+ if (!ethertype_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(ðertype_filter_ptr->filter_info,
+ ðertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
+ TAILQ_INSERT_TAIL(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ flow->rule = ethertype_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = txgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret) {
+ ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
+ if (!ret) {
+ syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
+ sizeof(struct txgbe_eth_syn_filter_ele), 0);
+ if (!syn_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&syn_filter_ptr->filter_info,
+ &syn_filter,
+ sizeof(struct rte_eth_syn_filter));
+ TAILQ_INSERT_TAIL(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ flow->rule = syn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_SYN;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
+ ret = txgbe_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret) {
+ /* A mask cannot be deleted. */
+ if (fdir_rule.b_mask) {
+ if (!fdir_info->mask_added) {
+ /* It's the first time the mask is set. */
+ rte_memcpy(&fdir_info->mask,
+ &fdir_rule.mask,
+ sizeof(struct txgbe_hw_fdir_mask));
+ fdir_info->flex_bytes_offset =
+ fdir_rule.flex_bytes_offset;
+
+ if (fdir_rule.mask.flex_bytes_mask)
+ txgbe_fdir_set_flexbytes_offset(dev,
+ fdir_rule.flex_bytes_offset);
+
+ ret = txgbe_fdir_set_input_mask(dev);
+ if (ret)
+ goto out;
+
+ fdir_info->mask_added = TRUE;
+ first_mask = TRUE;
+ } else {
+ /**
+ * Only support one global mask,
+ * all the masks should be the same.
+ */
+ ret = memcmp(&fdir_info->mask,
+ &fdir_rule.mask,
+ sizeof(struct txgbe_hw_fdir_mask));
+ if (ret)
+ goto out;
+
+ if (fdir_info->flex_bytes_offset !=
+ fdir_rule.flex_bytes_offset)
+ goto out;
+ }
+ }
+
+ if (fdir_rule.b_spec) {
+ ret = txgbe_fdir_filter_program(dev, &fdir_rule,
+ FALSE, FALSE);
+ if (!ret) {
+ fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
+ sizeof(struct txgbe_fdir_rule_ele), 0);
+ if (!fdir_rule_ptr) {
+ PMD_DRV_LOG(ERR,
+ "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&fdir_rule_ptr->filter_info,
+ &fdir_rule,
+ sizeof(struct txgbe_fdir_rule));
+ TAILQ_INSERT_TAIL(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ flow->rule = fdir_rule_ptr;
+ flow->filter_type = RTE_ETH_FILTER_FDIR;
+
+ return flow;
+ }
+
+ if (ret) {
+ /**
+ * clear the mask_added flag if programming
+ * the filter fails
+ */
+ if (first_mask)
+ fdir_info->mask_added = FALSE;
+ goto out;
+ }
+ }
+
+ goto out;
+ }
+
+ memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+ ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+ if (!ret) {
+ ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
+ if (!ret) {
+ l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
+ sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
+ if (!l2_tn_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&l2_tn_filter_ptr->filter_info,
+ &l2_tn_filter,
+ sizeof(struct txgbe_l2_tunnel_conf));
+ TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ flow->rule = l2_tn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
+ return flow;
+ }
+ }
+
+ memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+ ret = txgbe_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
+ if (!ret) {
+ ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
+ if (!ret) {
+ rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
+ sizeof(struct txgbe_rss_conf_ele), 0);
+ if (!rss_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
+ TAILQ_INSERT_TAIL(&filter_rss_list,
+ rss_filter_ptr, entries);
+ flow->rule = rss_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_HASH;
+ return flow;
+ }
+ }
+
+out:
+ TAILQ_REMOVE(&txgbe_flow_list,
+ txgbe_flow_mem_ptr, entries);
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(txgbe_flow_mem_ptr);
+ rte_free(flow);
+ return NULL;
+}
+
/**
* Check if the flow rule is supported by txgbe.
* It only checks the format. It does not guarantee that the rule can be
@@ -2688,5 +2944,6 @@ txgbe_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_ops txgbe_flow_ops = {
.validate = txgbe_flow_validate,
+ .create = txgbe_flow_create,
};
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
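A sketch of driving the create operation with a rule the ethertype parser should pick up; the EtherType value and destination queue below are illustrative assumptions:

#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *
create_ethertype_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.type = rte_cpu_to_be_16(0x88F7),	/* e.g. PTP over Ethernet */
	};
	struct rte_flow_item_eth eth_mask = {
		.type = rte_cpu_to_be_16(0xFFFF),	/* match EtherType only */
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* returns NULL on failure, with the reason reported through 'err' */
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}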
* [dpdk-dev] [PATCH 24/37] net/txgbe: add flow API destroy function
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (22 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 23/37] net/txgbe: add flow API create function Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 25/37] net/txgbe: add flow API flush function Jiawen Wu
` (13 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support for the flow API destroy operation.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_flow.c | 128 +++++++++++++++++++++++++++++++++
1 file changed, 128 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 4141352bf..8d5362175 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2942,8 +2942,136 @@ txgbe_flow_validate(struct rte_eth_dev *dev,
return ret;
}
+/* Destroy a flow rule on txgbe. */
+static int
+txgbe_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct txgbe_fdir_rule fdir_rule;
+ struct txgbe_l2_tunnel_conf l2_tn_filter;
+ struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+ struct txgbe_rss_conf_ele *rss_filter_ptr;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&ntuple_filter,
+ &ntuple_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ntuple_filter));
+ ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(ðertype_filter,
+ ðertype_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ethertype_filter));
+ ret = txgbe_add_del_ethertype_filter(dev,
+ ðertype_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&syn_filter,
+ &syn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_syn_filter));
+ ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
+ rte_memcpy(&fdir_rule,
+ &fdir_rule_ptr->filter_info,
+ sizeof(struct txgbe_fdir_rule));
+ ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ rte_free(fdir_rule_ptr);
+ if (TAILQ_EMPTY(&filter_fdir_list))
+ fdir_info->mask_added = false;
+ }
+ break;
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
+ pmd_flow->rule;
+ rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+ sizeof(struct txgbe_l2_tunnel_conf));
+ ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_HASH:
+ rss_filter_ptr = (struct txgbe_rss_conf_ele *)
+ pmd_flow->rule;
+ ret = txgbe_config_rss_filter(dev,
+ &rss_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr, entries);
+ rte_free(rss_filter_ptr);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to destroy flow");
+ return ret;
+ }
+
+ TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
+ if (txgbe_flow_mem_ptr->flow == pmd_flow) {
+ TAILQ_REMOVE(&txgbe_flow_list,
+ txgbe_flow_mem_ptr, entries);
+ rte_free(txgbe_flow_mem_ptr);
+ }
+ }
+ rte_free(flow);
+
+ return ret;
+}
+
const struct rte_flow_ops txgbe_flow_ops = {
.validate = txgbe_flow_validate,
.create = txgbe_flow_create,
+ .destroy = txgbe_flow_destroy,
};
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
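Tearing a rule down is symmetric: the handle returned by rte_flow_create() is passed back, and on success the PMD removes it from both the HW and its software lists, as shown above. A sketch:

#include <stdio.h>
#include <rte_flow.h>

static void
destroy_rule(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error err;

	if (flow != NULL && rte_flow_destroy(port_id, flow, &err) != 0)
		fprintf(stderr, "destroy failed: %s\n",
			err.message ? err.message : "(no message)");
}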
* [dpdk-dev] [PATCH 25/37] net/txgbe: add flow API flush function
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (23 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 24/37] net/txgbe: add flow API destroy function Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 26/37] net/txgbe: support UDP tunnel port add and delete Jiawen Wu
` (12 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support for the flow API flush operation.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/base/txgbe_hw.c | 87 +++++++++++++++++++++++++++++++
drivers/net/txgbe/base/txgbe_hw.h | 1 +
drivers/net/txgbe/txgbe_ethdev.c | 21 ++++++++
drivers/net/txgbe/txgbe_ethdev.h | 2 +
drivers/net/txgbe/txgbe_fdir.c | 47 +++++++++++++++++
drivers/net/txgbe/txgbe_flow.c | 43 +++++++++++++++
6 files changed, 201 insertions(+)
diff --git a/drivers/net/txgbe/base/txgbe_hw.c b/drivers/net/txgbe/base/txgbe_hw.c
index 5ee13b0f8..dc419d7d4 100644
--- a/drivers/net/txgbe/base/txgbe_hw.c
+++ b/drivers/net/txgbe/base/txgbe_hw.c
@@ -3649,6 +3649,93 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw)
return status;
}
+/**
+ * txgbe_fdir_check_cmd_complete - poll to check whether FDIRPICMD is complete
+ * @hw: pointer to hardware structure
+ * @fdircmd: current value of FDIRCMD register
+ */
+static s32 txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, u32 *fdircmd)
+{
+ int i;
+
+ for (i = 0; i < TXGBE_FDIRCMD_CMD_POLL; i++) {
+ *fdircmd = rd32(hw, TXGBE_FDIRPICMD);
+ if (!(*fdircmd & TXGBE_FDIRPICMD_OP_MASK))
+ return 0;
+ usec_delay(10);
+ }
+
+ return TXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
+
+/**
+ * txgbe_reinit_fdir_tables - Reinitialize Flow Director tables.
+ * @hw: pointer to hardware structure
+ **/
+s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw)
+{
+ s32 err;
+ int i;
+ u32 fdirctrl = rd32(hw, TXGBE_FDIRCTL);
+ u32 fdircmd;
+ fdirctrl &= ~TXGBE_FDIRCTL_INITDONE;
+
+ DEBUGFUNC("txgbe_reinit_fdir_tables");
+
+ /*
+ * Before starting reinitialization process,
+ * FDIRPICMD.OP must be zero.
+ */
+ err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+ if (err) {
+ DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n");
+ return err;
+ }
+
+ wr32(hw, TXGBE_FDIRFREE, 0);
+ txgbe_flush(hw);
+ /*
+ * The adapter's flow director init flow cannot be restarted.
+ * Work around the silicon errata by performing the following steps
+ * before re-writing the FDIRCTL control register with the same value.
+ * - write 1 to bit 8 of FDIRPICMD register &
+ * - write 0 to bit 8 of FDIRPICMD register
+ */
+ wr32m(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_CLR, TXGBE_FDIRPICMD_CLR);
+ txgbe_flush(hw);
+ wr32m(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_CLR, 0);
+ txgbe_flush(hw);
+ /*
+ * Clear FDIR Hash register to clear any leftover hashes
+ * waiting to be programmed.
+ */
+ wr32(hw, TXGBE_FDIRPIHASH, 0x00);
+ txgbe_flush(hw);
+
+ wr32(hw, TXGBE_FDIRCTL, fdirctrl);
+ txgbe_flush(hw);
+
+ /* Poll init-done after we write FDIRCTL register */
+ for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+ if (rd32m(hw, TXGBE_FDIRCTL, TXGBE_FDIRCTL_INITDONE))
+ break;
+ msec_delay(1);
+ }
+ if (i >= TXGBE_FDIR_INIT_DONE_POLL) {
+ DEBUGOUT("Flow Director Signature poll time exceeded!\n");
+ return TXGBE_ERR_FDIR_REINIT_FAILED;
+ }
+
+ /* Clear FDIR statistics registers (read to clear) */
+ rd32(hw, TXGBE_FDIRUSED);
+ rd32(hw, TXGBE_FDIRFAIL);
+ rd32(hw, TXGBE_FDIRMATCH);
+ rd32(hw, TXGBE_FDIRMISS);
+ rd32(hw, TXGBE_FDIRLEN);
+
+ return 0;
+}
+
/**
* txgbe_start_hw_raptor - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
diff --git a/drivers/net/txgbe/base/txgbe_hw.h b/drivers/net/txgbe/base/txgbe_hw.h
index 09298ea0c..a7473e7e5 100644
--- a/drivers/net/txgbe/base/txgbe_hw.h
+++ b/drivers/net/txgbe/base/txgbe_hw.h
@@ -108,5 +108,6 @@ s32 txgbe_init_phy_raptor(struct txgbe_hw *hw);
s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval);
s32 txgbe_prot_autoc_read_raptor(struct txgbe_hw *hw, bool *locked, u64 *value);
s32 txgbe_prot_autoc_write_raptor(struct txgbe_hw *hw, bool locked, u64 value);
+s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw);
bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw);
#endif /* _TXGBE_HW_H_ */
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index cc061e0d6..83c078a2a 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -5186,6 +5186,27 @@ txgbe_clear_syn_filter(struct rte_eth_dev *dev)
}
}
+/* remove all the L2 tunnel filters */
+int
+txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+ struct txgbe_l2_tn_filter *l2_tn_filter;
+ struct txgbe_l2_tunnel_conf l2_tn_conf;
+ int ret = 0;
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
+ l2_tn_conf.pool = l2_tn_filter->pool;
+ ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure = txgbe_dev_configure,
.dev_infos_get = txgbe_dev_info_get,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index f439a201b..1df74ab9b 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -488,12 +488,14 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+int txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);
extern const struct rte_flow_ops txgbe_flow_ops;
void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
+int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
int txgbe_vt_check(struct txgbe_hw *hw);
int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 2342cf681..6ddb023c0 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -902,6 +902,27 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev,
return err;
}
+static int
+txgbe_fdir_flush(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ int ret;
+
+ ret = txgbe_reinit_fdir_tables(hw);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
+ return ret;
+ }
+
+ info->f_add = 0;
+ info->f_remove = 0;
+ info->add = 0;
+ info->remove = 0;
+
+ return ret;
+}
+
/* restore flow director filter */
void
txgbe_fdir_filter_restore(struct rte_eth_dev *dev)
@@ -936,3 +957,29 @@ txgbe_fdir_filter_restore(struct rte_eth_dev *dev)
}
}
+/* remove all the flow director filters */
+int
+txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+ struct txgbe_fdir_filter *fdir_filter;
+ struct txgbe_fdir_filter *filter_flag;
+ int ret = 0;
+
+ /* flush flow director */
+ rte_hash_reset(fdir_info->hash_handle);
+ memset(fdir_info->hash_map, 0,
+ sizeof(struct txgbe_fdir_filter *) * TXGBE_MAX_FDIR_FILTER_NUM);
+ filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ if (filter_flag != NULL)
+ ret = txgbe_fdir_flush(dev);
+
+ return ret;
+}
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 8d5362175..b5f4073e2 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2555,6 +2555,16 @@ txgbe_parse_rss_filter(struct rte_eth_dev *dev,
return 0;
}
+/* remove the rss filter */
+static void
+txgbe_clear_rss_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+ if (filter_info->rss_info.conf.queue_num)
+ txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
+}
+
void
txgbe_filterlist_init(void)
{
@@ -3069,9 +3079,42 @@ txgbe_flow_destroy(struct rte_eth_dev *dev,
return ret;
}
+/* Destroy all flow rules associated with a port on txgbe. */
+static int
+txgbe_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ txgbe_clear_all_ntuple_filter(dev);
+ txgbe_clear_all_ethertype_filter(dev);
+ txgbe_clear_syn_filter(dev);
+
+ ret = txgbe_clear_all_fdir_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ret = txgbe_clear_all_l2_tn_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ txgbe_clear_rss_filter(dev);
+
+ txgbe_filterlist_flush();
+
+ return 0;
+}
+
const struct rte_flow_ops txgbe_flow_ops = {
.validate = txgbe_flow_validate,
.create = txgbe_flow_create,
.destroy = txgbe_flow_destroy,
+ .flush = txgbe_flow_flush,
};
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
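A sketch of the flush path from the application side, typically used before closing the port; one call drops every rule regardless of filter type:

#include <stdio.h>
#include <rte_flow.h>

static void
flush_rules(uint16_t port_id)
{
	struct rte_flow_error err;

	if (rte_flow_flush(port_id, &err) != 0)
		fprintf(stderr, "flush failed: %s\n",
			err.message ? err.message : "(no message)");
}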
* [dpdk-dev] [PATCH 26/37] net/txgbe: support UDP tunnel port add and delete
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (24 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 25/37] net/txgbe: add flow API flush function Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 27/37] net/txgbe: add TM configuration init and uninit Jiawen Wu
` (11 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Support UDP tunnel port add and delete operations.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 105 +++++++++++++++++++++++++++++++
1 file changed, 105 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 83c078a2a..337ebf2e0 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -5038,6 +5038,109 @@ txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
return ret;
}
+/* Add UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret = 0;
+
+ if (udp_tunnel == NULL)
+ return -EINVAL;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (udp_tunnel->udp_port == 0) {
+ PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+ ret = -EINVAL;
+ break;
+ }
+ wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
+ wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (udp_tunnel->udp_port == 0) {
+ PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
+ ret = -EINVAL;
+ break;
+ }
+ wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
+ break;
+ case RTE_TUNNEL_TYPE_TEREDO:
+ if (udp_tunnel->udp_port == 0) {
+ PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
+ ret = -EINVAL;
+ break;
+ }
+ wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ txgbe_flush(hw);
+
+ return ret;
+}
+
+/* Remove UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret = 0;
+ uint16_t cur_port;
+
+ if (udp_tunnel == NULL)
+ return -EINVAL;
+
+ switch (udp_tunnel->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
+ if (cur_port != udp_tunnel->udp_port) {
+ PMD_DRV_LOG(ERR, "Port %u does not exist.",
+ udp_tunnel->udp_port);
+ ret = -EINVAL;
+ break;
+ }
+ wr32(hw, TXGBE_VXLANPORT, 0);
+ wr32(hw, TXGBE_VXLANPORTGPE, 0);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
+ if (cur_port != udp_tunnel->udp_port) {
+ PMD_DRV_LOG(ERR, "Port %u does not exist.",
+ udp_tunnel->udp_port);
+ ret = -EINVAL;
+ break;
+ }
+ wr32(hw, TXGBE_GENEVEPORT, 0);
+ break;
+ case RTE_TUNNEL_TYPE_TEREDO:
+ cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
+ if (cur_port != udp_tunnel->udp_port) {
+ PMD_DRV_LOG(ERR, "Port %u does not exist.",
+ udp_tunnel->udp_port);
+ ret = -EINVAL;
+ break;
+ }
+ wr32(hw, TXGBE_TEREDOPORT, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ txgbe_flush(hw);
+
+ return ret;
+}
+
/* restore n-tuple filter */
static inline void
txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
@@ -5281,6 +5384,8 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
.timesync_adjust_time = txgbe_timesync_adjust_time,
.timesync_read_time = txgbe_timesync_read_time,
.timesync_write_time = txgbe_timesync_write_time,
+ .udp_tunnel_port_add = txgbe_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = txgbe_dev_udp_tunnel_port_del,
.tx_done_cleanup = txgbe_dev_tx_done_cleanup,
};
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
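A sketch of registering and unregistering a tunnel port through the generic ethdev API that lands in the handlers above; the VXLAN port number is the IANA default and is only an illustration:

#include <rte_ethdev.h>

static int
vxlan_port_example(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,	/* IANA-assigned VXLAN port */
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};
	int rc;

	rc = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
	if (rc != 0)
		return rc;

	/* the same structure removes the port again */
	return rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
}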
* [dpdk-dev] [PATCH 27/37] net/txgbe: add TM configuration init and uninit
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (25 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 26/37] net/txgbe: support UDP tunnel port add and delete Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 28/37] net/txgbe: add TM capabilities get operation Jiawen Wu
` (10 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add traffic manager configuration init and uninit operations.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/meson.build | 1 +
drivers/net/txgbe/txgbe_ethdev.c | 16 +++++++++
drivers/net/txgbe/txgbe_ethdev.h | 60 ++++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_tm.c | 57 ++++++++++++++++++++++++++++++
4 files changed, 134 insertions(+)
create mode 100644 drivers/net/txgbe/txgbe_tm.c
diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build
index bb1683631..352baad8b 100644
--- a/drivers/net/txgbe/meson.build
+++ b/drivers/net/txgbe/meson.build
@@ -11,6 +11,7 @@ sources = files(
'txgbe_ptypes.c',
'txgbe_pf.c',
'txgbe_rxtx.c',
+ 'txgbe_tm.c',
)
deps += ['hash']
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 337ebf2e0..2b73abae6 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -704,6 +704,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
+ /* initialize Traffic Manager configuration */
+ txgbe_tm_conf_init(eth_dev);
+
return 0;
}
@@ -1545,6 +1548,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
int status;
uint16_t vf, idx;
uint32_t *link_speeds;
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
PMD_INIT_FUNC_TRACE();
@@ -1748,6 +1752,11 @@ txgbe_dev_start(struct rte_eth_dev *dev)
txgbe_l2_tunnel_conf(dev);
txgbe_filter_restore(dev);
+ if (tm_conf->root && !tm_conf->committed)
+ PMD_DRV_LOG(WARNING,
+ "please call hierarchy_commit() "
+ "before starting the port");
+
/*
* Update link status right before return, because it may
* start link configuration process in a separate thread.
@@ -1780,6 +1789,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int vf;
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
if (hw->adapter_stopped)
return 0;
@@ -1832,6 +1842,9 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
intr_handle->intr_vec = NULL;
}
+ /* reset hierarchy commit */
+ tm_conf->committed = false;
+
adapter->rss_reta_updated = 0;
wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
@@ -1947,6 +1960,9 @@ txgbe_dev_close(struct rte_eth_dev *dev)
/* clear all the filters list */
txgbe_filterlist_flush();
+ /* Remove all Traffic Manager configuration */
+ txgbe_tm_conf_uninit(dev);
+
return ret;
}
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 1df74ab9b..0f1a39918 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -14,6 +14,7 @@
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_ethdev.h>
+#include <rte_tm_driver.h>
/* need update link, bit flag */
#define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -273,6 +274,60 @@ struct txgbe_bw_conf {
uint8_t tc_num; /* Number of TCs. */
};
+/* Struct to store Traffic Manager shaper profile. */
+struct txgbe_tm_shaper_profile {
+ TAILQ_ENTRY(txgbe_tm_shaper_profile) node;
+ uint32_t shaper_profile_id;
+ uint32_t reference_count;
+ struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile);
+
+/* Struct to store Traffic Manager node configuration. */
+struct txgbe_tm_node {
+ TAILQ_ENTRY(txgbe_tm_node) node;
+ uint32_t id;
+ uint32_t priority;
+ uint32_t weight;
+ uint32_t reference_count;
+ uint16_t no;
+ struct txgbe_tm_node *parent;
+ struct txgbe_tm_shaper_profile *shaper_profile;
+ struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(txgbe_tm_node_list, txgbe_tm_node);
+
+/* The configuration of Traffic Manager */
+struct txgbe_tm_conf {
+ struct txgbe_shaper_profile_list shaper_profile_list;
+ struct txgbe_tm_node *root; /* root node - port */
+ struct txgbe_tm_node_list tc_list; /* node list for all the TCs */
+ struct txgbe_tm_node_list queue_list; /* node list for all the queues */
+ /**
+ * The number of added TC nodes.
+ * It should be no more than the TC number of this port.
+ */
+ uint32_t nb_tc_node;
+ /**
+ * The number of added queue nodes.
+ * It should be no more than the queue number of this port.
+ */
+ uint32_t nb_queue_node;
+ /**
+ * This flag is used to check if APP can change the TM node
+ * configuration.
+ * When it's true, it means the configuration has been applied to HW
+ * and the APP should not change it.
+ * As we don't support on-the-fly configuration, when starting
+ * the port, APP should call the hierarchy_commit API to set this
+ * flag to true. When stopping the port, this flag should be set
+ * to false.
+ */
+ bool committed;
+};
+
/*
* Structure to store private data for each driver instance (for each port).
*/
@@ -295,6 +350,7 @@ struct txgbe_adapter {
struct rte_timecounter systime_tc;
struct rte_timecounter rx_tstamp_tc;
struct rte_timecounter tx_tstamp_tc;
+ struct txgbe_tm_conf tm_conf;
/* For RSS reta table update */
uint8_t rss_reta_updated;
@@ -345,6 +401,8 @@ struct txgbe_adapter {
#define TXGBE_DEV_BW_CONF(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->bw_conf)
+#define TXGBE_DEV_TM_CONF(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->tm_conf)
/*
* RX/TX function prototypes
@@ -500,6 +558,8 @@ int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
int txgbe_vt_check(struct txgbe_hw *hw);
int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
+void txgbe_tm_conf_init(struct rte_eth_dev *dev);
+void txgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t tx_rate);
int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
new file mode 100644
index 000000000..78f426964
--- /dev/null
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <rte_malloc.h>
+
+#include "txgbe_ethdev.h"
+
+void
+txgbe_tm_conf_init(struct rte_eth_dev *dev)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+
+ /* initialize shaper profile list */
+ TAILQ_INIT(&tm_conf->shaper_profile_list);
+
+ /* initialize node configuration */
+ tm_conf->root = NULL;
+ TAILQ_INIT(&tm_conf->queue_list);
+ TAILQ_INIT(&tm_conf->tc_list);
+ tm_conf->nb_tc_node = 0;
+ tm_conf->nb_queue_node = 0;
+ tm_conf->committed = false;
+}
+
+void
+txgbe_tm_conf_uninit(struct rte_eth_dev *dev)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_tm_shaper_profile *shaper_profile;
+ struct txgbe_tm_node *tm_node;
+
+ /* clear node configuration */
+ while ((tm_node = TAILQ_FIRST(&tm_conf->queue_list))) {
+ TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ tm_conf->nb_queue_node = 0;
+ while ((tm_node = TAILQ_FIRST(&tm_conf->tc_list))) {
+ TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+ rte_free(tm_node);
+ }
+ tm_conf->nb_tc_node = 0;
+ if (tm_conf->root) {
+ rte_free(tm_conf->root);
+ tm_conf->root = NULL;
+ }
+
+ /* Remove all shaper profiles */
+ while ((shaper_profile =
+ TAILQ_FIRST(&tm_conf->shaper_profile_list))) {
+ TAILQ_REMOVE(&tm_conf->shaper_profile_list,
+ shaper_profile, node);
+ rte_free(shaper_profile);
+ }
+}
+
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
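The warning added to dev_start above assumes the application commits its TM hierarchy before starting the port; a sketch of that ordering through the generic rte_tm API (node and shaper setup omitted):

#include <rte_ethdev.h>
#include <rte_tm.h>

static int
start_with_tm(uint16_t port_id)
{
	struct rte_tm_error tm_err;
	int rc;

	/* apply the configured hierarchy to HW; clear it on failure */
	rc = rte_tm_hierarchy_commit(port_id, 1, &tm_err);
	if (rc != 0)
		return rc;

	return rte_eth_dev_start(port_id);
}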
* [dpdk-dev] [PATCH 28/37] net/txgbe: add TM capabilities get operation
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (26 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 27/37] net/txgbe: add TM configuration init and uninit Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 29/37] net/txgbe: support TM shaper profile add and delete Jiawen Wu
` (9 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support to get traffic manager capabilities.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.c | 1 +
drivers/net/txgbe/txgbe_ethdev.h | 9 +
drivers/net/txgbe/txgbe_tm.c | 278 +++++++++++++++++++++++++++++++
3 files changed, 288 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 2b73abae6..80ea1038c 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -5402,6 +5402,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
.timesync_write_time = txgbe_timesync_write_time,
.udp_tunnel_port_add = txgbe_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = txgbe_dev_udp_tunnel_port_del,
+ .tm_ops_get = txgbe_tm_ops_get,
.tx_done_cleanup = txgbe_dev_tx_done_cleanup,
};
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 0f1a39918..cf377809e 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -284,6 +284,14 @@ struct txgbe_tm_shaper_profile {
TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile);
+/* node type of Traffic Manager */
+enum txgbe_tm_node_type {
+ TXGBE_TM_NODE_TYPE_PORT,
+ TXGBE_TM_NODE_TYPE_TC,
+ TXGBE_TM_NODE_TYPE_QUEUE,
+ TXGBE_TM_NODE_TYPE_MAX,
+};
+
/* Struct to store Traffic Manager node configuration. */
struct txgbe_tm_node {
TAILQ_ENTRY(txgbe_tm_node) node;
@@ -558,6 +566,7 @@ int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
int txgbe_vt_check(struct txgbe_hw *hw);
int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
+int txgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
void txgbe_tm_conf_init(struct rte_eth_dev *dev);
void txgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 78f426964..545590ba2 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -6,6 +6,36 @@
#include "txgbe_ethdev.h"
+static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error);
+static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error);
+static int txgbe_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error);
+
+const struct rte_tm_ops txgbe_tm_ops = {
+ .capabilities_get = txgbe_tm_capabilities_get,
+ .level_capabilities_get = txgbe_level_capabilities_get,
+ .node_capabilities_get = txgbe_node_capabilities_get,
+};
+
+int
+txgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+ void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ *(const void **)arg = &txgbe_tm_ops;
+
+ return 0;
+}
+
void
txgbe_tm_conf_init(struct rte_eth_dev *dev)
{
@@ -55,3 +85,251 @@ txgbe_tm_conf_uninit(struct rte_eth_dev *dev)
}
}
+static inline uint8_t
+txgbe_tc_nb_get(struct rte_eth_dev *dev)
+{
+ struct rte_eth_conf *eth_conf;
+ uint8_t nb_tcs = 0;
+
+ eth_conf = &dev->data->dev_conf;
+ if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+ } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+ ETH_32_POOLS)
+ nb_tcs = ETH_4_TCS;
+ else
+ nb_tcs = ETH_8_TCS;
+ } else {
+ nb_tcs = 1;
+ }
+
+ return nb_tcs;
+}
+
+static int
+txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint8_t tc_nb = txgbe_tc_nb_get(dev);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (tc_nb > hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+
+ /* set all the parameters to 0 first. */
+ memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+ /**
+ * These are the maximum capabilities, not the current configuration.
+ */
+ /* port + TCs + queues */
+ cap->n_nodes_max = 1 + TXGBE_DCB_TC_MAX +
+ hw->mac.max_tx_queues;
+ cap->n_levels_max = 3;
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+ cap->shaper_n_max = cap->n_nodes_max;
+ cap->shaper_private_n_max = cap->n_nodes_max;
+ cap->shaper_private_dual_rate_n_max = 0;
+ cap->shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->shaper_private_rate_max = 1250000000ull;
+ cap->shaper_shared_n_max = 0;
+ cap->shaper_shared_n_nodes_per_shaper_max = 0;
+ cap->shaper_shared_n_shapers_per_node_max = 0;
+ cap->shaper_shared_dual_rate_n_max = 0;
+ cap->shaper_shared_rate_min = 0;
+ cap->shaper_shared_rate_max = 0;
+ cap->sched_n_children_max = hw->mac.max_tx_queues;
+ /**
+ * HW supports SP, but there is no plan to support it now.
+ * So, all the nodes should have the same priority.
+ */
+ cap->sched_sp_n_priorities_max = 1;
+ cap->sched_wfq_n_children_per_group_max = 0;
+ cap->sched_wfq_n_groups_max = 0;
+ /**
+ * SW only supports fair round robin now.
+ * So, all the nodes should have the same weight.
+ */
+ cap->sched_wfq_weight_max = 1;
+ cap->cman_head_drop_supported = 0;
+ cap->dynamic_update_mask = 0;
+ cap->shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD;
+ cap->shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
+ cap->cman_wred_context_n_max = 0;
+ cap->cman_wred_context_private_n_max = 0;
+ cap->cman_wred_context_shared_n_max = 0;
+ cap->cman_wred_context_shared_n_nodes_per_context_max = 0;
+ cap->cman_wred_context_shared_n_contexts_per_node_max = 0;
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
+static inline struct txgbe_tm_node *
+txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
+ enum txgbe_tm_node_type *node_type)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_tm_node *tm_node;
+
+ if (tm_conf->root && tm_conf->root->id == node_id) {
+ *node_type = TXGBE_TM_NODE_TYPE_PORT;
+ return tm_conf->root;
+ }
+
+ TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = TXGBE_TM_NODE_TYPE_TC;
+ return tm_node;
+ }
+ }
+
+ TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+ if (tm_node->id == node_id) {
+ *node_type = TXGBE_TM_NODE_TYPE_QUEUE;
+ return tm_node;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+txgbe_level_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (level_id >= TXGBE_TM_NODE_TYPE_MAX) {
+ error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+ error->message = "too deep level";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (level_id == TXGBE_TM_NODE_TYPE_PORT) {
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->n_nodes_leaf_max = 0;
+ } else if (level_id == TXGBE_TM_NODE_TYPE_TC) {
+ /* TC */
+ cap->n_nodes_max = TXGBE_DCB_TC_MAX;
+ cap->n_nodes_nonleaf_max = TXGBE_DCB_TC_MAX;
+ cap->n_nodes_leaf_max = 0;
+ } else {
+ /* queue */
+ cap->n_nodes_max = hw->mac.max_tx_queues;
+ cap->n_nodes_nonleaf_max = 0;
+ cap->n_nodes_leaf_max = hw->mac.max_tx_queues;
+ }
+
+ cap->non_leaf_nodes_identical = true;
+ cap->leaf_nodes_identical = true;
+
+ if (level_id != TXGBE_TM_NODE_TYPE_QUEUE) {
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = false;
+ cap->nonleaf.shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->nonleaf.shaper_private_rate_max = 1250000000ull;
+ cap->nonleaf.shaper_shared_n_max = 0;
+ if (level_id == TXGBE_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ TXGBE_DCB_TC_MAX;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->mac.max_tx_queues;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ cap->nonleaf.stats_mask = 0;
+
+ return 0;
+ }
+
+ /* queue node */
+ cap->leaf.shaper_private_supported = true;
+ cap->leaf.shaper_private_dual_rate_supported = false;
+ cap->leaf.shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->leaf.shaper_private_rate_max = 1250000000ull;
+ cap->leaf.shaper_shared_n_max = 0;
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ cap->leaf.stats_mask = 0;
+
+ return 0;
+}
+
+static int
+txgbe_node_capabilities_get(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
+ struct txgbe_tm_node *tm_node;
+
+ if (!cap || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ cap->shaper_private_supported = true;
+ cap->shaper_private_dual_rate_supported = false;
+ cap->shaper_private_rate_min = 0;
+ /* 10Gbps -> 1.25GBps */
+ cap->shaper_private_rate_max = 1250000000ull;
+ cap->shaper_shared_n_max = 0;
+
+ if (node_type == TXGBE_TM_NODE_TYPE_QUEUE) {
+ cap->leaf.cman_head_drop_supported = false;
+ cap->leaf.cman_wred_context_private_supported = true;
+ cap->leaf.cman_wred_context_shared_n_max = 0;
+ } else {
+ if (node_type == TXGBE_TM_NODE_TYPE_PORT)
+ cap->nonleaf.sched_n_children_max =
+ TXGBE_DCB_TC_MAX;
+ else
+ cap->nonleaf.sched_n_children_max =
+ hw->mac.max_tx_queues;
+ cap->nonleaf.sched_sp_n_priorities_max = 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max = 0;
+ cap->nonleaf.sched_wfq_n_groups_max = 0;
+ cap->nonleaf.sched_wfq_weight_max = 1;
+ }
+
+ cap->stats_mask = 0;
+
+ return 0;
+}
+
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 29/37] net/txgbe: support TM shaper profile add and delete
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (27 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 28/37] net/txgbe: add TM capabilities get operation Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 30/37] net/txgbe: support TM node " Jiawen Wu
` (8 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Support traffic manager shaper profile add and delete operations.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_tm.c | 129 +++++++++++++++++++++++++++++++++++
1 file changed, 129 insertions(+)
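For context (illustrative only, not part of the patch): txgbe_shaper_profile_param_check() below rejects a committed rate, committed bucket size, peak bucket size, and packet length adjustment, so only a peak rate may be set. A minimal sketch of a profile that passes the check, using an arbitrary profile id:

#include <string.h>
#include <rte_tm.h>

static int
add_peak_rate_profile(uint16_t port_id)
{
	struct rte_tm_shaper_params sp;
	struct rte_tm_error err;

	memset(&sp, 0, sizeof(sp));
	/* peak rate is in bytes per second: 125000000 Bps == 1 Gb/s;
	 * all other shaper fields must stay 0 for txgbe */
	sp.peak.rate = 125000000ULL;
	return rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
}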
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 545590ba2..8adb03825 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -9,6 +9,13 @@
static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
struct rte_tm_capabilities *cap,
struct rte_tm_error *error);
+static int txgbe_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error);
+static int txgbe_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error);
static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
uint32_t level_id,
struct rte_tm_level_capabilities *cap,
@@ -20,6 +27,8 @@ static int txgbe_node_capabilities_get(struct rte_eth_dev *dev,
const struct rte_tm_ops txgbe_tm_ops = {
.capabilities_get = txgbe_tm_capabilities_get,
+ .shaper_profile_add = txgbe_shaper_profile_add,
+ .shaper_profile_delete = txgbe_shaper_profile_del,
.level_capabilities_get = txgbe_level_capabilities_get,
.node_capabilities_get = txgbe_node_capabilities_get,
};
@@ -174,6 +183,126 @@ txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
return 0;
}
+static inline struct txgbe_tm_shaper_profile *
+txgbe_shaper_profile_search(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_shaper_profile_list *shaper_profile_list =
+ &tm_conf->shaper_profile_list;
+ struct txgbe_tm_shaper_profile *shaper_profile;
+
+ TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+ if (shaper_profile_id == shaper_profile->shaper_profile_id)
+ return shaper_profile;
+ }
+
+ return NULL;
+}
+
+static int
+txgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ /* min rate not supported */
+ if (profile->committed.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ error->message = "committed rate not supported";
+ return -EINVAL;
+ }
+ /* min bucket size not supported */
+ if (profile->committed.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ error->message = "committed bucket size not supported";
+ return -EINVAL;
+ }
+ /* max bucket size not supported */
+ if (profile->peak.size) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ error->message = "peak bucket size not supported";
+ return -EINVAL;
+ }
+ /* length adjustment not supported */
+ if (profile->pkt_length_adjust) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ error->message = "packet length adjustment not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+txgbe_shaper_profile_add(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_tm_shaper_profile *shaper_profile;
+ int ret;
+
+ if (!profile || !error)
+ return -EINVAL;
+
+ ret = txgbe_shaper_profile_param_check(profile, error);
+ if (ret)
+ return ret;
+
+ shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);
+
+ if (shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID exist";
+ return -EINVAL;
+ }
+
+ shaper_profile = rte_zmalloc("txgbe_tm_shaper_profile",
+ sizeof(struct txgbe_tm_shaper_profile),
+ 0);
+ if (!shaper_profile)
+ return -ENOMEM;
+ shaper_profile->shaper_profile_id = shaper_profile_id;
+ rte_memcpy(&shaper_profile->profile, profile,
+ sizeof(struct rte_tm_shaper_params));
+ TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
+ shaper_profile, node);
+
+ return 0;
+}
+
+static int
+txgbe_shaper_profile_del(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_tm_shaper_profile *shaper_profile;
+
+ if (!error)
+ return -EINVAL;
+
+ shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);
+
+ if (!shaper_profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "profile ID not exist";
+ return -EINVAL;
+ }
+
+ /* don't delete a profile if it's used by one or several nodes */
+ if (shaper_profile->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "profile in use";
+ return -EINVAL;
+ }
+
+ TAILQ_REMOVE(&tm_conf->shaper_profile_list, shaper_profile, node);
+ rte_free(shaper_profile);
+
+ return 0;
+}
+
static inline struct txgbe_tm_node *
txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
enum txgbe_tm_node_type *node_type)
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 30/37] net/txgbe: support TM node add and delete
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (28 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 29/37] net/txgbe: support TM shaper profile add and delete Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 31/37] net/txgbe: add TM hierarchy commit Jiawen Wu
` (7 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Support traffic manager node add and delete operations.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.h | 1 +
drivers/net/txgbe/txgbe_tm.c | 488 +++++++++++++++++++++++++++++++
2 files changed, 489 insertions(+)
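For context (illustrative only, not part of the patch): node add enforces priority 0 and weight 1, and a leaf node id must equal its Tx queue id; WRED is unsupported, so a leaf must pass RTE_TM_WRED_PROFILE_ID_NONE. A minimal sketch building a port -> TC -> queue chain (ids 1000/2000 are arbitrary non-queue ids; shaper_id is a profile added beforehand):

#include <string.h>
#include <rte_tm.h>

static int
build_tm_tree(uint16_t port_id, uint32_t shaper_id)
{
	struct rte_tm_node_params np;
	struct rte_tm_error err;
	int ret;

	/* non-leaf nodes: no shaper, single SP priority */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
	np.nonleaf.n_sp_priorities = 1;

	/* root (port) node: no parent */
	ret = rte_tm_node_add(port_id, 1000, RTE_TM_NODE_ID_NULL,
			      0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret)
		return ret;

	/* one TC under the port */
	ret = rte_tm_node_add(port_id, 2000, 1000,
			      0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
	if (ret)
		return ret;

	/* leaf: node id == Tx queue 0, carrying the queue shaper */
	memset(&np, 0, sizeof(np));
	np.shaper_profile_id = shaper_id;
	np.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
	return rte_tm_node_add(port_id, 0, 2000,
			       0, 1, RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
}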
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index cf377809e..1d90c6a06 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -14,6 +14,7 @@
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_ethdev.h>
+#include <rte_bus_pci.h>
#include <rte_tm_driver.h>
/* need update link, bit flag */
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 8adb03825..6dd593e54 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -16,6 +16,15 @@ static int txgbe_shaper_profile_add(struct rte_eth_dev *dev,
static int txgbe_shaper_profile_del(struct rte_eth_dev *dev,
uint32_t shaper_profile_id,
struct rte_tm_error *error);
+static int txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+static int txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error);
+static int txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error);
static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
uint32_t level_id,
struct rte_tm_level_capabilities *cap,
@@ -29,6 +38,9 @@ const struct rte_tm_ops txgbe_tm_ops = {
.capabilities_get = txgbe_tm_capabilities_get,
.shaper_profile_add = txgbe_shaper_profile_add,
.shaper_profile_delete = txgbe_shaper_profile_del,
+ .node_add = txgbe_node_add,
+ .node_delete = txgbe_node_delete,
+ .node_type_get = txgbe_node_type_get,
.level_capabilities_get = txgbe_level_capabilities_get,
.node_capabilities_get = txgbe_node_capabilities_get,
};
@@ -332,6 +344,482 @@ txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t node_id,
return NULL;
}
+static void
+txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
+ uint16_t *base, uint16_t *nb)
+{
+ uint8_t nb_tcs = txgbe_tc_nb_get(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ uint16_t vf_num = pci_dev->max_vfs;
+
+ *base = 0;
+ *nb = 0;
+
+ /* VT on */
+ if (vf_num) {
+ /* no DCB */
+ if (nb_tcs == 1) {
+ if (vf_num >= ETH_32_POOLS) {
+ *nb = 2;
+ *base = vf_num * 2;
+ } else if (vf_num >= ETH_16_POOLS) {
+ *nb = 4;
+ *base = vf_num * 4;
+ } else {
+ *nb = 8;
+ *base = vf_num * 8;
+ }
+ } else {
+ /* DCB */
+ *nb = 1;
+ *base = vf_num * nb_tcs + tc_node_no;
+ }
+ } else {
+ /* VT off */
+ if (nb_tcs == ETH_8_TCS) {
+ switch (tc_node_no) {
+ case 0:
+ *base = 0;
+ *nb = 32;
+ break;
+ case 1:
+ *base = 32;
+ *nb = 32;
+ break;
+ case 2:
+ *base = 64;
+ *nb = 16;
+ break;
+ case 3:
+ *base = 80;
+ *nb = 16;
+ break;
+ case 4:
+ *base = 96;
+ *nb = 8;
+ break;
+ case 5:
+ *base = 104;
+ *nb = 8;
+ break;
+ case 6:
+ *base = 112;
+ *nb = 8;
+ break;
+ case 7:
+ *base = 120;
+ *nb = 8;
+ break;
+ default:
+ return;
+ }
+ } else {
+ switch (tc_node_no) {
+ /**
+ * If no VF and no DCB, only 64 queues can be used.
+ * That case is also covered by "case 0".
+ */
+ case 0:
+ *base = 0;
+ *nb = 64;
+ break;
+ case 1:
+ *base = 64;
+ *nb = 32;
+ break;
+ case 2:
+ *base = 96;
+ *nb = 16;
+ break;
+ case 3:
+ *base = 112;
+ *nb = 16;
+ break;
+ default:
+ return;
+ }
+ }
+ }
+}
+
+static int
+txgbe_node_param_check(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t priority, uint32_t weight,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ if (priority) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "priority should be 0";
+ return -EINVAL;
+ }
+
+ if (weight != 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+ error->message = "weight must be 1";
+ return -EINVAL;
+ }
+
+ /* not support shared shaper */
+ if (params->shared_shaper_id) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+ if (params->n_shared_shapers) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS;
+ error->message = "shared shaper not supported";
+ return -EINVAL;
+ }
+
+ /* for non-leaf node */
+ if (node_id >= dev->data->nb_tx_queues) {
+ /* check the unsupported parameters */
+ if (params->nonleaf.wfq_weight_mode) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFQ not supported";
+ return -EINVAL;
+ }
+ if (params->nonleaf.n_sp_priorities != 1) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES;
+ error->message = "SP priority not supported";
+ return -EINVAL;
+ } else if (params->nonleaf.wfq_weight_mode &&
+ !(*params->nonleaf.wfq_weight_mode)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE;
+ error->message = "WFP should be byte mode";
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* for leaf node */
+ /* check the unsupported parameters */
+ if (params->leaf.cman) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN;
+ error->message = "Congestion management not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.wred_profile_id !=
+ RTE_TM_WRED_PROFILE_ID_NONE) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.shared_wred_context_id) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+ if (params->leaf.wred.n_shared_wred_contexts) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS;
+ error->message = "WRED not supported";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Now the TC and queue configuration is controlled by DCB.
+ * We need to check that the node configuration follows the DCB configuration.
+ * In the future, we may use TM to cover DCB.
+ */
+static int
+txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
+ enum txgbe_tm_node_type parent_node_type = TXGBE_TM_NODE_TYPE_MAX;
+ struct txgbe_tm_shaper_profile *shaper_profile = NULL;
+ struct txgbe_tm_node *tm_node;
+ struct txgbe_tm_node *parent_node;
+ uint8_t nb_tcs;
+ uint16_t q_base = 0;
+ uint16_t q_nb = 0;
+ int ret;
+
+ if (!params || !error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (tm_conf->committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ ret = txgbe_node_param_check(dev, node_id, priority, weight,
+ params, error);
+ if (ret)
+ return ret;
+
+ /* check if the node ID is already used */
+ if (txgbe_tm_node_search(dev, node_id, &node_type)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "node id already used";
+ return -EINVAL;
+ }
+
+ /* check the shaper profile id */
+ if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+ shaper_profile = txgbe_shaper_profile_search(dev,
+ params->shaper_profile_id);
+ if (!shaper_profile) {
+ error->type =
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID;
+ error->message = "shaper profile not exist";
+ return -EINVAL;
+ }
+ }
+
+ /* root node if it does not have a parent */
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id > TXGBE_TM_NODE_TYPE_PORT) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* obviously no more than one root */
+ if (tm_conf->root) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "already have a root";
+ return -EINVAL;
+ }
+
+ /* add the root node */
+ tm_node = rte_zmalloc("txgbe_tm_node",
+ sizeof(struct txgbe_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->no = 0;
+ tm_node->parent = NULL;
+ tm_node->shaper_profile = shaper_profile;
+ rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ tm_conf->root = tm_node;
+
+ /* increase the reference counter of the shaper profile */
+ if (shaper_profile)
+ shaper_profile->reference_count++;
+
+ return 0;
+ }
+
+ /* TC or queue node */
+ /* check the parent node */
+ parent_node = txgbe_tm_node_search(dev, parent_node_id,
+ &parent_node_type);
+ if (!parent_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent not exist";
+ return -EINVAL;
+ }
+ if (parent_node_type != TXGBE_TM_NODE_TYPE_PORT &&
+ parent_node_type != TXGBE_TM_NODE_TYPE_TC) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "parent is not port or TC";
+ return -EINVAL;
+ }
+ /* check level */
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY &&
+ level_id != parent_node_type + 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARAMS;
+ error->message = "Wrong level";
+ return -EINVAL;
+ }
+
+ /* check the node number */
+ if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
+ /* check TC number */
+ nb_tcs = txgbe_tc_nb_get(dev);
+ if (tm_conf->nb_tc_node >= nb_tcs) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many TCs";
+ return -EINVAL;
+ }
+ } else {
+ /* check queue number */
+ if (tm_conf->nb_queue_node >= dev->data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues";
+ return -EINVAL;
+ }
+
+ txgbe_queue_base_nb_get(dev, parent_node->no, &q_base, &q_nb);
+ if (parent_node->reference_count >= q_nb) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too many queues than TC supported";
+ return -EINVAL;
+ }
+
+ /**
+ * check the node id.
+ * For queue, the node id means queue id.
+ */
+ if (node_id >= dev->data->nb_tx_queues) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "too large queue id";
+ return -EINVAL;
+ }
+ }
+
+ /* add the TC or queue node */
+ tm_node = rte_zmalloc("txgbe_tm_node",
+ sizeof(struct txgbe_tm_node),
+ 0);
+ if (!tm_node)
+ return -ENOMEM;
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->reference_count = 0;
+ tm_node->parent = parent_node;
+ tm_node->shaper_profile = shaper_profile;
+ rte_memcpy(&tm_node->params, params,
+ sizeof(struct rte_tm_node_params));
+ if (parent_node_type == TXGBE_TM_NODE_TYPE_PORT) {
+ tm_node->no = parent_node->reference_count;
+ TAILQ_INSERT_TAIL(&tm_conf->tc_list,
+ tm_node, node);
+ tm_conf->nb_tc_node++;
+ } else {
+ tm_node->no = q_base + parent_node->reference_count;
+ TAILQ_INSERT_TAIL(&tm_conf->queue_list,
+ tm_node, node);
+ tm_conf->nb_queue_node++;
+ }
+ tm_node->parent->reference_count++;
+
+ /* increase the reference counter of the shaper profile */
+ if (shaper_profile)
+ shaper_profile->reference_count++;
+
+ return 0;
+}
+
+static int
+txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
+ struct txgbe_tm_node *tm_node;
+
+ if (!error)
+ return -EINVAL;
+
+ /* if already committed */
+ if (tm_conf->committed) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "already committed";
+ return -EINVAL;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* the node should have no child */
+ if (tm_node->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message =
+ "cannot delete a node which has children";
+ return -EINVAL;
+ }
+
+ /* root node */
+ if (node_type == TXGBE_TM_NODE_TYPE_PORT) {
+ if (tm_node->shaper_profile)
+ tm_node->shaper_profile->reference_count--;
+ rte_free(tm_node);
+ tm_conf->root = NULL;
+ return 0;
+ }
+
+ /* TC or queue node */
+ if (tm_node->shaper_profile)
+ tm_node->shaper_profile->reference_count--;
+ tm_node->parent->reference_count--;
+ if (node_type == TXGBE_TM_NODE_TYPE_TC) {
+ TAILQ_REMOVE(&tm_conf->tc_list, tm_node, node);
+ tm_conf->nb_tc_node--;
+ } else {
+ TAILQ_REMOVE(&tm_conf->queue_list, tm_node, node);
+ tm_conf->nb_queue_node--;
+ }
+ rte_free(tm_node);
+
+ return 0;
+}
+
+static int
+txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ enum txgbe_tm_node_type node_type = TXGBE_TM_NODE_TYPE_MAX;
+ struct txgbe_tm_node *tm_node;
+
+ if (!is_leaf || !error)
+ return -EINVAL;
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ /* check if the node id exists */
+ tm_node = txgbe_tm_node_search(dev, node_id, &node_type);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (node_type == TXGBE_TM_NODE_TYPE_QUEUE)
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
static int
txgbe_level_capabilities_get(struct rte_eth_dev *dev,
uint32_t level_id,
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 31/37] net/txgbe: add TM hierarchy commit
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (29 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 30/37] net/txgbe: support TM node " Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 32/37] net/txgbe: add macsec setting Jiawen Wu
` (6 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add traffic manager hierarchy commit.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_tm.c | 70 ++++++++++++++++++++++++++++++++++++
1 file changed, 70 insertions(+)
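For context (illustrative only, not part of the patch): committing applies only queue-level shapers, since the port and TC levels reject a peak rate. The driver converts the profile's bytes-per-second rate to Mbps for txgbe_set_queue_rate_limit(), e.g. 125000000 Bps * 8 / 1000 / 1000 = 1000 Mbps. A minimal sketch:

#include <stdio.h>
#include <rte_tm.h>

static int
commit_tm_tree(uint16_t port_id)
{
	struct rte_tm_error err = { 0 };
	int ret;

	/* clear_on_fail=1: wipe the TM configuration on rejection,
	 * mirroring the fail_clear path below */
	ret = rte_tm_hierarchy_commit(port_id, 1, &err);
	if (ret)
		printf("commit: %s\n", err.message ? err.message : "error");
	return ret;
}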
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 6dd593e54..b8edd78bf 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -33,6 +33,9 @@ static int txgbe_node_capabilities_get(struct rte_eth_dev *dev,
uint32_t node_id,
struct rte_tm_node_capabilities *cap,
struct rte_tm_error *error);
+static int txgbe_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
const struct rte_tm_ops txgbe_tm_ops = {
.capabilities_get = txgbe_tm_capabilities_get,
@@ -43,6 +46,7 @@ const struct rte_tm_ops txgbe_tm_ops = {
.node_type_get = txgbe_node_type_get,
.level_capabilities_get = txgbe_level_capabilities_get,
.node_capabilities_get = txgbe_node_capabilities_get,
+ .hierarchy_commit = txgbe_hierarchy_commit,
};
int
@@ -950,3 +954,69 @@ txgbe_node_capabilities_get(struct rte_eth_dev *dev,
return 0;
}
+static int
+txgbe_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_tm_node *tm_node;
+ uint64_t bw;
+ int ret;
+
+ if (!error)
+ return -EINVAL;
+
+ /* check the setting */
+ if (!tm_conf->root)
+ goto done;
+
+ /* not support port max bandwidth yet */
+ if (tm_conf->root->shaper_profile &&
+ tm_conf->root->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no port max bandwidth";
+ goto fail_clear;
+ }
+
+ /* HW not support TC max bandwidth */
+ TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+ if (tm_node->shaper_profile &&
+ tm_node->shaper_profile->profile.peak.rate) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "no TC max bandwidth";
+ goto fail_clear;
+ }
+ }
+
+ /* queue max bandwidth */
+ TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+ if (tm_node->shaper_profile)
+ bw = tm_node->shaper_profile->profile.peak.rate;
+ else
+ bw = 0;
+ if (bw) {
+ /* interpret Bps to Mbps */
+ bw = bw * 8 / 1000 / 1000;
+ ret = txgbe_set_queue_rate_limit(dev, tm_node->no, bw);
+ if (ret) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message =
+ "failed to set queue max bandwidth";
+ goto fail_clear;
+ }
+ }
+ }
+
+done:
+ tm_conf->committed = true;
+ return 0;
+
+fail_clear:
+ /* clear all the traffic manager configuration */
+ if (clear_on_fail) {
+ txgbe_tm_conf_uninit(dev);
+ txgbe_tm_conf_init(dev);
+ }
+ return -EINVAL;
+}
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 32/37] net/txgbe: add macsec setting
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (30 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 31/37] net/txgbe: add TM hierarchy commit Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 33/37] net/txgbe: add IPsec context creation Jiawen Wu
` (5 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add macsec register enable and setting reset operations.
Add macsec offload support.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
doc/guides/nics/features/txgbe.ini | 1 +
drivers/net/txgbe/txgbe_ethdev.c | 87 ++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ethdev.h | 17 ++++++
drivers/net/txgbe/txgbe_rxtx.c | 3 ++
4 files changed, 108 insertions(+)
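For context (illustrative only, not part of the patch): with PKT_TX_MACSEC now accepted in the Tx offload mask, an application marks individual packets for hardware MACsec processing; enabling the offload itself (macsec_setting.offload_en and friends) happens through driver control paths outside this patch. A minimal sketch:

#include <rte_mbuf.h>

/* Request MACsec offload for one outgoing packet; only honoured
 * once the port has MACsec offload enabled */
static inline void
mark_for_macsec(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_MACSEC;
}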
diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index 9db2ccde0..d6705f7aa 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -34,6 +34,7 @@ VLAN offload = P
QinQ offload = P
L3 checksum offload = P
L4 checksum offload = P
+MACsec offload = P
Inner L3 checksum = P
Inner L4 checksum = P
Packet type parsing = Y
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 80ea1038c..c92a4aa5f 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -487,6 +487,8 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
PMD_INIT_FUNC_TRACE();
+ txgbe_dev_macsec_setting_reset(eth_dev);
+
eth_dev->dev_ops = &txgbe_eth_dev_ops;
eth_dev->rx_queue_count = txgbe_dev_rx_queue_count;
eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
@@ -1549,6 +1551,8 @@ txgbe_dev_start(struct rte_eth_dev *dev)
uint16_t vf, idx;
uint32_t *link_speeds;
struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+ struct txgbe_macsec_setting *macsec_setting =
+ TXGBE_DEV_MACSEC_SETTING(dev);
PMD_INIT_FUNC_TRACE();
@@ -1763,6 +1767,10 @@ txgbe_dev_start(struct rte_eth_dev *dev)
*/
txgbe_dev_link_update(dev, 0);
+ /* setup the macsec ctrl register */
+ if (macsec_setting->offload_en)
+ txgbe_dev_macsec_register_enable(dev, macsec_setting);
+
wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);
txgbe_read_stats_registers(hw, hw_stats);
@@ -5326,6 +5334,85 @@ txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
return 0;
}
+void
+txgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev)
+{
+ struct txgbe_macsec_setting *macsec = TXGBE_DEV_MACSEC_SETTING(dev);
+
+ macsec->offload_en = 0;
+ macsec->encrypt_en = 0;
+ macsec->replayprotect_en = 0;
+}
+
+void
+txgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
+ struct txgbe_macsec_setting *macsec_setting)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t ctrl;
+ uint8_t en = macsec_setting->encrypt_en;
+ uint8_t rp = macsec_setting->replayprotect_en;
+
+ /**
+ * Workaround:
+ * No txgbe_disable_sec_rx_path equivalent is
+ * implemented for Tx in the base code, and we are
+ * not allowed to modify the base code in DPDK, so
+ * just call the hand-written one directly for now.
+ * The hardware support has been checked by
+ * txgbe_disable_sec_rx_path().
+ */
+ txgbe_disable_sec_tx_path(hw);
+
+ /* Enable Ethernet CRC (required by MACsec offload) */
+ ctrl = rd32(hw, TXGBE_SECRXCTL);
+ ctrl |= TXGBE_SECRXCTL_CRCSTRIP;
+ wr32(hw, TXGBE_SECRXCTL, ctrl);
+
+ /* Enable the TX and RX crypto engines */
+ ctrl = rd32(hw, TXGBE_SECTXCTL);
+ ctrl &= ~TXGBE_SECTXCTL_XDSA;
+ wr32(hw, TXGBE_SECTXCTL, ctrl);
+
+ ctrl = rd32(hw, TXGBE_SECRXCTL);
+ ctrl &= ~TXGBE_SECRXCTL_XDSA;
+ wr32(hw, TXGBE_SECRXCTL, ctrl);
+
+ ctrl = rd32(hw, TXGBE_SECTXIFG);
+ ctrl &= ~TXGBE_SECTXIFG_MIN_MASK;
+ ctrl |= TXGBE_SECTXIFG_MIN(0x3);
+ wr32(hw, TXGBE_SECTXIFG, ctrl);
+
+ /* Enable SA lookup */
+ ctrl = rd32(hw, TXGBE_LSECTXCTL);
+ ctrl &= ~TXGBE_LSECTXCTL_MODE_MASK;
+ ctrl |= en ? TXGBE_LSECTXCTL_MODE_AENC : TXGBE_LSECTXCTL_MODE_AUTH;
+ ctrl &= ~TXGBE_LSECTXCTL_PNTRH_MASK;
+ ctrl |= TXGBE_LSECTXCTL_PNTRH(TXGBE_MACSEC_PNTHRSH);
+ wr32(hw, TXGBE_LSECTXCTL, ctrl);
+
+ ctrl = rd32(hw, TXGBE_LSECRXCTL);
+ ctrl &= ~TXGBE_LSECRXCTL_MODE_MASK;
+ ctrl |= TXGBE_LSECRXCTL_MODE_STRICT;
+ ctrl &= ~TXGBE_LSECRXCTL_POSTHDR;
+ if (rp)
+ ctrl |= TXGBE_LSECRXCTL_REPLAY;
+ else
+ ctrl &= ~TXGBE_LSECRXCTL_REPLAY;
+ wr32(hw, TXGBE_LSECRXCTL, ctrl);
+
+ /* Start the data paths */
+ txgbe_enable_sec_rx_path(hw);
+ /**
+ * Workaround:
+ * No txgbe_enable_sec_rx_path equivalent is
+ * implemented for Tx in the base code, and we are
+ * not allowed to modify the base code in DPDK, so
+ * just call the hand-written one directly for now.
+ */
+ txgbe_enable_sec_tx_path(hw);
+}
+
static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure = txgbe_dev_configure,
.dev_infos_get = txgbe_dev_info_get,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 1d90c6a06..2d15e1ac3 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -62,6 +62,8 @@
#define TXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define TXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+#define TXGBE_MACSEC_PNTHRSH 0xFFFFFE00
+
#define TXGBE_MAX_FDIR_FILTER_NUM (1024 * 32)
#define TXGBE_MAX_L2_TN_FILTER_NUM 128
@@ -270,6 +272,12 @@ struct rte_flow {
void *rule;
};
+struct txgbe_macsec_setting {
+ uint8_t offload_en;
+ uint8_t encrypt_en;
+ uint8_t replayprotect_en;
+};
+
/* The configuration of bandwidth */
struct txgbe_bw_conf {
uint8_t tc_num; /* Number of TCs. */
@@ -343,6 +351,7 @@ struct txgbe_tm_conf {
struct txgbe_adapter {
struct txgbe_hw hw;
struct txgbe_hw_stats stats;
+ struct txgbe_macsec_setting macsec_setting;
struct txgbe_hw_fdir_info fdir;
struct txgbe_interrupt intr;
struct txgbe_stat_mappings stat_mappings;
@@ -374,6 +383,9 @@ struct txgbe_adapter {
#define TXGBE_DEV_STATS(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->stats)
+#define TXGBE_DEV_MACSEC_SETTING(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->macsec_setting)
+
#define TXGBE_DEV_INTR(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->intr)
@@ -579,6 +591,11 @@ int txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
int txgbe_config_rss_filter(struct rte_eth_dev *dev,
struct txgbe_rte_flow_rss_conf *conf, bool add);
+void txgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
+ struct txgbe_macsec_setting *macsec_setting);
+
+void txgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev);
+
static inline int
txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info,
uint16_t ethertype)
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index babda3a79..f99924a3f 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -56,6 +56,9 @@ static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
PKT_TX_L4_MASK |
PKT_TX_TCP_SEG |
PKT_TX_TUNNEL_MASK |
+#ifdef RTE_LIBRTE_MACSEC
+ PKT_TX_MACSEC |
+#endif
PKT_TX_OUTER_IP_CKSUM |
TXGBE_TX_IEEE1588_TMST);
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 33/37] net/txgbe: add IPsec context creation
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (31 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 32/37] net/txgbe: add macsec setting Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 34/37] net/txgbe: add security session create operation Jiawen Wu
` (4 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Initialize security context, and add support to get
security capabilities.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
doc/guides/nics/features/txgbe.ini | 1 +
drivers/net/txgbe/meson.build | 3 +-
drivers/net/txgbe/txgbe_ethdev.c | 13 +++
drivers/net/txgbe/txgbe_ethdev.h | 3 +
drivers/net/txgbe/txgbe_ipsec.c | 181 +++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ipsec.h | 13 +++
6 files changed, 213 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/txgbe/txgbe_ipsec.c
create mode 100644 drivers/net/txgbe/txgbe_ipsec.h
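For context (illustrative only, not part of the patch): after this init, a port that passes txgbe_crypto_capable() carries a security context, and the capability array below is reachable through the generic rte_security API. A minimal sketch:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_security.h>

static void
dump_inline_crypto_caps(uint16_t port_id)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
	const struct rte_security_capability *cap;

	if (ctx == NULL)	/* no security context on this port */
		return;
	for (cap = rte_security_capabilities_get(ctx);
	     cap != NULL && cap->action != RTE_SECURITY_ACTION_TYPE_NONE;
	     cap++) {
		if (cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
			printf("ipsec: mode=%d dir=%d\n",
			       cap->ipsec.mode, cap->ipsec.direction);
	}
}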
diff --git a/doc/guides/nics/features/txgbe.ini b/doc/guides/nics/features/txgbe.ini
index d6705f7aa..54daa382a 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -29,6 +29,7 @@ Flow control = Y
Flow API = Y
Rate limitation = Y
Traffic mirroring = Y
+Inline crypto = Y
CRC offload = P
VLAN offload = P
QinQ offload = P
diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build
index 352baad8b..f6a51a998 100644
--- a/drivers/net/txgbe/meson.build
+++ b/drivers/net/txgbe/meson.build
@@ -8,13 +8,14 @@ sources = files(
'txgbe_ethdev.c',
'txgbe_fdir.c',
'txgbe_flow.c',
+ 'txgbe_ipsec.c',
'txgbe_ptypes.c',
'txgbe_pf.c',
'txgbe_rxtx.c',
'txgbe_tm.c',
)
-deps += ['hash']
+deps += ['hash', 'security']
includes += include_directories('base')
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index c92a4aa5f..c0582e264 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -16,6 +16,9 @@
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>
+#ifdef RTE_LIB_SECURITY
+#include <rte_security_driver.h>
+#endif
#include "txgbe_logs.h"
#include "base/txgbe.h"
@@ -549,6 +552,12 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
/* Unlock any pending hardware semaphore */
txgbe_swfw_lock_reset(hw);
+#ifdef RTE_LIB_SECURITY
+ /* Initialize security_ctx only for primary process*/
+ if (txgbe_ipsec_ctx_create(eth_dev))
+ return -ENOMEM;
+#endif
+
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
txgbe_dcb_init(hw, dcb_config);
@@ -1971,6 +1980,10 @@ txgbe_dev_close(struct rte_eth_dev *dev)
/* Remove all Traffic Manager configuration */
txgbe_tm_conf_uninit(dev);
+#ifdef RTE_LIB_SECURITY
+ rte_free(dev->security_ctx);
+#endif
+
return ret;
}
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 2d15e1ac3..1e7fa1f87 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -9,6 +9,9 @@
#include "base/txgbe.h"
#include "txgbe_ptypes.h"
+#ifdef RTE_LIB_SECURITY
+#include "txgbe_ipsec.h"
+#endif
#include <rte_flow.h>
#include <rte_time.h>
#include <rte_hash.h>
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
new file mode 100644
index 000000000..b21bba237
--- /dev/null
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include <rte_ethdev_pci.h>
+#include <rte_ip.h>
+#include <rte_jhash.h>
+#include <rte_security_driver.h>
+#include <rte_cryptodev.h>
+#include <rte_flow.h>
+
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+#include "txgbe_ipsec.h"
+
+static const struct rte_security_capability *
+txgbe_crypto_capabilities_get(void *device __rte_unused)
+{
+ static const struct rte_cryptodev_capabilities
+ aes_gcm_gmac_crypto_capabilities[] = {
+ { /* AES GMAC (128-bit) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (128-bit) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ {
+ .op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
+ }, }
+ },
+ };
+
+ static const struct rte_security_capability
+ txgbe_security_capabilities[] = {
+ { /* IPsec Inline Crypto ESP Transport Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ { /* IPsec Inline Crypto ESP Transport Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = 0
+ },
+ { /* IPsec Inline Crypto ESP Tunnel Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ { /* IPsec Inline Crypto ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ {.ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ } },
+ .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+ .ol_flags = 0
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+ };
+
+ return txgbe_security_capabilities;
+}
+
+static struct rte_security_ops txgbe_security_ops = {
+ .capabilities_get = txgbe_crypto_capabilities_get
+};
+
+static int
+txgbe_crypto_capable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t reg_i, reg, capable = 1;
+ /* test if rx crypto can be enabled and then write back initial value*/
+ reg_i = rd32(hw, TXGBE_SECRXCTL);
+ wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
+ reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
+ if (reg != 0)
+ capable = 0;
+ wr32(hw, TXGBE_SECRXCTL, reg_i);
+ return capable;
+}
+
+int
+txgbe_ipsec_ctx_create(struct rte_eth_dev *dev)
+{
+ struct rte_security_ctx *ctx = NULL;
+
+ if (txgbe_crypto_capable(dev)) {
+ ctx = rte_malloc("rte_security_instances_ops",
+ sizeof(struct rte_security_ctx), 0);
+ if (ctx) {
+ ctx->device = (void *)dev;
+ ctx->ops = &txgbe_security_ops;
+ ctx->sess_cnt = 0;
+ dev->security_ctx = ctx;
+ } else {
+ return -ENOMEM;
+ }
+ }
+ if (rte_security_dynfield_register() < 0)
+ return -rte_errno;
+ return 0;
+}
diff --git a/drivers/net/txgbe/txgbe_ipsec.h b/drivers/net/txgbe/txgbe_ipsec.h
new file mode 100644
index 000000000..f58ebab3d
--- /dev/null
+++ b/drivers/net/txgbe/txgbe_ipsec.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#ifndef TXGBE_IPSEC_H_
+#define TXGBE_IPSEC_H_
+
+#include <rte_ethdev_core.h>
+#include <rte_security.h>
+
+int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
+
+#endif /*TXGBE_IPSEC_H_*/
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 34/37] net/txgbe: add security session create operation
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (32 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 33/37] net/txgbe: add IPsec context creation Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 35/37] net/txgbe: support security session destroy Jiawen Wu
` (3 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support to configure a security session.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev.h | 6 +
drivers/net/txgbe/txgbe_ipsec.c | 250 +++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ipsec.h | 66 ++++++++
3 files changed, 322 insertions(+)
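For context (illustrative only, not part of the patch): a session config like the one below feeds txgbe_crypto_create_session(). The driver reads a 4-byte salt from just past the AES-GCM key, so the key buffer must be key length + 4 bytes, and DEV_TX_OFFLOAD_SECURITY must have been enabled at configure time. A minimal egress-transport sketch; the two-mempool create call is the DPDK 20.11 rte_security API:

#include <rte_crypto.h>
#include <rte_mempool.h>
#include <rte_security.h>

static struct rte_security_session *
create_esp_egress_session(struct rte_security_ctx *ctx,
			  struct rte_mempool *sess_mp,
			  struct rte_mempool *priv_mp,
			  uint8_t key_and_salt[20], uint32_t spi)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			/* 16-byte key; driver copies salt from bytes 16..19 */
			.key = { .data = key_and_salt, .length = 16 },
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = spi,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
		},
		.crypto_xform = &xform,
	};

	return rte_security_session_create(ctx, &conf, sess_mp, priv_mp);
}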
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 1e7fa1f87..eb68e12b2 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -367,6 +367,9 @@ struct txgbe_adapter {
struct txgbe_filter_info filter;
struct txgbe_l2_tn_info l2_tn;
struct txgbe_bw_conf bw_conf;
+#ifdef RTE_LIB_SECURITY
+ struct txgbe_ipsec ipsec;
+#endif
bool rx_bulk_alloc_allowed;
struct rte_timecounter systime_tc;
struct rte_timecounter rx_tstamp_tc;
@@ -428,6 +431,9 @@ struct txgbe_adapter {
#define TXGBE_DEV_TM_CONF(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->tm_conf)
+#define TXGBE_DEV_IPSEC(dev) \
+ (&((struct txgbe_adapter *)(dev)->data->dev_private)->ipsec)
+
/*
* RX/TX function prototypes
*/
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index b21bba237..7501e25af 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -13,6 +13,255 @@
#include "txgbe_ethdev.h"
#include "txgbe_ipsec.h"
+#define CMP_IP(a, b) (\
+ (a).ipv6[0] == (b).ipv6[0] && \
+ (a).ipv6[1] == (b).ipv6[1] && \
+ (a).ipv6[2] == (b).ipv6[2] && \
+ (a).ipv6[3] == (b).ipv6[3])
+
+static int
+txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session)
+{
+ struct rte_eth_dev *dev = ic_session->dev;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
+ uint32_t reg_val;
+ int sa_index = -1;
+
+ if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
+ int i, ip_index = -1;
+ uint8_t *key;
+
+ /* Find a match in the IP table*/
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (CMP_IP(priv->rx_ip_tbl[i].ip,
+ ic_session->dst_ip)) {
+ ip_index = i;
+ break;
+ }
+ }
+ /* If no match, find a free entry in the IP table*/
+ if (ip_index < 0) {
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (priv->rx_ip_tbl[i].ref_count == 0) {
+ ip_index = i;
+ break;
+ }
+ }
+ }
+
+ /* Fail if no match and no free entries*/
+ if (ip_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "No free entry left in the Rx IP table\n");
+ return -1;
+ }
+
+ /* Find a free entry in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->rx_sa_tbl[i].used == 0) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no free entries*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "No free entry left in the Rx SA table\n");
+ return -1;
+ }
+
+ priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
+ ic_session->dst_ip.ipv6[0];
+ priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
+ ic_session->dst_ip.ipv6[1];
+ priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
+ ic_session->dst_ip.ipv6[2];
+ priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
+ ic_session->dst_ip.ipv6[3];
+ priv->rx_ip_tbl[ip_index].ref_count++;
+
+ priv->rx_sa_tbl[sa_index].spi = ic_session->spi;
+ priv->rx_sa_tbl[sa_index].ip_index = ip_index;
+ priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
+ if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION)
+ priv->rx_sa_tbl[sa_index].mode |=
+ (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
+ if (ic_session->dst_ip.type == IPv6) {
+ priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
+ priv->rx_ip_tbl[ip_index].ip.type = IPv6;
+ } else if (ic_session->dst_ip.type == IPv4) {
+ priv->rx_ip_tbl[ip_index].ip.type = IPv4;
+ }
+ priv->rx_sa_tbl[sa_index].used = 1;
+
+ /* write IP table entry*/
+ reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_IP | (ip_index << 3);
+ if (priv->rx_ip_tbl[ip_index].ip.type == IPv4) {
+ wr32(hw, TXGBE_IPSRXADDR(0), 0);
+ wr32(hw, TXGBE_IPSRXADDR(1), 0);
+ wr32(hw, TXGBE_IPSRXADDR(2), 0);
+ wr32(hw, TXGBE_IPSRXADDR(3),
+ priv->rx_ip_tbl[ip_index].ip.ipv4);
+ } else {
+ wr32(hw, TXGBE_IPSRXADDR(0),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[0]);
+ wr32(hw, TXGBE_IPSRXADDR(1),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[1]);
+ wr32(hw, TXGBE_IPSRXADDR(2),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[2]);
+ wr32(hw, TXGBE_IPSRXADDR(3),
+ priv->rx_ip_tbl[ip_index].ip.ipv6[3]);
+ }
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+
+ /* write SPI table entry*/
+ reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
+ wr32(hw, TXGBE_IPSRXSPI,
+ priv->rx_sa_tbl[sa_index].spi);
+ wr32(hw, TXGBE_IPSRXADDRIDX,
+ priv->rx_sa_tbl[sa_index].ip_index);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+
+ /* write Key table entry*/
+ key = malloc(ic_session->key_len);
+ if (!key)
+ return -ENOMEM;
+
+ memcpy(key, ic_session->key, ic_session->key_len);
+
+ reg_val = TXGBE_IPSRXIDX_ENA | TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_KEY | (sa_index << 3);
+ wr32(hw, TXGBE_IPSRXKEY(0),
+ rte_cpu_to_be_32(*(uint32_t *)&key[12]));
+ wr32(hw, TXGBE_IPSRXKEY(1),
+ rte_cpu_to_be_32(*(uint32_t *)&key[8]));
+ wr32(hw, TXGBE_IPSRXKEY(2),
+ rte_cpu_to_be_32(*(uint32_t *)&key[4]));
+ wr32(hw, TXGBE_IPSRXKEY(3),
+ rte_cpu_to_be_32(*(uint32_t *)&key[0]));
+ wr32(hw, TXGBE_IPSRXSALT,
+ rte_cpu_to_be_32(ic_session->salt));
+ wr32(hw, TXGBE_IPSRXMODE,
+ priv->rx_sa_tbl[sa_index].mode);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+
+ free(key);
+ } else { /* sess->dir == RTE_CRYPTO_OUTBOUND */
+ uint8_t *key;
+ int i;
+
+ /* Find a free entry in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->tx_sa_tbl[i].used == 0) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no free entries*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "No free entry left in the Tx SA table\n");
+ return -1;
+ }
+
+ priv->tx_sa_tbl[sa_index].spi =
+ rte_cpu_to_be_32(ic_session->spi);
+ priv->tx_sa_tbl[sa_index].used = 1;
+ ic_session->sa_index = sa_index;
+
+ key = malloc(ic_session->key_len);
+ if (!key)
+ return -ENOMEM;
+
+ memcpy(key, ic_session->key, ic_session->key_len);
+
+ /* write Key table entry*/
+ reg_val = TXGBE_IPSRXIDX_ENA |
+ TXGBE_IPSRXIDX_WRITE | (sa_index << 3);
+ wr32(hw, TXGBE_IPSTXKEY(0),
+ rte_cpu_to_be_32(*(uint32_t *)&key[12]));
+ wr32(hw, TXGBE_IPSTXKEY(1),
+ rte_cpu_to_be_32(*(uint32_t *)&key[8]));
+ wr32(hw, TXGBE_IPSTXKEY(2),
+ rte_cpu_to_be_32(*(uint32_t *)&key[4]));
+ wr32(hw, TXGBE_IPSTXKEY(3),
+ rte_cpu_to_be_32(*(uint32_t *)&key[0]));
+ wr32(hw, TXGBE_IPSTXSALT,
+ rte_cpu_to_be_32(ic_session->salt));
+ wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
+
+ free(key);
+ }
+
+ return 0;
+}
+
+static int
+txgbe_crypto_create_session(void *device,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *session,
+ struct rte_mempool *mempool)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+ struct txgbe_crypto_session *ic_session = NULL;
+ struct rte_crypto_aead_xform *aead_xform;
+ struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf;
+
+ if (rte_mempool_get(mempool, (void **)&ic_session)) {
+ PMD_DRV_LOG(ERR, "Cannot get object from ic_session mempool");
+ return -ENOMEM;
+ }
+
+ if (conf->crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD ||
+ conf->crypto_xform->aead.algo !=
+ RTE_CRYPTO_AEAD_AES_GCM) {
+ PMD_DRV_LOG(ERR, "Unsupported crypto transformation mode\n");
+ rte_mempool_put(mempool, (void *)ic_session);
+ return -ENOTSUP;
+ }
+ aead_xform = &conf->crypto_xform->aead;
+
+ if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ ic_session->op = TXGBE_OP_AUTHENTICATED_DECRYPTION;
+ } else {
+ PMD_DRV_LOG(ERR, "IPsec decryption not enabled\n");
+ rte_mempool_put(mempool, (void *)ic_session);
+ return -ENOTSUP;
+ }
+ } else {
+ if (dev_conf->txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ ic_session->op = TXGBE_OP_AUTHENTICATED_ENCRYPTION;
+ } else {
+ PMD_DRV_LOG(ERR, "IPsec encryption not enabled\n");
+ rte_mempool_put(mempool, (void *)ic_session);
+ return -ENOTSUP;
+ }
+ }
+
+ ic_session->key = aead_xform->key.data;
+ ic_session->key_len = aead_xform->key.length;
+ memcpy(&ic_session->salt,
+ &aead_xform->key.data[aead_xform->key.length], 4);
+ ic_session->spi = conf->ipsec.spi;
+ ic_session->dev = eth_dev;
+
+ set_sec_session_private_data(session, ic_session);
+
+ if (ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+ if (txgbe_crypto_add_sa(ic_session)) {
+ PMD_DRV_LOG(ERR, "Failed to add SA\n");
+ rte_mempool_put(mempool, (void *)ic_session);
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
static const struct rte_security_capability *
txgbe_crypto_capabilities_get(void *device __rte_unused)
{
@@ -140,6 +389,7 @@ txgbe_crypto_capabilities_get(void *device __rte_unused)
}
static struct rte_security_ops txgbe_security_ops = {
+ .session_create = txgbe_crypto_create_session,
.capabilities_get = txgbe_crypto_capabilities_get
};
diff --git a/drivers/net/txgbe/txgbe_ipsec.h b/drivers/net/txgbe/txgbe_ipsec.h
index f58ebab3d..c94775636 100644
--- a/drivers/net/txgbe/txgbe_ipsec.h
+++ b/drivers/net/txgbe/txgbe_ipsec.h
@@ -5,9 +5,75 @@
#ifndef TXGBE_IPSEC_H_
#define TXGBE_IPSEC_H_
+#include <rte_ethdev.h>
#include <rte_ethdev_core.h>
#include <rte_security.h>
+#define IPSRXMOD_VALID 0x00000001
+#define IPSRXMOD_PROTO 0x00000004
+#define IPSRXMOD_DECRYPT 0x00000008
+#define IPSRXMOD_IPV6 0x00000010
+
+#define IPSEC_MAX_RX_IP_COUNT 128
+#define IPSEC_MAX_SA_COUNT 1024
+
+enum txgbe_operation {
+ TXGBE_OP_AUTHENTICATED_ENCRYPTION,
+ TXGBE_OP_AUTHENTICATED_DECRYPTION
+};
+
+/**
+ * Generic IP address structure
+ * TODO: Find a better location for this, possibly rte_net.h.
+ **/
+struct ipaddr {
+ enum ipaddr_type {
+ IPv4,
+ IPv6
+ } type;
+ /**< IP Address Type - IPv4/IPv6 */
+
+ union {
+ uint32_t ipv4;
+ uint32_t ipv6[4];
+ };
+};
+
+/** Inline crypto private session structure */
+struct txgbe_crypto_session {
+ enum txgbe_operation op;
+ const uint8_t *key;
+ uint32_t key_len;
+ uint32_t salt;
+ uint32_t sa_index;
+ uint32_t spi;
+ struct ipaddr src_ip;
+ struct ipaddr dst_ip;
+ struct rte_eth_dev *dev;
+} __rte_cache_aligned;
+
+struct txgbe_crypto_rx_ip_table {
+ struct ipaddr ip;
+ uint16_t ref_count;
+};
+struct txgbe_crypto_rx_sa_table {
+ uint32_t spi;
+ uint32_t ip_index;
+ uint8_t mode;
+ uint8_t used;
+};
+
+struct txgbe_crypto_tx_sa_table {
+ uint32_t spi;
+ uint8_t used;
+};
+
+struct txgbe_ipsec {
+ struct txgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
+ struct txgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
+ struct txgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
+};
+
int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
#endif /*TXGBE_IPSEC_H_*/
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 35/37] net/txgbe: support security session destroy
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (33 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 34/37] net/txgbe: add security session create operation Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 36/37] net/txgbe: add security offload in Rx and Tx process Jiawen Wu
` (2 subsequent siblings)
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add support to clear a security session's private data,
get the size of a security session,
and update the mbuf with the provided metadata.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ipsec.c | 167 ++++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ipsec.h | 15 +++
2 files changed, 182 insertions(+)
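For reviewers: a minimal sketch of the application-side session lifecycle
these ops serve, written against the DPDK 20.11 security API; port_id,
sess_mp, the key bytes and the SPI below are placeholders, not values the
driver mandates.

#include <rte_ethdev.h>
#include <rte_security.h>
#include <rte_crypto_sym.h>

/* txgbe reads a 4-byte salt appended directly after the AES-GCM key */
static uint8_t key_and_salt[16 + 4];

static struct rte_security_session *
create_inline_egress_session(uint16_t port_id, struct rte_mempool *sess_mp)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key_and_salt, .length = 16 },
		},
	};
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = 0x1000, /* placeholder SPI */
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
		},
		.crypto_xform = &xform,
	};

	/* ends up in txgbe_crypto_create_session(), which programs the Tx SA */
	return rte_security_session_create(ctx, &conf, sess_mp);
}

A later rte_security_session_destroy(ctx, sess) lands in the
txgbe_crypto_remove_session() added here, which clears the SA from hardware
and returns the object to sess_mp.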
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index 7501e25af..0bdd1c061 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -199,6 +199,106 @@ txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session)
return 0;
}
+static int
+txgbe_crypto_remove_sa(struct rte_eth_dev *dev,
+ struct txgbe_crypto_session *ic_session)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
+ uint32_t reg_val;
+ int sa_index = -1;
+
+ if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
+ int i, ip_index = -1;
+
+ /* Find a match in the IP table*/
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
+ ip_index = i;
+ break;
+ }
+ }
+
+ /* Fail if no match*/
+ if (ip_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "Entry not found in the Rx IP table\n");
+ return -1;
+ }
+
+ /* Find a matching SPI in the Rx SA table */
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->rx_sa_tbl[i].spi ==
+ rte_cpu_to_be_32(ic_session->spi)) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no match*/
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "Entry not found in the Rx SA table\n");
+ return -1;
+ }
+
+ /* Disable and clear Rx SPI and key table entries */
+ reg_val = TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
+ wr32(hw, TXGBE_IPSRXSPI, 0);
+ wr32(hw, TXGBE_IPSRXADDRIDX, 0);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+ reg_val = TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_KEY | (sa_index << 3);
+ wr32(hw, TXGBE_IPSRXKEY(0), 0);
+ wr32(hw, TXGBE_IPSRXKEY(1), 0);
+ wr32(hw, TXGBE_IPSRXKEY(2), 0);
+ wr32(hw, TXGBE_IPSRXKEY(3), 0);
+ wr32(hw, TXGBE_IPSRXSALT, 0);
+ wr32(hw, TXGBE_IPSRXMODE, 0);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+ priv->rx_sa_tbl[sa_index].used = 0;
+
+ /* If last used then clear the IP table entry*/
+ priv->rx_ip_tbl[ip_index].ref_count--;
+ if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
+ reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_IP |
+ (ip_index << 3);
+ wr32(hw, TXGBE_IPSRXADDR(0), 0);
+ wr32(hw, TXGBE_IPSRXADDR(1), 0);
+ wr32(hw, TXGBE_IPSRXADDR(2), 0);
+ wr32(hw, TXGBE_IPSRXADDR(3), 0);
+ }
+ } else { /* egress: ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION */
+ int i;
+
+ /* Find a match in the SA table*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ if (priv->tx_sa_tbl[i].spi ==
+ rte_cpu_to_be_32(ic_session->spi)) {
+ sa_index = i;
+ break;
+ }
+ }
+ /* Fail if no matching entry */
+ if (sa_index < 0) {
+ PMD_DRV_LOG(ERR,
+ "Entry not found in the Tx SA table\n");
+ return -1;
+ }
+ reg_val = TXGBE_IPSRXIDX_WRITE | (sa_index << 3);
+ wr32(hw, TXGBE_IPSTXKEY(0), 0);
+ wr32(hw, TXGBE_IPSTXKEY(1), 0);
+ wr32(hw, TXGBE_IPSTXKEY(2), 0);
+ wr32(hw, TXGBE_IPSTXKEY(3), 0);
+ wr32(hw, TXGBE_IPSTXSALT, 0);
+ wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
+
+ priv->tx_sa_tbl[sa_index].used = 0;
+ }
+
+ return 0;
+}
+
static int
txgbe_crypto_create_session(void *device,
struct rte_security_session_conf *conf,
@@ -262,6 +362,70 @@ txgbe_crypto_create_session(void *device,
return 0;
}
+static unsigned int
+txgbe_crypto_session_get_size(__rte_unused void *device)
+{
+ return sizeof(struct txgbe_crypto_session);
+}
+
+static int
+txgbe_crypto_remove_session(void *device,
+ struct rte_security_session *session)
+{
+ struct rte_eth_dev *eth_dev = device;
+ struct txgbe_crypto_session *ic_session =
+ (struct txgbe_crypto_session *)
+ get_sec_session_private_data(session);
+ struct rte_mempool *mempool = rte_mempool_from_obj(ic_session);
+
+ if (eth_dev != ic_session->dev) {
+ PMD_DRV_LOG(ERR, "Session not bound to this device\n");
+ return -ENODEV;
+ }
+
+ if (txgbe_crypto_remove_sa(eth_dev, ic_session)) {
+ PMD_DRV_LOG(ERR, "Failed to remove session\n");
+ return -EFAULT;
+ }
+
+ rte_mempool_put(mempool, (void *)ic_session);
+
+ return 0;
+}
+
+static inline uint8_t
+txgbe_crypto_compute_pad_len(struct rte_mbuf *m)
+{
+ if (m->nb_segs == 1) {
+ /* 16 bytes ICV + 2 bytes ESP trailer + payload padding size
+ * payload padding size is stored at <pkt_len - 18>
+ */
+ uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *,
+ rte_pktmbuf_pkt_len(m) -
+ (ESP_TRAILER_SIZE + ESP_ICV_SIZE));
+ return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
+ }
+ return 0;
+}
+
+static int
+txgbe_crypto_update_mb(void *device __rte_unused,
+ struct rte_security_session *session,
+ struct rte_mbuf *m, void *params __rte_unused)
+{
+ struct txgbe_crypto_session *ic_session =
+ get_sec_session_private_data(session);
+ if (ic_session->op == TXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+ union txgbe_crypto_tx_desc_md *mdata =
+ (union txgbe_crypto_tx_desc_md *)
+ rte_security_dynfield(m);
+ mdata->enc = 1;
+ mdata->sa_idx = ic_session->sa_index;
+ mdata->pad_len = txgbe_crypto_compute_pad_len(m);
+ }
+ return 0;
+}
+
static const struct rte_security_capability *
txgbe_crypto_capabilities_get(void *device __rte_unused)
{
@@ -390,6 +554,9 @@ txgbe_crypto_capabilities_get(void *device __rte_unused)
static struct rte_security_ops txgbe_security_ops = {
.session_create = txgbe_crypto_create_session,
+ .session_get_size = txgbe_crypto_session_get_size,
+ .session_destroy = txgbe_crypto_remove_session,
+ .set_pkt_metadata = txgbe_crypto_update_mb,
.capabilities_get = txgbe_crypto_capabilities_get
};
diff --git a/drivers/net/txgbe/txgbe_ipsec.h b/drivers/net/txgbe/txgbe_ipsec.h
index c94775636..d022a255f 100644
--- a/drivers/net/txgbe/txgbe_ipsec.h
+++ b/drivers/net/txgbe/txgbe_ipsec.h
@@ -17,6 +17,9 @@
#define IPSEC_MAX_RX_IP_COUNT 128
#define IPSEC_MAX_SA_COUNT 1024
+#define ESP_ICV_SIZE 16
+#define ESP_TRAILER_SIZE 2
+
enum txgbe_operation {
TXGBE_OP_AUTHENTICATED_ENCRYPTION,
TXGBE_OP_AUTHENTICATED_DECRYPTION
@@ -68,6 +71,18 @@ struct txgbe_crypto_tx_sa_table {
uint8_t used;
};
+union txgbe_crypto_tx_desc_md {
+ uint64_t data;
+ struct {
+ /**< SA table index */
+ uint32_t sa_idx;
+ /**< ICV and ESP trailer length */
+ uint8_t pad_len;
+ /**< enable encryption */
+ uint8_t enc;
+ };
+};
+
struct txgbe_ipsec {
struct txgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
struct txgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 36/37] net/txgbe: add security offload in Rx and Tx process
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (34 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 35/37] net/txgbe: support security session destroy Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-03 10:08 ` [dpdk-dev] [PATCH 37/37] net/txgbe: add security type in flow action Jiawen Wu
2020-11-09 19:21 ` [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Ferruh Yigit
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add security offload to the Rx and Tx processing paths.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ipsec.c | 106 ++++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ipsec.h | 1 +
drivers/net/txgbe/txgbe_rxtx.c | 93 +++++++++++++++++++++++++++-
drivers/net/txgbe/txgbe_rxtx.h | 13 ++++
4 files changed, 211 insertions(+), 2 deletions(-)
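For reviewers: a rough sketch of how an application exercises these datapath
hooks once DEV_RX_OFFLOAD_SECURITY and DEV_TX_OFFLOAD_SECURITY are enabled;
sec_ctx, sess and the port/queue ids are placeholders, and error handling is
elided.

#include <stdbool.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>

static void
tx_one_esp_packet(uint16_t port_id, struct rte_security_ctx *sec_ctx,
		  struct rte_security_session *sess, struct rte_mbuf *m)
{
	/* Mark the packet for inline crypto; txgbe_crypto_update_mb() then
	 * records the SA index and pad length that txgbe_set_xmit_ctx()
	 * copies into the Tx context descriptor. */
	m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	rte_security_set_pkt_metadata(sec_ctx, sess, m, NULL);
	rte_eth_tx_burst(port_id, 0, &m, 1);
}

static bool
rx_pkt_decrypted_ok(const struct rte_mbuf *m)
{
	/* rx_desc_error_to_pkt_flags() maps TXGBE_RXD_STAT_SECP and
	 * TXGBE_RXD_ERR_SECERR to these generic mbuf flags. */
	return (m->ol_flags & PKT_RX_SEC_OFFLOAD) &&
	       !(m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED);
}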
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index 0bdd1c061..f8c54f3d4 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -19,6 +19,55 @@
(a).ipv6[2] == (b).ipv6[2] && \
(a).ipv6[3] == (b).ipv6[3])
+static void
+txgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
+ int i = 0;
+
+ /* clear Rx IP table*/
+ for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+ uint16_t index = i << 3;
+ uint32_t reg_val = TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_IP | index;
+ wr32(hw, TXGBE_IPSRXADDR(0), 0);
+ wr32(hw, TXGBE_IPSRXADDR(1), 0);
+ wr32(hw, TXGBE_IPSRXADDR(2), 0);
+ wr32(hw, TXGBE_IPSRXADDR(3), 0);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+ }
+
+ /* clear Rx SPI and Rx/Tx SA tables*/
+ for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+ uint32_t index = i << 3;
+ uint32_t reg_val = TXGBE_IPSRXIDX_WRITE |
+ TXGBE_IPSRXIDX_TB_SPI | index;
+ wr32(hw, TXGBE_IPSRXSPI, 0);
+ wr32(hw, TXGBE_IPSRXADDRIDX, 0);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+ reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_KEY | index;
+ wr32(hw, TXGBE_IPSRXKEY(0), 0);
+ wr32(hw, TXGBE_IPSRXKEY(1), 0);
+ wr32(hw, TXGBE_IPSRXKEY(2), 0);
+ wr32(hw, TXGBE_IPSRXKEY(3), 0);
+ wr32(hw, TXGBE_IPSRXSALT, 0);
+ wr32(hw, TXGBE_IPSRXMODE, 0);
+ wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+ reg_val = TXGBE_IPSTXIDX_WRITE | index;
+ wr32(hw, TXGBE_IPSTXKEY(0), 0);
+ wr32(hw, TXGBE_IPSTXKEY(1), 0);
+ wr32(hw, TXGBE_IPSTXKEY(2), 0);
+ wr32(hw, TXGBE_IPSTXKEY(3), 0);
+ wr32(hw, TXGBE_IPSTXSALT, 0);
+ wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
+ }
+
+ memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl));
+ memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl));
+ memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl));
+}
+
static int
txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session)
{
@@ -552,6 +601,63 @@ txgbe_crypto_capabilities_get(void *device __rte_unused)
return txgbe_security_capabilities;
}
+int
+txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t reg;
+ uint64_t rx_offloads;
+ uint64_t tx_offloads;
+
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+ /* sanity checks */
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
+ return -1;
+ }
+ if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+ PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
+ return -1;
+ }
+
+ /* Set TXGBE_SECTXBUFAF to 0x14 as required in the datasheet */
+ wr32(hw, TXGBE_SECTXBUFAF, 0x14);
+
+ /* IFG needs to be set to 3 when we are using security. Otherwise a Tx
+ * hang will occur with heavy traffic.
+ */
+ reg = rd32(hw, TXGBE_SECTXIFG);
+ reg = (reg & ~TXGBE_SECTXIFG_MIN_MASK) | TXGBE_SECTXIFG_MIN(0x3);
+ wr32(hw, TXGBE_SECTXIFG, reg);
+
+ reg = rd32(hw, TXGBE_SECRXCTL);
+ reg |= TXGBE_SECRXCTL_CRCSTRIP;
+ wr32(hw, TXGBE_SECRXCTL, reg);
+
+ if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
+ reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
+ if (reg != 0) {
+ PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+ return -1;
+ }
+ }
+ if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+ wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
+ reg = rd32(hw, TXGBE_SECTXCTL);
+ if (reg != TXGBE_SECTXCTL_STFWD) {
+ PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+ return -1;
+ }
+ }
+
+ txgbe_crypto_clear_ipsec_tables(dev);
+
+ return 0;
+}
+
static struct rte_security_ops txgbe_security_ops = {
.session_create = txgbe_crypto_create_session,
.session_get_size = txgbe_crypto_session_get_size,
diff --git a/drivers/net/txgbe/txgbe_ipsec.h b/drivers/net/txgbe/txgbe_ipsec.h
index d022a255f..54c27d8ce 100644
--- a/drivers/net/txgbe/txgbe_ipsec.h
+++ b/drivers/net/txgbe/txgbe_ipsec.h
@@ -90,5 +90,6 @@ struct txgbe_ipsec {
};
int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
+int txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
#endif /*TXGBE_IPSEC_H_*/
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index f99924a3f..18588b985 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -20,6 +20,7 @@
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
+#include <rte_security_driver.h>
#include <rte_memzone.h>
#include <rte_atomic.h>
#include <rte_mempool.h>
@@ -60,6 +61,9 @@ static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
PKT_TX_MACSEC |
#endif
PKT_TX_OUTER_IP_CKSUM |
+#ifdef RTE_LIB_SECURITY
+ PKT_TX_SEC_OFFLOAD |
+#endif
TXGBE_TX_IEEE1588_TMST);
#define TXGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -314,7 +318,8 @@ txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
static inline void
txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
volatile struct txgbe_tx_ctx_desc *ctx_txd,
- uint64_t ol_flags, union txgbe_tx_offload tx_offload)
+ uint64_t ol_flags, union txgbe_tx_offload tx_offload,
+ __rte_unused uint64_t *mdata)
{
union txgbe_tx_offload tx_offload_mask;
uint32_t type_tucmd_mlhl;
@@ -408,6 +413,19 @@ txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
}
+#ifdef RTE_LIB_SECURITY
+ if (ol_flags & PKT_TX_SEC_OFFLOAD) {
+ union txgbe_crypto_tx_desc_md *md =
+ (union txgbe_crypto_tx_desc_md *)mdata;
+ tunnel_seed |= TXGBE_TXD_IPSEC_SAIDX(md->sa_idx);
+ type_tucmd_mlhl |= md->enc ?
+ (TXGBE_TXD_IPSEC_ESP | TXGBE_TXD_IPSEC_ESPENC) : 0;
+ type_tucmd_mlhl |= TXGBE_TXD_IPSEC_ESPLEN(md->pad_len);
+ tx_offload_mask.sa_idx |= ~0;
+ tx_offload_mask.sec_pad_len |= ~0;
+ }
+#endif
+
txq->ctx_cache[ctx_idx].flags = ol_flags;
txq->ctx_cache[ctx_idx].tx_offload.data[0] =
tx_offload_mask.data[0] & tx_offload.data[0];
@@ -704,6 +722,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint32_t ctx = 0;
uint32_t new_ctx;
union txgbe_tx_offload tx_offload;
+#ifdef RTE_LIB_SECURITY
+ uint8_t use_ipsec;
+#endif
tx_offload.data[0] = 0;
tx_offload.data[1] = 0;
@@ -730,6 +751,9 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
+#ifdef RTE_LIB_SECURITY
+ use_ipsec = txq->using_ipsec && (ol_flags & PKT_TX_SEC_OFFLOAD);
+#endif
/* If hardware offload required */
tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
@@ -745,6 +769,16 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
+#ifdef RTE_LIB_SECURITY
+ if (use_ipsec) {
+ union txgbe_crypto_tx_desc_md *ipsec_mdata =
+ (union txgbe_crypto_tx_desc_md *)
+ rte_security_dynfield(tx_pkt);
+ tx_offload.sa_idx = ipsec_mdata->sa_idx;
+ tx_offload.sec_pad_len = ipsec_mdata->pad_len;
+ }
+#endif
+
/* If new context need be built or reuse the exist ctx*/
ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
/* Only allocate context descriptor if required */
@@ -898,7 +932,8 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- tx_offload);
+ tx_offload,
+ rte_security_dynfield(tx_pkt));
txe->last_id = tx_last;
tx_id = txe->next_id;
@@ -917,6 +952,10 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len);
+#ifdef RTE_LIB_SECURITY
+ if (use_ipsec)
+ olinfo_status |= TXGBE_TXD_IPSEC;
+#endif
m_seg = tx_pkt;
do {
@@ -1101,6 +1140,14 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
}
+#ifdef RTE_LIB_SECURITY
+ if (rx_status & TXGBE_RXD_STAT_SECP) {
+ pkt_flags |= PKT_RX_SEC_OFFLOAD;
+ if (rx_status & TXGBE_RXD_ERR_SECERR)
+ pkt_flags |= PKT_RX_SEC_OFFLOAD_FAILED;
+ }
+#endif
+
return pkt_flags;
}
@@ -1929,6 +1976,11 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+#ifdef RTE_LIB_SECURITY
+ if (dev->security_ctx)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
return offloads;
}
@@ -2030,6 +2082,9 @@ txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
{
struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
if (txq->offloads == 0 &&
+#ifdef RTE_LIB_SECURITY
+ !(txq->using_ipsec) &&
+#endif
txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)
return txgbe_tx_done_cleanup_simple(txq, free_cnt);
@@ -2113,6 +2168,9 @@ txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
if (txq->offloads == 0 &&
+#ifdef RTE_LIB_SECURITY
+ !(txq->using_ipsec) &&
+#endif
txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
@@ -2167,6 +2225,10 @@ txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+#ifdef RTE_LIB_SECURITY
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
return tx_offload_capa;
}
@@ -2265,6 +2327,10 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->offloads = offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
+#ifdef RTE_LIB_SECURITY
+ txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_SECURITY);
+#endif
/* Modification to set tail pointer for virtual function
* if vf is detected.
@@ -4065,6 +4131,7 @@ txgbe_set_rsc(struct rte_eth_dev *dev)
void __rte_cold
txgbe_set_rx_function(struct rte_eth_dev *dev)
{
+ uint16_t i;
struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
/*
@@ -4125,6 +4192,15 @@ txgbe_set_rx_function(struct rte_eth_dev *dev)
dev->rx_pkt_burst = txgbe_recv_pkts;
}
+
+#ifdef RTE_LIB_SECURITY
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
+ rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_SECURITY);
+ }
+#endif
}
/*
@@ -4395,6 +4471,19 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
dev->data->dev_conf.lpbk_mode)
txgbe_setup_loopback_link_raptor(hw);
+#ifdef RTE_LIB_SECURITY
+ if ((dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) ||
+ (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY)) {
+ ret = txgbe_crypto_enable_ipsec(dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "txgbe_crypto_enable_ipsec fails with %d.",
+ ret);
+ return ret;
+ }
+ }
+#endif
+
return 0;
}
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index 6e0e86ce5..203bdeb88 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -293,6 +293,10 @@ struct txgbe_rx_queue {
uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+#ifdef RTE_LIB_SECURITY
+ uint8_t using_ipsec;
+ /**< indicates that IPsec RX feature is in use */
+#endif
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
uint16_t reg_idx; /**< RX queue register index. */
@@ -336,6 +340,11 @@ union txgbe_tx_offload {
uint64_t outer_tun_len:8; /**< Outer TUN (Tunnel) Hdr Length. */
uint64_t outer_l2_len:8; /**< Outer L2 (MAC) Hdr Length. */
uint64_t outer_l3_len:16; /**< Outer L3 (IP) Hdr Length. */
+#ifdef RTE_LIB_SECURITY
+ /* inline ipsec related*/
+ uint64_t sa_idx:8; /**< TX SA database entry index */
+ uint64_t sec_pad_len:4; /**< padding length */
+#endif
};
};
@@ -388,6 +397,10 @@ struct txgbe_tx_queue {
struct txgbe_ctx_info ctx_cache[TXGBE_CTX_NUM];
const struct txgbe_txq_ops *ops; /**< txq ops */
uint8_t tx_deferred_start; /**< not in global dev start. */
+#ifdef RTE_LIB_SECURITY
+ uint8_t using_ipsec;
+ /**< indicates that IPsec TX feature is in use */
+#endif
};
struct txgbe_txq_ops {
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* [dpdk-dev] [PATCH 37/37] net/txgbe: add security type in flow action
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (35 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 36/37] net/txgbe: add security offload in Rx and Tx process Jiawen Wu
@ 2020-11-03 10:08 ` Jiawen Wu
2020-11-09 19:21 ` [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Ferruh Yigit
37 siblings, 0 replies; 39+ messages in thread
From: Jiawen Wu @ 2020-11-03 10:08 UTC (permalink / raw)
To: dev; +Cc: Jiawen Wu
Add security type in flow action.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_flow.c | 52 +++++++++++++++++++++++++++++++++
drivers/net/txgbe/txgbe_ipsec.c | 30 +++++++++++++++++++
drivers/net/txgbe/txgbe_ipsec.h | 3 ++
3 files changed, 85 insertions(+)
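For reviewers: a minimal sketch of an rte_flow rule that the new parsing
accepts, i.e. an IP/ESP pattern terminated by the security action; the
addresses and sess below are placeholders.

#include <rte_flow.h>
#include <rte_security.h>

static struct rte_flow *
create_esp_inline_flow(uint16_t port_id, struct rte_security_session *sess,
		       rte_be32_t src_addr, rte_be32_t dst_addr)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = { .src_addr = src_addr, .dst_addr = dst_addr },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sess },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* cons_parse_ntuple_filter() spots the security action first, walks
	 * the pattern to the IP item, and installs the ingress SA. */
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}

For an ingress session, txgbe_crypto_add_ingress_sa_from_flow() copies the
IP spec into the session before programming the Rx tables.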
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index b5f4073e2..5ca89d619 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -145,6 +145,9 @@ const struct rte_flow_action *next_no_void_action(
* END
* other members in mask and spec should set to 0x00.
* item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
*/
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
@@ -193,6 +196,43 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
+#ifdef RTE_LIB_SECURITY
+ /**
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+ const void *conf = act->conf;
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* get the IP pattern*/
+ item = next_no_void_pattern(pattern, NULL);
+ while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ if (item->last ||
+ item->type == RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "IP pattern missing.");
+ return -rte_errno;
+ }
+ item = next_no_void_pattern(pattern, item);
+ }
+
+ filter->proto = IPPROTO_ESP;
+ return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+ }
+#endif
+
/* the first not void item can be MAC or IPv4 */
item = next_no_void_pattern(pattern, NULL);
@@ -563,6 +603,12 @@ txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+#ifdef RTE_LIB_SECURITY
+ /* ESP flow not really a flow */
+ if (filter->proto == IPPROTO_ESP)
+ return 0;
+#endif
+
/* txgbe doesn't support tcp flags */
if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -2690,6 +2736,12 @@ txgbe_flow_create(struct rte_eth_dev *dev,
ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
+#ifdef RTE_LIB_SECURITY
+ /* ESP flow not really a flow*/
+ if (ntuple_filter.proto == IPPROTO_ESP)
+ return flow;
+#endif
+
if (!ret) {
ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
if (!ret) {
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index f8c54f3d4..d0543ce0b 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -658,6 +658,36 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
return 0;
}
+int
+txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+ const void *ip_spec,
+ uint8_t is_ipv6)
+{
+ struct txgbe_crypto_session *ic_session =
+ get_sec_session_private_data(sess);
+
+ if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
+ if (is_ipv6) {
+ const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
+ ic_session->src_ip.type = IPv6;
+ ic_session->dst_ip.type = IPv6;
+ rte_memcpy(ic_session->src_ip.ipv6,
+ ipv6->hdr.src_addr, 16);
+ rte_memcpy(ic_session->dst_ip.ipv6,
+ ipv6->hdr.dst_addr, 16);
+ } else {
+ const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
+ ic_session->src_ip.type = IPv4;
+ ic_session->dst_ip.type = IPv4;
+ ic_session->src_ip.ipv4 = ipv4->hdr.src_addr;
+ ic_session->dst_ip.ipv4 = ipv4->hdr.dst_addr;
+ }
+ return txgbe_crypto_add_sa(ic_session);
+ }
+
+ return 0;
+}
+
static struct rte_security_ops txgbe_security_ops = {
.session_create = txgbe_crypto_create_session,
.session_get_size = txgbe_crypto_session_get_size,
diff --git a/drivers/net/txgbe/txgbe_ipsec.h b/drivers/net/txgbe/txgbe_ipsec.h
index 54c27d8ce..1e5678437 100644
--- a/drivers/net/txgbe/txgbe_ipsec.h
+++ b/drivers/net/txgbe/txgbe_ipsec.h
@@ -91,5 +91,8 @@ struct txgbe_ipsec {
int txgbe_ipsec_ctx_create(struct rte_eth_dev *dev);
int txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
+int txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+ const void *ip_spec,
+ uint8_t is_ipv6);
#endif /*TXGBE_IPSEC_H_*/
--
2.18.4
^ permalink raw reply [flat|nested] 39+ messages in thread
* Re: [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2
2020-11-03 10:07 [dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2 Jiawen Wu
` (36 preceding siblings ...)
2020-11-03 10:08 ` [dpdk-dev] [PATCH 37/37] net/txgbe: add security type in flow action Jiawen Wu
@ 2020-11-09 19:21 ` Ferruh Yigit
37 siblings, 0 replies; 39+ messages in thread
From: Ferruh Yigit @ 2020-11-09 19:21 UTC (permalink / raw)
To: Jiawen Wu, dev
On 11/3/2020 10:07 AM, Jiawen Wu wrote:
> Add the remaining part of txgbe PMD.
> Support include flow API, traffic manager, macsec and ipsec.
>
> Jiawen Wu (37):
> net/txgbe: add ntuple filter init and uninit
> net/txgbe: support ntuple filter add and delete
> net/txgbe: add ntuple parse rule
> net/txgbe: support ntuple filter remove operation
> net/txgbe: support ethertype filter add and delete
> net/txgbe: add ethertype parse rule
> net/txgbe: support syn filter add and delete
> net/txgbe: add syn filter parse rule
> net/txgbe: add L2 tunnel filter init and uninit
> net/txgbe: config L2 tunnel filter with e-tag
> net/txgbe: support L2 tunnel filter add and delete
> net/txgbe: add L2 tunnel filter parse rule
> net/txgbe: add FDIR filter init and uninit.
> net/txgbe: configure FDIR filter
> net/txgbe: support FDIR add and delete operations
> net/txgbe: add FDIR parse normal rule
> net/txgbe: add FDIR parse tunnel rule
> net/txgbe: add FDIR restore operation
> net/txgbe: add RSS filter parse rule
> net/txgbe: add RSS filter restore operation
> net/txgbe: add filter list init and uninit
> net/txgbe: add flow API
> net/txgbe: add flow API create function
> net/txgbe: add flow API destroy function
> net/txgbe: add flow API flush function
> net/txgbe: support UDP tunnel port add and delete
> net/txgbe: add TM configuration init and uninit
> net/txgbe: add TM capabilities get operation
> net/txgbe: support TM shaper profile add and delete
> net/txgbe: support TM node add and delete
> net/txgbe: add TM hierarchy commit
> net/txgbe: add macsec setting
> net/txgbe: add IPsec context creation
> net/txgbe: add security session create operation
> net/txgbe: support security session destroy
> net/txgbe: add security offload in Rx and Tx process
> net/txgbe: add security type in flow action
>
Hi Jiawen,
Since the fixes were applied on top of the first set, we can proceed with this one.
As a first step, this set doesn't apply cleanly; can you please rebase on top
of the latest next-net and send a new version?
^ permalink raw reply [flat|nested] 39+ messages in thread