* [dpdk-dev] [RFC 2/5] net/igc: decouple dependency from superseded structures
@ 2020-09-29 7:48 Chenxu Di
2020-09-29 7:48 ` [dpdk-dev] [RFC 3/5] net/e1000: " Chenxu Di
2020-09-29 7:48 ` [dpdk-dev] [RFC 4/5] net/ixgbe: " Chenxu Di
0 siblings, 2 replies; 3+ messages in thread
From: Chenxu Di @ 2020-09-29 7:48 UTC
To: dev; +Cc: junyux.jiang, shougangx.wang, Jeff Guo, Haiyue Wang
From: Junyu Jiang <junyux.jiang@intel.com>
The legacy filter API, together with the associated rte_eth_ctrl.h
header, will be removed. This patch replaces the superseded structures
with PMD-internal equivalents.
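For reference, the 0x3F value given to the new PMD-local mask below is
carried over from the old RTE_NTUPLE_TCP_FLAGS_MASK, and is simply the
OR of the six classic TCP flag bits from rte_tcp.h; a minimal sketch
(not part of the diff) showing the equivalence:

/* Sketch: IGC_NTUPLE_TCP_FLAGS_MASK keeps the legacy mask value,
 * FIN | SYN | RST | PSH | ACK | URG == 0x3F.
 */
#include <rte_tcp.h>

#define IGC_NTUPLE_TCP_FLAGS_MASK \
	(RTE_TCP_FIN_FLAG | RTE_TCP_SYN_FLAG | RTE_TCP_RST_FLAG | \
	 RTE_TCP_PSH_FLAG | RTE_TCP_ACK_FLAG | RTE_TCP_URG_FLAG)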
Signed-off-by: Junyu Jiang <junyux.jiang@intel.com>
---
drivers/net/igc/igc_filter.c | 2 +-
drivers/net/igc/igc_filter.h | 2 ++
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/igc/igc_filter.c b/drivers/net/igc/igc_filter.c
index 836621d4c..7b6f52a4c 100644
--- a/drivers/net/igc/igc_filter.c
+++ b/drivers/net/igc/igc_filter.c
@@ -216,7 +216,7 @@ igc_enable_tuple_filter(struct rte_eth_dev *dev,
ttqf &= ~IGC_TTQF_MASK_ENABLE;
/* TCP flags bits setting. */
- if (info->tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
+ if (info->tcp_flags & IGC_NTUPLE_TCP_FLAGS_MASK) {
if (info->tcp_flags & RTE_TCP_URG_FLAG)
imir_ext |= IGC_IMIREXT_CTRL_URG;
if (info->tcp_flags & RTE_TCP_ACK_FLAG)
diff --git a/drivers/net/igc/igc_filter.h b/drivers/net/igc/igc_filter.h
index 79951504f..34bc0a7e3 100644
--- a/drivers/net/igc/igc_filter.h
+++ b/drivers/net/igc/igc_filter.h
@@ -16,6 +16,8 @@
extern "C" {
#endif
+#define IGC_NTUPLE_TCP_FLAGS_MASK 0x3F /**< TCP flags the filter can match. */
+
int igc_add_ethertype_filter(struct rte_eth_dev *dev,
const struct igc_ethertype_filter *filter);
int igc_del_ethertype_filter(struct rte_eth_dev *dev,
--
2.17.1
* [dpdk-dev] [RFC 3/5] net/e1000: decouple dependency from superseded structures
2020-09-29 7:48 [dpdk-dev] [RFC 2/5] net/igc: decouple dependency from superseded structures Chenxu Di
@ 2020-09-29 7:48 ` Chenxu Di
2020-09-29 7:48 ` [dpdk-dev] [RFC 4/5] net/ixgbe: " Chenxu Di
1 sibling, 0 replies; 3+ messages in thread
From: Chenxu Di @ 2020-09-29 7:48 UTC
To: dev; +Cc: junyux.jiang, shougangx.wang, Jeff Guo, Haiyue Wang, Chenxu Di
The legacy filter API, together with the associated rte_eth_ctrl.h
header, will be removed. This patch replaces the superseded structures
with PMD-internal equivalents. The macros RTE_ETH_FILTER_GENERIC and
RTE_ETH_FILTER_GET are not replaced here; they can only change once the
corresponding librte update lands.
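Since the legacy filter_ctrl path is going away, equivalent matches are
expressed through rte_flow; below is a minimal sketch (port_id and the
queue index 1 are assumed example values, not from this patch) of an
ingress eth/ipv4/tcp rule of the kind the reworked igb parser maps onto
struct igb_flow_ntuple_filter:

#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Sketch: match proto == TCP and TCP dst port == 80, steer to queue 1.
 * This is the two-tuple subset, so it is accepted on non-82576 MACs too.
 */
static struct rte_flow *
add_tcp_dport_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = { .hdr.next_proto_id = IPPROTO_TCP };
	struct rte_flow_item_ipv4 ip_mask = { .hdr.next_proto_id = 0xFF };
	struct rte_flow_item_tcp tcp_spec = { .hdr.dst_port = RTE_BE16(80) };
	struct rte_flow_item_tcp tcp_mask = { .hdr.dst_port = RTE_BE16(0xFFFF) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}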
Signed-off-by: Chenxu Di <chenxux.di@intel.com>
---
drivers/net/e1000/e1000_ethdev.h | 113 ++++++++++++++++--
drivers/net/e1000/igb_ethdev.c | 80 ++++++-------
drivers/net/e1000/igb_flow.c | 199 ++++++++++++++++---------------
3 files changed, 245 insertions(+), 147 deletions(-)
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 1e41ae9de..3c30b9ebe 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -237,6 +237,88 @@ struct e1000_2tuple_filter {
uint16_t queue; /* rx queue assigned to */
};
+/* Define all structures for ntuple Filter type. */
+
+#define IGB_NTUPLE_FLAGS_DST_IP 0x0001 /* If set, dst_ip is part of ntuple */
+#define IGB_NTUPLE_FLAGS_SRC_IP 0x0002 /* If set, src_ip is part of ntuple */
+#define IGB_NTUPLE_FLAGS_DST_PORT 0x0004 /* If set, dstport is part of ntuple */
+#define IGB_NTUPLE_FLAGS_SRC_PORT 0x0008 /* If set, srcport is part of ntuple */
+#define IGB_NTUPLE_FLAGS_PROTO 0x0010 /* If set, proto is part of ntuple */
+#define IGB_NTUPLE_FLAGS_TCP_FLAG 0x0020 /* If set, tcp flag is involved */
+
+#define IGB_5TUPLE_FLAGS ( \
+ IGB_NTUPLE_FLAGS_DST_IP | \
+ IGB_NTUPLE_FLAGS_SRC_IP | \
+ IGB_NTUPLE_FLAGS_DST_PORT | \
+ IGB_NTUPLE_FLAGS_SRC_PORT | \
+ IGB_NTUPLE_FLAGS_PROTO)
+
+#define IGB_2TUPLE_FLAGS ( \
+ IGB_NTUPLE_FLAGS_DST_PORT | \
+ IGB_NTUPLE_FLAGS_PROTO)
+
+#define IGB_NTUPLE_TCP_FLAGS_MASK 0x3F /* TCP flags the filter can match. */
+
+struct igb_flow_ntuple_filter {
+ uint16_t flags; /* Flags from IGB_NTUPLE_FLAGS_* */
+ uint32_t dst_ip; /* Destination IP address in big endian. */
+ uint32_t dst_ip_mask; /* Mask of destination IP address. */
+ uint32_t src_ip; /* Source IP address in big endian. */
+ uint32_t src_ip_mask; /* Mask of source IP address. */
+ uint16_t dst_port; /* Destination port in big endian. */
+ uint16_t dst_port_mask; /* Mask of destination port. */
+ uint16_t src_port; /* Source port in big endian. */
+ uint16_t src_port_mask; /* Mask of source port. */
+ uint8_t proto; /* L4 protocol. */
+ uint8_t proto_mask; /* Mask of L4 protocol. */
+ /* tcp_flags is only meaningful when the proto is TCP.
+ * A packet that matches the above ntuple fields and
+ * contains any set bit in tcp_flags will hit this filter.
+ */
+ uint8_t tcp_flags;
+ /* Seven priority levels (001b-111b); 111b is the highest,
+ * used when more than one filter matches.
+ */
+ uint16_t priority;
+ uint16_t queue; /* Queue assigned to on match. */
+};
+
+/* Maximum number of bytes to match in a flex filter. */
+#define IGB_FLEX_FILTER_MAXLEN 128
+/* Number of mask bytes in a flex filter. */
+#define IGB_FLEX_FILTER_MASK_SIZE \
+ (RTE_ALIGN(IGB_FLEX_FILTER_MAXLEN, CHAR_BIT) / CHAR_BIT)
+
+struct igb_flow_flex_filter {
+ uint16_t len;
+ uint8_t bytes[IGB_FLEX_FILTER_MAXLEN]; /* flex bytes in big endian. */
+ /* if mask bit is 1b, do not compare corresponding byte. */
+ uint8_t mask[IGB_FLEX_FILTER_MASK_SIZE];
+ uint8_t priority;
+ uint16_t queue; /* Queue assigned to on match. */
+};
+
+struct igb_flow_syn_filter {
+ /* 1 - higher priority than other filters, 0 - lower priority. */
+ uint8_t hig_pri;
+ /* Queue assigned to on match. */
+ uint16_t queue;
+};
+
+/**
+ * Define all structures for Ethertype Filter type.
+ */
+
+#define IGB_ETHTYPE_FLAGS_MAC 0x0001 /* If set, compare mac */
+#define IGB_ETHTYPE_FLAGS_DROP 0x0002 /* If set, drop packet when match */
+
+struct igb_flow_ethertype_filter {
+ struct rte_ether_addr mac_addr; /* MAC address to match. */
+ uint16_t ether_type; /* Ether type to match. */
+ uint16_t flags; /* Flags from IGB_ETHTYPE_FLAGS_* */
+ uint16_t queue; /* Queue assigned to on match. */
+};
+
/* ethertype filter structure */
struct igb_ethertype_filter {
uint16_t ethertype;
@@ -308,33 +390,46 @@ struct e1000_adapter {
#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
(&((struct e1000_adapter *)adapter)->filter)
+/**
+ * Feature filter types
+ */
+enum igb_filter_type {
+ IGB_FILTER_NONE = 0,
+ IGB_FILTER_ETHERTYPE,
+ IGB_FILTER_FLEXIBLE,
+ IGB_FILTER_SYN,
+ IGB_FILTER_NTUPLE,
+ IGB_FILTER_HASH,
+ IGB_FILTER_MAX
+};
+
struct rte_flow {
- enum rte_filter_type filter_type;
+ enum igb_filter_type filter_type;
void *rule;
};
/* ntuple filter list structure */
struct igb_ntuple_filter_ele {
TAILQ_ENTRY(igb_ntuple_filter_ele) entries;
- struct rte_eth_ntuple_filter filter_info;
+ struct igb_flow_ntuple_filter filter_info;
};
/* ethertype filter list structure */
struct igb_ethertype_filter_ele {
TAILQ_ENTRY(igb_ethertype_filter_ele) entries;
- struct rte_eth_ethertype_filter filter_info;
+ struct igb_flow_ethertype_filter filter_info;
};
/* syn filter list structure */
struct igb_eth_syn_filter_ele {
TAILQ_ENTRY(igb_eth_syn_filter_ele) entries;
- struct rte_eth_syn_filter filter_info;
+ struct igb_flow_syn_filter filter_info;
};
/* flex filter list structure */
struct igb_flex_filter_ele {
TAILQ_ENTRY(igb_flex_filter_ele) entries;
- struct rte_eth_flex_filter filter_info;
+ struct igb_flow_flex_filter filter_info;
};
/* rss filter list structure */
@@ -507,15 +602,15 @@ void igb_remove_flex_filter(struct rte_eth_dev *dev,
int igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
uint8_t idx);
int igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter, bool add);
+ struct igb_flow_ntuple_filter *ntuple_filter, bool add);
int igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter,
+ struct igb_flow_ethertype_filter *filter,
bool add);
int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter,
+ struct igb_flow_syn_filter *filter,
bool add);
int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
- struct rte_eth_flex_filter *filter,
+ struct igb_flow_flex_filter *filter,
bool add);
int igb_rss_conf_init(struct rte_eth_dev *dev,
struct igb_rte_flow_rss_conf *out,
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 5ab74840a..588fdea11 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -192,20 +192,20 @@ static int eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int igb_add_2tuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter);
+ struct igb_flow_ntuple_filter *ntuple_filter);
static int igb_remove_2tuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter);
+ struct igb_flow_ntuple_filter *ntuple_filter);
static int eth_igb_get_flex_filter(struct rte_eth_dev *dev,
- struct rte_eth_flex_filter *filter);
+ struct igb_flow_flex_filter *filter);
static int eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter);
+ struct igb_flow_ntuple_filter *ntuple_filter);
static int igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter);
+ struct igb_flow_ntuple_filter *ntuple_filter);
static int igb_get_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *filter);
+ struct igb_flow_ntuple_filter *filter);
static int igb_ntuple_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -3637,7 +3637,7 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
int
eth_igb_syn_filter_set(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter,
+ struct igb_flow_syn_filter *filter,
bool add)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3717,12 +3717,12 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
ret = eth_igb_syn_filter_set(dev,
- (struct rte_eth_syn_filter *)arg,
+ (struct igb_flow_syn_filter *)arg,
TRUE);
break;
case RTE_ETH_FILTER_DELETE:
ret = eth_igb_syn_filter_set(dev,
- (struct rte_eth_syn_filter *)arg,
+ (struct igb_flow_syn_filter *)arg,
FALSE);
break;
case RTE_ETH_FILTER_GET:
@@ -3740,14 +3740,14 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
/* translate elements in struct rte_eth_ntuple_filter to struct e1000_2tuple_filter_info*/
static inline int
-ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
+ntuple_filter_to_2tuple(struct igb_flow_ntuple_filter *filter,
struct e1000_2tuple_filter_info *filter_info)
{
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
return -EINVAL;
if (filter->priority > E1000_2TUPLE_MAX_PRI)
return -EINVAL; /* filter index is out of range. */
- if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
+ if (filter->tcp_flags > IGB_NTUPLE_TCP_FLAGS_MASK)
return -EINVAL; /* flags is invalid. */
switch (filter->dst_port_mask) {
@@ -3777,7 +3777,7 @@ ntuple_filter_to_2tuple(struct rte_eth_ntuple_filter *filter,
}
filter_info->priority = (uint8_t)filter->priority;
- if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
+ if (filter->flags & IGB_NTUPLE_FLAGS_TCP_FLAG)
filter_info->tcp_flags = filter->tcp_flags;
else
filter_info->tcp_flags = 0;
@@ -3827,7 +3827,7 @@ igb_inject_2uple_filter(struct rte_eth_dev *dev,
ttqf &= ~E1000_TTQF_MASK_ENABLE;
/* tcp flags bits setting. */
- if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
+ if (filter->filter_info.tcp_flags & IGB_NTUPLE_TCP_FLAGS_MASK) {
if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
imir_ext |= E1000_IMIREXT_CTRL_URG;
if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
@@ -3861,7 +3861,7 @@ igb_inject_2uple_filter(struct rte_eth_dev *dev,
*/
static int
igb_add_2tuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter)
+ struct igb_flow_ntuple_filter *ntuple_filter)
{
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -3942,7 +3942,7 @@ igb_delete_2tuple_filter(struct rte_eth_dev *dev,
*/
static int
igb_remove_2tuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter)
+ struct igb_flow_ntuple_filter *ntuple_filter)
{
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -4056,7 +4056,7 @@ igb_remove_flex_filter(struct rte_eth_dev *dev,
int
eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
- struct rte_eth_flex_filter *filter,
+ struct igb_flow_flex_filter *filter,
bool add)
{
struct e1000_filter_info *filter_info =
@@ -4130,7 +4130,7 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
static int
eth_igb_get_flex_filter(struct rte_eth_dev *dev,
- struct rte_eth_flex_filter *filter)
+ struct igb_flow_flex_filter *filter)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
@@ -4180,7 +4180,7 @@ eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
void *arg)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_flex_filter *filter;
+ struct igb_flow_flex_filter *filter;
int ret = 0;
MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
@@ -4194,7 +4194,7 @@ eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
return -EINVAL;
}
- filter = (struct rte_eth_flex_filter *)arg;
+ filter = (struct igb_flow_flex_filter *)arg;
if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN
|| filter->len % sizeof(uint64_t) != 0) {
PMD_DRV_LOG(ERR, "filter's length is out of range");
@@ -4226,14 +4226,14 @@ eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
/* translate elements in struct rte_eth_ntuple_filter to struct e1000_5tuple_filter_info*/
static inline int
-ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
+ntuple_filter_to_5tuple_82576(struct igb_flow_ntuple_filter *filter,
struct e1000_5tuple_filter_info *filter_info)
{
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576)
return -EINVAL;
if (filter->priority > E1000_2TUPLE_MAX_PRI)
return -EINVAL; /* filter index is out of range. */
- if (filter->tcp_flags > RTE_NTUPLE_TCP_FLAGS_MASK)
+ if (filter->tcp_flags > IGB_NTUPLE_TCP_FLAGS_MASK)
return -EINVAL; /* flags is invalid. */
switch (filter->dst_ip_mask) {
@@ -4302,7 +4302,7 @@ ntuple_filter_to_5tuple_82576(struct rte_eth_ntuple_filter *filter,
}
filter_info->priority = (uint8_t)filter->priority;
- if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG)
+ if (filter->flags & IGB_NTUPLE_FLAGS_TCP_FLAG)
filter_info->tcp_flags = filter->tcp_flags;
else
filter_info->tcp_flags = 0;
@@ -4363,7 +4363,7 @@ igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
imir |= filter->filter_info.priority << E1000_IMIR_PRIORITY_SHIFT;
/* tcp flags bits setting. */
- if (filter->filter_info.tcp_flags & RTE_NTUPLE_TCP_FLAGS_MASK) {
+ if (filter->filter_info.tcp_flags & IGB_NTUPLE_TCP_FLAGS_MASK) {
if (filter->filter_info.tcp_flags & RTE_TCP_URG_FLAG)
imir_ext |= E1000_IMIREXT_CTRL_URG;
if (filter->filter_info.tcp_flags & RTE_TCP_ACK_FLAG)
@@ -4396,7 +4396,7 @@ igb_inject_5tuple_filter_82576(struct rte_eth_dev *dev,
*/
static int
igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter)
+ struct igb_flow_ntuple_filter *ntuple_filter)
{
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -4483,7 +4483,7 @@ igb_delete_5tuple_filter_82576(struct rte_eth_dev *dev,
*/
static int
igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter)
+ struct igb_flow_ntuple_filter *ntuple_filter)
{
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -4568,7 +4568,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * ntuple_filter: Pointer to struct igb_flow_ntuple_filter
* add: if true, add filter, if false, remove filter
*
* @return
@@ -4577,15 +4577,15 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
*/
int
igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter,
+ struct igb_flow_ntuple_filter *ntuple_filter,
bool add)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int ret;
switch (ntuple_filter->flags) {
- case RTE_5TUPLE_FLAGS:
- case (RTE_5TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+ case IGB_5TUPLE_FLAGS:
+ case (IGB_5TUPLE_FLAGS | IGB_NTUPLE_FLAGS_TCP_FLAG):
if (hw->mac.type != e1000_82576)
return -ENOTSUP;
if (add)
@@ -4595,8 +4595,8 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
ret = igb_remove_5tuple_filter_82576(dev,
ntuple_filter);
break;
- case RTE_2TUPLE_FLAGS:
- case (RTE_2TUPLE_FLAGS | RTE_NTUPLE_FLAGS_TCP_FLAG):
+ case IGB_2TUPLE_FLAGS:
+ case (IGB_2TUPLE_FLAGS | IGB_NTUPLE_FLAGS_TCP_FLAG):
if (hw->mac.type != e1000_82580 && hw->mac.type != e1000_i350 &&
hw->mac.type != e1000_i210 &&
hw->mac.type != e1000_i211)
@@ -4627,7 +4627,7 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
*/
static int
igb_get_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter)
+ struct igb_flow_ntuple_filter *ntuple_filter)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
@@ -4714,17 +4714,17 @@ igb_ntuple_filter_handle(struct rte_eth_dev *dev,
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
ret = igb_add_del_ntuple_filter(dev,
- (struct rte_eth_ntuple_filter *)arg,
+ (struct igb_flow_ntuple_filter *)arg,
TRUE);
break;
case RTE_ETH_FILTER_DELETE:
ret = igb_add_del_ntuple_filter(dev,
- (struct rte_eth_ntuple_filter *)arg,
+ (struct igb_flow_ntuple_filter *)arg,
FALSE);
break;
case RTE_ETH_FILTER_GET:
ret = igb_get_ntuple_filter(dev,
- (struct rte_eth_ntuple_filter *)arg);
+ (struct igb_flow_ntuple_filter *)arg);
break;
default:
PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
@@ -4780,7 +4780,7 @@ igb_ethertype_filter_remove(struct e1000_filter_info *filter_info,
int
igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter,
+ struct igb_flow_ethertype_filter *filter,
bool add)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4796,11 +4796,11 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ if (filter->flags & IGB_ETHTYPE_FLAGS_MAC) {
PMD_DRV_LOG(ERR, "mac compare is unsupported.");
return -EINVAL;
}
- if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ if (filter->flags & IGB_ETHTYPE_FLAGS_DROP) {
PMD_DRV_LOG(ERR, "drop option is unsupported.");
return -EINVAL;
}
@@ -4895,12 +4895,12 @@ igb_ethertype_filter_handle(struct rte_eth_dev *dev,
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
ret = igb_add_del_ethertype_filter(dev,
- (struct rte_eth_ethertype_filter *)arg,
+ (struct igb_flow_ethertype_filter *)arg,
TRUE);
break;
case RTE_ETH_FILTER_DELETE:
ret = igb_add_del_ethertype_filter(dev,
- (struct rte_eth_ethertype_filter *)arg,
+ (struct igb_flow_ethertype_filter *)arg,
FALSE);
break;
case RTE_ETH_FILTER_GET:
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index 43fef889b..eec7ae3db 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -91,7 +91,7 @@ static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_ntuple_filter *filter,
+ struct igb_flow_ntuple_filter *filter,
struct rte_flow_error *error)
{
const struct rte_flow_item *item;
@@ -216,7 +216,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -225,7 +225,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* Not supported last point for range */
if (item->last) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
item, "Not supported last point for range");
@@ -248,7 +248,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
tcp_mask->hdr.cksum ||
tcp_mask->hdr.tcp_urp) {
memset(filter, 0,
- sizeof(struct rte_eth_ntuple_filter));
+ sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -258,12 +258,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->dst_port_mask = tcp_mask->hdr.dst_port;
filter->src_port_mask = tcp_mask->hdr.src_port;
if (tcp_mask->hdr.tcp_flags == 0xFF) {
- filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ filter->flags |= IGB_NTUPLE_FLAGS_TCP_FLAG;
} else if (!tcp_mask->hdr.tcp_flags) {
- filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ filter->flags &= ~IGB_NTUPLE_FLAGS_TCP_FLAG;
} else {
memset(filter, 0,
- sizeof(struct rte_eth_ntuple_filter));
+ sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -286,7 +286,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
if (udp_mask->hdr.dgram_len ||
udp_mask->hdr.dgram_cksum) {
memset(filter, 0,
- sizeof(struct rte_eth_ntuple_filter));
+ sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -311,7 +311,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
if (sctp_mask->hdr.tag ||
sctp_mask->hdr.cksum) {
memset(filter, 0,
- sizeof(struct rte_eth_ntuple_filter));
+ sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -331,7 +331,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
index++;
NEXT_ITEM_OF_PATTERN(item, pattern, index);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -347,7 +347,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
*/
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
item, "Not supported action.");
@@ -360,7 +360,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
index++;
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
@@ -370,7 +370,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* parse attr */
/* must be input direction */
if (!attr->ingress) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
attr, "Only support ingress.");
@@ -379,7 +379,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* not supported */
if (attr->egress) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
attr, "Not support egress.");
@@ -388,7 +388,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* not supported */
if (attr->transfer) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
attr, "No support for transfer.");
@@ -396,7 +396,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
if (attr->priority > 0xFFFF) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
attr, "Error priority.");
@@ -413,7 +413,7 @@ igb_parse_ntuple_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_ntuple_filter *filter,
+ struct igb_flow_ntuple_filter *filter,
struct rte_flow_error *error)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -428,7 +428,7 @@ igb_parse_ntuple_filter(struct rte_eth_dev *dev,
/* Igb doesn't support many priorities. */
if (filter->priority > E1000_2TUPLE_MAX_PRI) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "Priority not supported by ntuple filter");
@@ -437,18 +437,20 @@ igb_parse_ntuple_filter(struct rte_eth_dev *dev,
if (hw->mac.type == e1000_82576) {
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0,
+ sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "queue number not "
"supported by ntuple filter");
return -rte_errno;
}
- filter->flags |= RTE_5TUPLE_FLAGS;
+ filter->flags |= IGB_5TUPLE_FLAGS;
} else {
if (filter->src_ip_mask || filter->dst_ip_mask ||
filter->src_port_mask) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0,
+ sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "only two tuple are "
@@ -456,14 +458,15 @@ igb_parse_ntuple_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0,
+ sizeof(struct igb_flow_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "queue number not "
"supported by ntuple filter");
return -rte_errno;
}
- filter->flags |= RTE_2TUPLE_FLAGS;
+ filter->flags |= IGB_2TUPLE_FLAGS;
}
return 0;
@@ -489,7 +492,7 @@ static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
const struct rte_flow_action *actions,
- struct rte_eth_ethertype_filter *filter,
+ struct igb_flow_ethertype_filter *filter,
struct rte_flow_error *error)
{
const struct rte_flow_item *item;
@@ -572,13 +575,13 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
}
/* If mask bits of destination MAC address
- * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ * are full of 1, set IGB_ETHTYPE_FLAGS_MAC.
*/
if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
filter->mac_addr = eth_spec->dst;
- filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ filter->flags |= IGB_ETHTYPE_FLAGS_MAC;
} else {
- filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ filter->flags &= ~IGB_ETHTYPE_FLAGS_MAC;
}
filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
@@ -609,7 +612,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
act_q = (const struct rte_flow_action_queue *)act->conf;
filter->queue = act_q->index;
} else {
- filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ filter->flags |= IGB_ETHTYPE_FLAGS_DROP;
}
/* Check if the next non-void item is END */
@@ -671,7 +674,7 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_ethertype_filter *filter,
+ struct igb_flow_ethertype_filter *filter,
struct rte_flow_error *error)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -688,7 +691,7 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
if (hw->mac.type == e1000_82576) {
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
memset(filter, 0, sizeof(
- struct rte_eth_ethertype_filter));
+ struct igb_flow_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "queue number not supported "
@@ -698,7 +701,7 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
} else {
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
memset(filter, 0, sizeof(
- struct rte_eth_ethertype_filter));
+ struct igb_flow_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "queue number not supported "
@@ -709,23 +712,23 @@ igb_parse_ethertype_filter(struct rte_eth_dev *dev,
if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
filter->ether_type == RTE_ETHER_TYPE_IPV6) {
- memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ memset(filter, 0, sizeof(struct igb_flow_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "IPv4/IPv6 not supported by ethertype filter");
return -rte_errno;
}
- if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
- memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ if (filter->flags & IGB_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct igb_flow_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "mac compare is unsupported");
return -rte_errno;
}
- if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
- memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ if (filter->flags & IGB_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct igb_flow_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "drop option is unsupported");
@@ -759,7 +762,7 @@ static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_syn_filter *filter,
+ struct igb_flow_syn_filter *filter,
struct rte_flow_error *error)
{
const struct rte_flow_item *item;
@@ -883,7 +886,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
tcp_mask->hdr.rx_win ||
tcp_mask->hdr.cksum ||
tcp_mask->hdr.tcp_urp) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct igb_flow_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by syn filter");
@@ -894,7 +897,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
index++;
NEXT_ITEM_OF_PATTERN(item, pattern, index);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct igb_flow_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by syn filter");
@@ -907,7 +910,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
/* check if the first not void action is QUEUE. */
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct igb_flow_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
@@ -921,7 +924,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
index++;
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct igb_flow_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
@@ -931,7 +934,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
/* parse attr */
/* must be input direction */
if (!attr->ingress) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct igb_flow_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
attr, "Only support ingress.");
@@ -940,7 +943,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
/* not supported */
if (attr->egress) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct igb_flow_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
attr, "Not support egress.");
@@ -949,7 +952,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
/* not supported */
if (attr->transfer) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct igb_flow_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
attr, "No support for transfer.");
@@ -962,7 +965,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
} else if (attr->priority == (uint32_t)~0U) {
filter->hig_pri = 1;
} else {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct igb_flow_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
attr, "Not support priority.");
@@ -977,7 +980,7 @@ igb_parse_syn_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_syn_filter *filter,
+ struct igb_flow_syn_filter *filter,
struct rte_flow_error *error)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -990,7 +993,7 @@ igb_parse_syn_filter(struct rte_eth_dev *dev,
if (hw->mac.type == e1000_82576) {
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct igb_flow_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "queue number not "
@@ -999,7 +1002,7 @@ igb_parse_syn_filter(struct rte_eth_dev *dev,
}
} else {
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct igb_flow_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "queue number not "
@@ -1041,7 +1044,7 @@ static int
cons_parse_flex_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_flex_filter *filter,
+ struct igb_flow_flex_filter *filter,
struct rte_flow_error *error)
{
const struct rte_flow_item *item;
@@ -1102,7 +1105,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
if (!raw_mask->length ||
!raw_mask->relative) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flow_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by flex filter");
@@ -1116,7 +1119,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
for (j = 0; j < raw_spec->length; j++) {
if (raw_mask->pattern[j] != 0xFF) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flow_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by flex filter");
@@ -1140,8 +1143,8 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
}
if ((raw_spec->length + offset + total_offset) >
- RTE_FLEX_FILTER_MAXLEN) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ IGB_FLEX_FILTER_MAXLEN) {
+ memset(filter, 0, sizeof(struct igb_flow_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by flex filter");
@@ -1204,7 +1207,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
/* check if the first not void action is QUEUE. */
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flow_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
@@ -1218,7 +1221,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
index++;
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flow_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
@@ -1228,7 +1231,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
/* parse attr */
/* must be input direction */
if (!attr->ingress) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flow_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
attr, "Only support ingress.");
@@ -1237,7 +1240,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
/* not supported */
if (attr->egress) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flow_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
attr, "Not support egress.");
@@ -1246,7 +1249,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
/* not supported */
if (attr->transfer) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flow_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
attr, "No support for transfer.");
@@ -1254,7 +1257,7 @@ cons_parse_flex_filter(const struct rte_flow_attr *attr,
}
if (attr->priority > 0xFFFF) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flow_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
attr, "Error priority.");
@@ -1271,7 +1274,7 @@ igb_parse_flex_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_flex_filter *filter,
+ struct igb_flow_flex_filter *filter,
struct rte_flow_error *error)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1283,7 +1286,7 @@ igb_parse_flex_filter(struct rte_eth_dev *dev,
actions, filter, error);
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flow_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "queue number not supported by flex filter");
@@ -1441,10 +1444,10 @@ igb_flow_create(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
int ret;
- struct rte_eth_ntuple_filter ntuple_filter;
- struct rte_eth_ethertype_filter ethertype_filter;
- struct rte_eth_syn_filter syn_filter;
- struct rte_eth_flex_filter flex_filter;
+ struct igb_flow_ntuple_filter ntuple_filter;
+ struct igb_flow_ethertype_filter ethertype_filter;
+ struct igb_flow_syn_filter syn_filter;
+ struct igb_flow_flex_filter flex_filter;
struct igb_rte_flow_rss_conf rss_conf;
struct rte_flow *flow = NULL;
struct igb_ntuple_filter_ele *ntuple_filter_ptr;
@@ -1471,7 +1474,7 @@ igb_flow_create(struct rte_eth_dev *dev,
TAILQ_INSERT_TAIL(&igb_flow_list,
igb_flow_mem_ptr, entries);
- memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(&ntuple_filter, 0, sizeof(struct igb_flow_ntuple_filter));
ret = igb_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
if (!ret) {
@@ -1486,17 +1489,17 @@ igb_flow_create(struct rte_eth_dev *dev,
rte_memcpy(&ntuple_filter_ptr->filter_info,
&ntuple_filter,
- sizeof(struct rte_eth_ntuple_filter));
+ sizeof(struct igb_flow_ntuple_filter));
TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
ntuple_filter_ptr, entries);
flow->rule = ntuple_filter_ptr;
- flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ flow->filter_type = IGB_FILTER_NTUPLE;
return flow;
}
goto out;
}
- memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ memset(ðertype_filter, 0, sizeof(struct igb_flow_ethertype_filter));
ret = igb_parse_ethertype_filter(dev, attr, pattern,
actions, ðertype_filter, error);
if (!ret) {
@@ -1513,17 +1516,17 @@ igb_flow_create(struct rte_eth_dev *dev,
rte_memcpy(ðertype_filter_ptr->filter_info,
ðertype_filter,
- sizeof(struct rte_eth_ethertype_filter));
+ sizeof(struct igb_flow_ethertype_filter));
TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
ethertype_filter_ptr, entries);
flow->rule = ethertype_filter_ptr;
- flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ flow->filter_type = IGB_FILTER_ETHERTYPE;
return flow;
}
goto out;
}
- memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(&syn_filter, 0, sizeof(struct igb_flow_syn_filter));
ret = igb_parse_syn_filter(dev, attr, pattern,
actions, &syn_filter, error);
if (!ret) {
@@ -1538,18 +1541,18 @@ igb_flow_create(struct rte_eth_dev *dev,
rte_memcpy(&syn_filter_ptr->filter_info,
&syn_filter,
- sizeof(struct rte_eth_syn_filter));
+ sizeof(struct igb_flow_syn_filter));
TAILQ_INSERT_TAIL(&igb_filter_syn_list,
syn_filter_ptr,
entries);
flow->rule = syn_filter_ptr;
- flow->filter_type = RTE_ETH_FILTER_SYN;
+ flow->filter_type = IGB_FILTER_SYN;
return flow;
}
goto out;
}
- memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(&flex_filter, 0, sizeof(struct igb_flow_flex_filter));
ret = igb_parse_flex_filter(dev, attr, pattern,
actions, &flex_filter, error);
if (!ret) {
@@ -1564,11 +1567,11 @@ igb_flow_create(struct rte_eth_dev *dev,
rte_memcpy(&flex_filter_ptr->filter_info,
&flex_filter,
- sizeof(struct rte_eth_flex_filter));
+ sizeof(struct igb_flow_flex_filter));
TAILQ_INSERT_TAIL(&igb_filter_flex_list,
flex_filter_ptr, entries);
flow->rule = flex_filter_ptr;
- flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
+ flow->filter_type = IGB_FILTER_FLEXIBLE;
return flow;
}
}
@@ -1590,7 +1593,7 @@ igb_flow_create(struct rte_eth_dev *dev,
TAILQ_INSERT_TAIL(&igb_filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;
- flow->filter_type = RTE_ETH_FILTER_HASH;
+ flow->filter_type = IGB_FILTER_HASH;
return flow;
}
}
@@ -1618,32 +1621,32 @@ igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct rte_eth_ntuple_filter ntuple_filter;
- struct rte_eth_ethertype_filter ethertype_filter;
- struct rte_eth_syn_filter syn_filter;
- struct rte_eth_flex_filter flex_filter;
+ struct igb_flow_ntuple_filter ntuple_filter;
+ struct igb_flow_ethertype_filter ethertype_filter;
+ struct igb_flow_syn_filter syn_filter;
+ struct igb_flow_flex_filter flex_filter;
struct igb_rte_flow_rss_conf rss_conf;
int ret;
- memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(&ntuple_filter, 0, sizeof(struct igb_flow_ntuple_filter));
ret = igb_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
if (!ret)
return 0;
- memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ memset(ðertype_filter, 0, sizeof(struct igb_flow_ethertype_filter));
ret = igb_parse_ethertype_filter(dev, attr, pattern,
actions, ðertype_filter, error);
if (!ret)
return 0;
- memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(&syn_filter, 0, sizeof(struct igb_flow_syn_filter));
ret = igb_parse_syn_filter(dev, attr, pattern,
actions, &syn_filter, error);
if (!ret)
return 0;
- memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(&flex_filter, 0, sizeof(struct igb_flow_flex_filter));
ret = igb_parse_flex_filter(dev, attr, pattern,
actions, &flex_filter, error);
if (!ret)
@@ -1664,7 +1667,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
{
int ret;
struct rte_flow *pmd_flow = flow;
- enum rte_filter_type filter_type = pmd_flow->filter_type;
+ enum igb_filter_type filter_type = pmd_flow->filter_type;
struct igb_ntuple_filter_ele *ntuple_filter_ptr;
struct igb_ethertype_filter_ele *ethertype_filter_ptr;
struct igb_eth_syn_filter_ele *syn_filter_ptr;
@@ -1673,7 +1676,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
struct igb_rss_conf_ele *rss_filter_ptr;
switch (filter_type) {
- case RTE_ETH_FILTER_NTUPLE:
+ case IGB_FILTER_NTUPLE:
ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
pmd_flow->rule;
ret = igb_add_del_ntuple_filter(dev,
@@ -1684,7 +1687,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
rte_free(ntuple_filter_ptr);
}
break;
- case RTE_ETH_FILTER_ETHERTYPE:
+ case IGB_FILTER_ETHERTYPE:
ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
pmd_flow->rule;
ret = igb_add_del_ethertype_filter(dev,
@@ -1695,7 +1698,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
rte_free(ethertype_filter_ptr);
}
break;
- case RTE_ETH_FILTER_SYN:
+ case IGB_FILTER_SYN:
syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
pmd_flow->rule;
ret = eth_igb_syn_filter_set(dev,
@@ -1706,7 +1709,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
rte_free(syn_filter_ptr);
}
break;
- case RTE_ETH_FILTER_FLEXIBLE:
+ case IGB_FILTER_FLEXIBLE:
flex_filter_ptr = (struct igb_flex_filter_ele *)
pmd_flow->rule;
ret = eth_igb_add_del_flex_filter(dev,
@@ -1717,7 +1720,7 @@ igb_flow_destroy(struct rte_eth_dev *dev,
rte_free(flex_filter_ptr);
}
break;
- case RTE_ETH_FILTER_HASH:
+ case IGB_FILTER_HASH:
rss_filter_ptr = (struct igb_rss_conf_ele *)
pmd_flow->rule;
ret = igb_config_rss_filter(dev,
@@ -1836,7 +1839,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
struct igb_flex_filter_ele *flex_filter_ptr;
struct igb_rss_conf_ele *rss_filter_ptr;
struct igb_flow_mem *igb_flow_mem_ptr;
- enum rte_filter_type filter_type;
+ enum igb_filter_type filter_type;
struct rte_flow *pmd_flow;
TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
@@ -1845,7 +1848,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
filter_type = pmd_flow->filter_type;
switch (filter_type) {
- case RTE_ETH_FILTER_NTUPLE:
+ case IGB_FILTER_NTUPLE:
ntuple_filter_ptr =
(struct igb_ntuple_filter_ele *)
pmd_flow->rule;
@@ -1853,7 +1856,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
ntuple_filter_ptr, entries);
rte_free(ntuple_filter_ptr);
break;
- case RTE_ETH_FILTER_ETHERTYPE:
+ case IGB_FILTER_ETHERTYPE:
ethertype_filter_ptr =
(struct igb_ethertype_filter_ele *)
pmd_flow->rule;
@@ -1861,7 +1864,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
ethertype_filter_ptr, entries);
rte_free(ethertype_filter_ptr);
break;
- case RTE_ETH_FILTER_SYN:
+ case IGB_FILTER_SYN:
syn_filter_ptr =
(struct igb_eth_syn_filter_ele *)
pmd_flow->rule;
@@ -1869,7 +1872,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
syn_filter_ptr, entries);
rte_free(syn_filter_ptr);
break;
- case RTE_ETH_FILTER_FLEXIBLE:
+ case IGB_FILTER_FLEXIBLE:
flex_filter_ptr =
(struct igb_flex_filter_ele *)
pmd_flow->rule;
@@ -1877,7 +1880,7 @@ igb_filterlist_flush(struct rte_eth_dev *dev)
flex_filter_ptr, entries);
rte_free(flex_filter_ptr);
break;
- case RTE_ETH_FILTER_HASH:
+ case IGB_FILTER_HASH:
rss_filter_ptr =
(struct igb_rss_conf_ele *)
pmd_flow->rule;
--
2.17.1
* [dpdk-dev] [RFC 4/5] net/ixgbe: decouple dependency from superseded structures
2020-09-29 7:48 [dpdk-dev] [RFC 2/5] net/igc: decouple dependency from superseded structures Chenxu Di
2020-09-29 7:48 ` [dpdk-dev] [RFC 3/5] net/e1000: " Chenxu Di
@ 2020-09-29 7:48 ` Chenxu Di
1 sibling, 0 replies; 3+ messages in thread
From: Chenxu Di @ 2020-09-29 7:48 UTC
To: dev; +Cc: junyux.jiang, shougangx.wang, Jeff Guo, Haiyue Wang, Chenxu Di
The legacy filter API, together with the associated rte_eth_ctrl.h
header, will be removed. This patch replaces the superseded structures
with PMD-internal equivalents. The macros RTE_ETH_FILTER_GENERIC and
RTE_ETH_FILTER_GET are not replaced here; they can only change once the
corresponding librte update lands. The rte_eth_conf.fdir_conf field
will also be removed, but ixgbe generic flow still depends on that
configuration, so this patch additionally defines a private API to pass
the user's configuration through to the PMD.
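The name of that private API is not visible in this excerpt; the sketch
below uses an invented placeholder, rte_pmd_ixgbe_fdir_conf_set(),
purely to illustrate the passthrough pattern: copy the user's Flow
Director settings into the adapter private data so the flow code reads
adapter->fdir_conf (the consumer side visible in the diff) instead of
the soon-to-be-removed rte_eth_conf.fdir_conf:

/* Hypothetical sketch -- the function name and a minimal struct
 * ixgbe_fdir_conf are assumptions for illustration; the real
 * definitions land in rte_pmd_ixgbe.[ch] in this patch.
 */
#include <rte_ethdev_driver.h>

int
rte_pmd_ixgbe_fdir_conf_set(uint16_t port, const struct ixgbe_fdir_conf *conf)
{
	struct rte_eth_dev *dev;
	struct ixgbe_adapter *adapter;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
	dev = &rte_eth_devices[port];
	if (!is_ixgbe_supported(dev))
		return -ENOTSUP;

	/* Stash the user's FDIR configuration in the PMD private data,
	 * so ixgbe_dev_start() and the flow code no longer read
	 * dev_conf.fdir_conf.
	 */
	adapter = dev->data->dev_private;
	adapter->fdir_conf = *conf;
	return 0;
}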
Signed-off-by: Chenxu Di <chenxux.di@intel.com>
---
drivers/net/ixgbe/ixgbe_ethdev.c | 79 +++----
drivers/net/ixgbe/ixgbe_ethdev.h | 217 +++++++++++++++++-
drivers/net/ixgbe/ixgbe_fdir.c | 143 ++++++------
drivers/net/ixgbe/ixgbe_flow.c | 235 ++++++++++----------
drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 4 +-
drivers/net/ixgbe/rte_pmd_ixgbe.c | 72 ++++++
drivers/net/ixgbe/rte_pmd_ixgbe.h | 172 ++++++++++++++
drivers/net/ixgbe/rte_pmd_ixgbe_version.map | 1 +
8 files changed, 692 insertions(+), 231 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 0f065bbc0..977900c8f 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -301,7 +301,7 @@ static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
struct rte_ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter);
+ struct ixgbe_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -313,12 +313,12 @@ static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *filter);
+ struct ixgbe_ntuple_filter *filter);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter);
+ struct ixgbe_flow_ethertype_filter *filter);
static int ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
@@ -2571,6 +2571,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
uint32_t intr_vector = 0;
int err;
bool link_up = false, negotiate = 0;
@@ -2665,7 +2666,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
/* Configure DCB hw */
ixgbe_configure_dcb(dev);
- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ if (adapter->fdir_conf.mode != IXGBE_FDIR_MODE_NONE) {
err = ixgbe_fdir_configure(dev);
if (err)
goto error;
@@ -6368,7 +6369,7 @@ ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
int
ixgbe_syn_filter_set(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter,
+ struct ixgbe_syn_filter *filter,
bool add)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -6407,7 +6408,7 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
static int
ixgbe_syn_filter_get(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter)
+ struct ixgbe_syn_filter *filter)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
@@ -6442,17 +6443,17 @@ ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
ret = ixgbe_syn_filter_set(dev,
- (struct rte_eth_syn_filter *)arg,
+ (struct ixgbe_syn_filter *)arg,
TRUE);
break;
case RTE_ETH_FILTER_DELETE:
ret = ixgbe_syn_filter_set(dev,
- (struct rte_eth_syn_filter *)arg,
+ (struct ixgbe_syn_filter *)arg,
FALSE);
break;
case RTE_ETH_FILTER_GET:
ret = ixgbe_syn_filter_get(dev,
- (struct rte_eth_syn_filter *)arg);
+ (struct ixgbe_syn_filter *)arg);
break;
default:
PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
@@ -6652,9 +6653,11 @@ ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
return NULL;
}
-/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info*/
+/* translate elements in struct ixgbe_ntuple_filter to
+ * struct ixgbe_5tuple_filter_info
+ */
static inline int
-ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+ntuple_filter_to_5tuple(struct ixgbe_ntuple_filter *filter,
struct ixgbe_5tuple_filter_info *filter_info)
{
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
@@ -6737,7 +6740,7 @@ ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * ntuple_filter: Pointer to struct ixgbe_ntuple_filter
* add: if true, add filter, if false, remove filter
*
* @return
@@ -6746,7 +6749,7 @@ ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
*/
int
ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter,
+ struct ixgbe_ntuple_filter *ntuple_filter,
bool add)
{
struct ixgbe_filter_info *filter_info =
@@ -6755,7 +6758,7 @@ ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter;
int ret;
- if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+ if (ntuple_filter->flags != IXGBE_5TUPLE_FLAGS) {
PMD_DRV_LOG(ERR, "only 5tuple is supported.");
return -EINVAL;
}
@@ -6809,7 +6812,7 @@ ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
*/
static int
ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *ntuple_filter)
+ struct ixgbe_ntuple_filter *ntuple_filter)
{
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -6869,17 +6872,17 @@ ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
ret = ixgbe_add_del_ntuple_filter(dev,
- (struct rte_eth_ntuple_filter *)arg,
+ (struct ixgbe_ntuple_filter *)arg,
TRUE);
break;
case RTE_ETH_FILTER_DELETE:
ret = ixgbe_add_del_ntuple_filter(dev,
- (struct rte_eth_ntuple_filter *)arg,
+ (struct ixgbe_ntuple_filter *)arg,
FALSE);
break;
case RTE_ETH_FILTER_GET:
ret = ixgbe_get_ntuple_filter(dev,
- (struct rte_eth_ntuple_filter *)arg);
+ (struct ixgbe_ntuple_filter *)arg);
break;
default:
PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
@@ -6891,7 +6894,7 @@ ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
int
ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter,
+ struct ixgbe_flow_ethertype_filter *filter,
bool add)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -6912,11 +6915,11 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ if (filter->flags & IXGBE_ETHTYPE_FLAGS_MAC) {
PMD_DRV_LOG(ERR, "mac compare is unsupported.");
return -EINVAL;
}
- if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ if (filter->flags & IXGBE_ETHTYPE_FLAGS_DROP) {
PMD_DRV_LOG(ERR, "drop option is unsupported.");
return -EINVAL;
}
@@ -6965,7 +6968,7 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
static int
ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter)
+ struct ixgbe_flow_ethertype_filter *filter)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_filter_info *filter_info =
@@ -7020,17 +7023,17 @@ ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
ret = ixgbe_add_del_ethertype_filter(dev,
- (struct rte_eth_ethertype_filter *)arg,
+ (struct ixgbe_flow_ethertype_filter *)arg,
TRUE);
break;
case RTE_ETH_FILTER_DELETE:
ret = ixgbe_add_del_ethertype_filter(dev,
- (struct rte_eth_ethertype_filter *)arg,
+ (struct ixgbe_flow_ethertype_filter *)arg,
FALSE);
break;
case RTE_ETH_FILTER_GET:
ret = ixgbe_get_ethertype_filter(dev,
- (struct rte_eth_ethertype_filter *)arg);
+ (struct ixgbe_flow_ethertype_filter *)arg);
break;
default:
PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
@@ -7912,7 +7915,7 @@ ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
static int
ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel)
+ struct ixgbe_l2_tunnel_cfg *l2_tunnel)
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -7948,7 +7951,7 @@ ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
static int
ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel)
+ struct ixgbe_l2_tunnel_cfg *l2_tunnel)
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -8052,7 +8055,7 @@ ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
/* Add l2 tunnel filter */
int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ struct ixgbe_l2_tunnel_cfg *l2_tunnel,
bool restore)
{
int ret;
@@ -8109,7 +8112,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
/* Delete l2 tunnel filter */
int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel)
+ struct ixgbe_l2_tunnel_cfg *l2_tunnel)
{
int ret;
struct ixgbe_l2_tn_info *l2_tn_info =
@@ -8161,13 +8164,13 @@ ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_ADD:
ret = ixgbe_dev_l2_tunnel_filter_add
(dev,
- (struct rte_eth_l2_tunnel_conf *)arg,
+ (struct ixgbe_l2_tunnel_cfg *)arg,
FALSE);
break;
case RTE_ETH_FILTER_DELETE:
ret = ixgbe_dev_l2_tunnel_filter_del
(dev,
- (struct rte_eth_l2_tunnel_conf *)arg);
+ (struct ixgbe_l2_tunnel_cfg *)arg);
break;
default:
PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
@@ -8249,7 +8252,7 @@ ixgbe_dev_l2_tunnel_forwarding_disable
static int
ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ struct ixgbe_l2_tunnel_cfg *l2_tunnel,
bool en)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
@@ -8290,7 +8293,7 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
/* Enable l2 tunnel tag insertion */
static int
ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel)
+ struct ixgbe_l2_tunnel_cfg *l2_tunnel)
{
int ret = 0;
@@ -8311,7 +8314,7 @@ ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
static int
ixgbe_dev_l2_tunnel_insertion_disable
(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel)
+ struct ixgbe_l2_tunnel_cfg *l2_tunnel)
{
int ret = 0;
@@ -8425,11 +8428,11 @@ ixgbe_dev_l2_tunnel_offload_set
if (en)
ret = ixgbe_dev_l2_tunnel_insertion_enable(
dev,
- l2_tunnel);
+ (struct ixgbe_l2_tunnel_cfg *)l2_tunnel);
else
ret = ixgbe_dev_l2_tunnel_insertion_disable(
dev,
- l2_tunnel);
+ (struct ixgbe_l2_tunnel_cfg *)l2_tunnel);
}
if (mask & ETH_L2_TUNNEL_STRIPPING_MASK) {
@@ -8831,7 +8834,7 @@ ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
struct ixgbe_l2_tn_info *l2_tn_info =
IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
struct ixgbe_l2_tn_filter *node;
- struct rte_eth_l2_tunnel_conf l2_tn_conf;
+ struct ixgbe_l2_tunnel_cfg l2_tn_conf;
TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
@@ -8938,7 +8941,7 @@ ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
struct ixgbe_l2_tn_info *l2_tn_info =
IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
struct ixgbe_l2_tn_filter *l2_tn_filter;
- struct rte_eth_l2_tunnel_conf l2_tn_conf;
+ struct ixgbe_l2_tunnel_cfg l2_tn_conf;
int ret = 0;
while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 9bdef87fb..aebcf0fff 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -169,6 +169,126 @@ struct ixgbe_hw_fdir_mask {
uint8_t tunnel_type_mask;
};
+/**
+ * Flow Director setting modes: none, signature, perfect,
+ * or perfect with MAC VLAN / tunnel matching.
+ */
+enum ixgbe_fdir_mode {
+ /* Disable FDIR support. */
+ IXGBE_FDIR_MODE_NONE = 0,
+ /* Enable FDIR signature filter mode. */
+ IXGBE_FDIR_MODE_SIGNATURE,
+ /* Enable FDIR perfect filter mode. */
+ IXGBE_FDIR_MODE_PERFECT,
+ /* Enable FDIR filter mode - MAC VLAN. */
+ IXGBE_FDIR_MODE_PERFECT_MAC_VLAN,
+ /* Enable FDIR filter mode - tunnel. */
+ IXGBE_FDIR_MODE_PERFECT_TUNNEL,
+};
+
+/* Select report mode of FDIR hash information in RX descriptors. */
+enum ixgbe_fdir_status_mode {
+ IXGBE_FDIR_NO_REPORT_STATUS = 0, /* Never report FDIR hash. */
+ IXGBE_FDIR_REPORT_STATUS, /* Only report FDIR hash for matching pkts. */
+ IXGBE_FDIR_REPORT_STATUS_ALWAYS, /* Always report FDIR hash. */
+};
+
+/* A structure used to define the input for an IPv4 flow. */
+struct ixgbe_ipv4_flow {
+ uint32_t src_ip; /* IPv4 source address in big endian. */
+ uint32_t dst_ip; /* IPv4 destination address in big endian. */
+ uint8_t tos; /* Type of service to match. */
+ uint8_t ttl; /* Time to live to match. */
+ uint8_t proto; /* Protocol / next header to match. */
+};
+
+/* A structure used to define the input for an IPv6 flow. */
+struct ixgbe_ipv6_flow {
+ uint32_t src_ip[4]; /* IPv6 source address in big endian. */
+ uint32_t dst_ip[4]; /* IPv6 destination address in big endian. */
+ uint8_t tc; /* Traffic class to match. */
+ uint8_t proto; /* Protocol, next header to match. */
+ uint8_t hop_limits; /* Hop limits to match. */
+};
+
+/* A structure used to configure FDIR masks that are used by the device
+ * to match the various fields of RX packet headers.
+ */
+struct ixgbe_fdir_masks {
+ /* Bit mask for vlan_tci in big endian */
+ uint16_t vlan_tci_mask;
+ /* Bit mask for ipv4 flow in big endian. */
+ struct ixgbe_ipv4_flow ipv4_mask;
+ /* Bit mask for ipv6 flow in big endian. */
+ struct ixgbe_ipv6_flow ipv6_mask;
+ /* Bit mask for L4 source port in big endian. */
+ uint16_t src_port_mask;
+ /* Bit mask for L4 destination port in big endian. */
+ uint16_t dst_port_mask;
+ /* Per-byte mask (one bit per byte) for the 6 bytes of the MAC
+ * address; bit 0 matches the first byte on the wire.
+ */
+ uint8_t mac_addr_byte_mask;
+ /* Bit mask for tunnel ID in big endian. */
+ uint32_t tunnel_id_mask;
+ /* 1 - Match tunnel type, 0 - Ignore tunnel type. */
+ uint8_t tunnel_type_mask;
+};
+
+#define IXGBE_FDIR_MAX_FLEXLEN 16 /* Max length of flexbytes. */
+
+/* Payload type */
+enum ixgbe_payload_type {
+ IXGBE_PAYLOAD_UNKNOWN = 0,
+ IXGBE_RAW_PAYLOAD,
+ IXGBE_L2_PAYLOAD,
+ IXGBE_L3_PAYLOAD,
+ IXGBE_L4_PAYLOAD,
+ IXGBE_PAYLOAD_MAX = 8,
+};
+
+/* A structure used to select the bytes extracted from the protocol
+ * layers into the flexible payload used for filtering.
+ */
+struct ixgbe_flex_payload_cfg {
+ enum ixgbe_payload_type type; /* Payload type */
+ uint16_t src_offset[IXGBE_FDIR_MAX_FLEXLEN];
+ /* Offsets in bytes from the beginning of the packet's payload;
+ * src_offset[i] indicates flexbyte i's offset in the original
+ * packet payload.
+ */
+};
+
+/* A structure used to define FDIR masks for flexible payload
+ * for each flow type
+ */
+struct ixgbe_fdir_flex_mask {
+ uint16_t flow_type;
+ uint8_t mask[IXGBE_FDIR_MAX_FLEXLEN];
+ /* Mask for the whole flexible payload */
+};
+
+/* A structure used to define all flexible payload related settings,
+ * including the flex payload and the flex masks.
+ */
+struct ixgbe_fdir_flex_conf {
+ uint16_t nb_payloads; /* Number of payload configurations that follow. */
+ uint16_t nb_flexmasks; /* Number of flex masks that follow. */
+ struct ixgbe_flex_payload_cfg flex_set[IXGBE_PAYLOAD_MAX];
+ /* Flex payload configuration for each payload type */
+ struct ixgbe_fdir_flex_mask flex_mask[RTE_ETH_FLOW_MAX];
+ /* Flex mask configuration for each flow type */
+};
+
+struct ixgbe_fdir_conf {
+ enum ixgbe_fdir_mode mode; /* Flow Director mode. */
+ enum ixgbe_fdir_pballoc_type pballoc; /* Space for FDIR filters. */
+ enum ixgbe_fdir_status_mode status; /* How to report FDIR hash. */
+ /* RX queue of packets matching a "drop" filter in perfect mode. */
+ uint8_t drop_queue;
+ struct ixgbe_fdir_masks mask;
+ struct ixgbe_fdir_flex_conf flex_conf; /* Flex payload configuration. */
+};
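A minimal sketch (not part of the patch) of how the PMD-owned FDIR
configuration might be seeded now that it lives in ixgbe_adapter; the
helper name and all field values below are illustrative assumptions:

	/* Sketch only: seed the driver-local FDIR config with defaults. */
	static void
	example_fdir_conf_init(struct ixgbe_adapter *adapter)
	{
		struct ixgbe_fdir_conf *conf = &adapter->fdir_conf;

		memset(conf, 0, sizeof(*conf));          /* needs <string.h> */
		conf->mode = IXGBE_FDIR_MODE_PERFECT;    /* perfect matching */
		conf->pballoc = IXGBE_FDIR_PBALLOC_64K;  /* smallest pool */
		conf->status = IXGBE_FDIR_REPORT_STATUS; /* hash on match */
		conf->drop_queue = 127;                  /* illustrative */
	}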
+
struct ixgbe_fdir_filter {
TAILQ_ENTRY(ixgbe_fdir_filter) entries;
union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/
@@ -185,7 +305,7 @@ struct ixgbe_fdir_rule {
union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/
bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */
bool b_mask; /* If TRUE, mask has meaning. */
- enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
+ enum ixgbe_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
uint32_t fdirflags; /* drop or forward */
uint32_t soft_id; /* a unique value for this rule */
uint8_t queue; /* assigned rx queue */
@@ -361,8 +481,90 @@ struct ixgbe_l2_tn_info {
uint16_t e_tag_ether_type; /* ether type for e-tag */
};
+struct ixgbe_syn_filter {
+ /* 1 - higher priority than other filters, 0 - lower priority. */
+ uint8_t hig_pri;
+ /* Queue to assign matching packets to. */
+ uint16_t queue;
+};
+
+/* Define all structures for ntuple Filter type. */
+
+/* If set, dst_ip is part of ntuple */
+#define IXGBE_NTUPLE_FLAGS_DST_IP 0x0001
+/* If set, src_ip is part of ntuple */
+#define IXGBE_NTUPLE_FLAGS_SRC_IP 0x0002
+/* If set, dst_port is part of ntuple */
+#define IXGBE_NTUPLE_FLAGS_DST_PORT 0x0004
+/* If set, src_port is part of ntuple */
+#define IXGBE_NTUPLE_FLAGS_SRC_PORT 0x0008
+/* If set, protocol is part of ntuple */
+#define IXGBE_NTUPLE_FLAGS_PROTO 0x0010
+/* If set, tcp flag is involved */
+#define IXGBE_NTUPLE_FLAGS_TCP_FLAG 0x0020
+
+#define IXGBE_5TUPLE_FLAGS ( \
+ IXGBE_NTUPLE_FLAGS_DST_IP | \
+ IXGBE_NTUPLE_FLAGS_SRC_IP | \
+ IXGBE_NTUPLE_FLAGS_DST_PORT | \
+ IXGBE_NTUPLE_FLAGS_SRC_PORT | \
+ IXGBE_NTUPLE_FLAGS_PROTO)
+
+struct ixgbe_ntuple_filter {
+ uint16_t flags; /* Flags from IXGBE_NTUPLE_FLAGS_* */
+ uint32_t dst_ip; /* Destination IP address in big endian. */
+ uint32_t dst_ip_mask; /* Mask of destination IP address. */
+ uint32_t src_ip; /* Source IP address in big endian. */
+ uint32_t src_ip_mask; /* Mask of source IP address. */
+ uint16_t dst_port; /* Destination port in big endian. */
+ uint16_t dst_port_mask; /* Mask of destination port. */
+ uint16_t src_port; /* Source port in big endian. */
+ uint16_t src_port_mask; /* Mask of source port. */
+ uint8_t proto; /* L4 protocol. */
+ uint8_t proto_mask; /* Mask of L4 protocol. */
+ /* tcp_flags is only meaningful when proto is TCP.
+ * A packet that matches the ntuple fields above and has any
+ * of these bits set in its TCP flags will hit this filter.
+ */
+ uint8_t tcp_flags;
+ /* Seven priority levels (001b-111b), 111b being the highest;
+ * used when more than one filter matches.
+ */
+ uint16_t priority;
+ uint16_t queue; /* Queue to assign matching packets to. */
+};
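A minimal sketch of a fully-masked TCP 5-tuple filter built on the new
structure (addresses, ports and the example_ name are illustrative;
RTE_BE16/RTE_BE32/RTE_IPV4 come from rte_byteorder.h and rte_ip.h):

	struct ixgbe_ntuple_filter example_ntuple = {
		.flags = IXGBE_5TUPLE_FLAGS,
		.dst_ip = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
		.dst_ip_mask = UINT32_MAX,   /* match every dst address bit */
		.src_ip = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
		.src_ip_mask = UINT32_MAX,   /* match every src address bit */
		.dst_port = RTE_BE16(80),
		.dst_port_mask = UINT16_MAX,
		.src_port = RTE_BE16(12345),
		.src_port_mask = UINT16_MAX,
		.proto = IPPROTO_TCP,        /* from <netinet/in.h> */
		.proto_mask = UINT8_MAX,
		.priority = 1,               /* lowest of the seven levels */
		.queue = 1,
	};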
+
+struct ixgbe_l2_tunnel_cfg {
+ enum rte_eth_tunnel_type l2_tunnel_type;
+ uint16_t ether_type; /* ether type in l2 header */
+ uint32_t tunnel_id; /* port tag id for e-tag */
+ uint16_t vf_id; /* VF id for tag insertion */
+ uint32_t pool; /* destination pool for tag based forwarding */
+};
+
+#define IXGBE_ETHTYPE_FLAGS_MAC 0x0001 /* If set, compare mac */
+#define IXGBE_ETHTYPE_FLAGS_DROP 0x0002 /* If set, drop packet when match */
+
+struct ixgbe_flow_ethertype_filter {
+ struct rte_ether_addr mac_addr; /* MAC address to match. */
+ uint16_t ether_type; /* Ether type to match. */
+ uint16_t flags; /* Flags from IXGBE_ETHTYPE_FLAGS_* */
+ uint16_t queue; /* Queue to assign matching packets to. */
+};
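A minimal sketch of the renamed ethertype filter (the ethertype value
is illustrative; MAC compare and drop are rejected by this driver, so
neither flag is set):

	struct ixgbe_flow_ethertype_filter example_etype = {
		.ether_type = 0x88F7, /* e.g. PTP over Ethernet */
		.flags = 0,           /* no IXGBE_ETHTYPE_FLAGS_MAC/DROP */
		.queue = 0,
	};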
+
+enum ixgbe_filter_type {
+ IXGBE_FILTER_NONE = 0,
+ IXGBE_FILTER_ETHERTYPE,
+ IXGBE_FILTER_SYN,
+ IXGBE_FILTER_NTUPLE,
+ IXGBE_FILTER_FDIR,
+ IXGBE_FILTER_HASH,
+ IXGBE_FILTER_L2_TUNNEL,
+ IXGBE_FILTER_MAX
+};
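This tag replaces enum rte_filter_type in struct rte_flow (next hunk)
and selects how flow->rule is interpreted; a sketch of the dispatch,
mirroring ixgbe_flow_destroy() in ixgbe_flow.c:

	switch (pmd_flow->filter_type) {
	case IXGBE_FILTER_NTUPLE:
		/* pmd_flow->rule is a struct ixgbe_ntuple_filter_ele * */
		break;
	case IXGBE_FILTER_ETHERTYPE:
		/* pmd_flow->rule is a struct ixgbe_ethertype_filter_ele * */
		break;
	case IXGBE_FILTER_FDIR:
		/* pmd_flow->rule is a struct ixgbe_fdir_rule_ele * */
		break;
	default:
		break;
	}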
+
struct rte_flow {
- enum rte_filter_type filter_type;
+ enum ixgbe_filter_type filter_type;
void *rule;
};
@@ -514,6 +716,7 @@ struct ixgbe_adapter {
uint8_t mac_ctrl_frame_fwd;
rte_atomic32_t link_thread_running;
pthread_t link_thread_tid;
+ struct ixgbe_fdir_conf fdir_conf;
};
struct ixgbe_vf_representor {
@@ -670,21 +873,21 @@ uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i);
bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);
int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *filter,
+ struct ixgbe_ntuple_filter *filter,
bool add);
int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter,
+ struct ixgbe_flow_ethertype_filter *filter,
bool add);
int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter,
+ struct ixgbe_syn_filter *filter,
bool add);
int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ struct ixgbe_l2_tunnel_cfg *l2_tunnel,
bool restore);
int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel);
+ struct ixgbe_l2_tunnel_cfg *l2_tunnel);
void ixgbe_filterlist_init(void);
void ixgbe_filterlist_flush(void);
/*
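For reference, a minimal usage sketch of the reworked l2 tunnel
prototypes above (all values illustrative, not part of the patch):

	struct ixgbe_l2_tunnel_cfg example_l2_tn = {
		.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
		.tunnel_id = 0x3fff, /* illustrative GRP + E-CID value */
	};

	if (ixgbe_dev_l2_tunnel_filter_add(dev, &example_l2_tn, FALSE) != 0)
		PMD_DRV_LOG(ERR, "l2 tunnel filter add failed");
	/* ... */
	ixgbe_dev_l2_tunnel_filter_del(dev, &example_l2_tn);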
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 6faaa8f06..76fb47d49 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -81,11 +81,11 @@
static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
static int fdir_set_input_mask(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask);
+ const struct ixgbe_fdir_masks *input_mask);
static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
+ const struct ixgbe_fdir_flex_conf *conf, uint32_t *fdirctrl);
static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
static int ixgbe_fdir_filter_to_atr_input(
const struct rte_eth_fdir_filter *fdir_filter,
@@ -94,13 +94,13 @@ static int ixgbe_fdir_filter_to_atr_input(
static uint32_t ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
uint32_t key);
static uint32_t atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc);
+ enum ixgbe_fdir_pballoc_type pballoc);
static uint32_t atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc);
+ enum ixgbe_fdir_pballoc_type pballoc);
static int fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input, uint8_t queue,
uint32_t fdircmd, uint32_t fdirhash,
- enum rte_fdir_mode mode);
+ enum ixgbe_fdir_mode mode);
static int fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input, u8 queue, uint32_t fdircmd,
uint32_t fdirhash);
@@ -171,20 +171,20 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
* flexbytes matching field, and drop queue (only for perfect matching mode).
*/
static inline int
-configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+configure_fdir_flags(const struct ixgbe_fdir_conf *conf, uint32_t *fdirctrl)
{
*fdirctrl = 0;
switch (conf->pballoc) {
- case RTE_FDIR_PBALLOC_64K:
+ case IXGBE_FDIR_PBALLOC_64K:
/* 8k - 1 signature filters */
*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
break;
- case RTE_FDIR_PBALLOC_128K:
+ case IXGBE_FDIR_PBALLOC_128K:
/* 16k - 1 signature filters */
*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
break;
- case RTE_FDIR_PBALLOC_256K:
+ case IXGBE_FDIR_PBALLOC_256K:
/* 32k - 1 signature filters */
*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
break;
@@ -196,14 +196,14 @@ configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
/* status flags: write hash & swindex in the rx descriptor */
switch (conf->status) {
- case RTE_FDIR_NO_REPORT_STATUS:
+ case IXGBE_FDIR_NO_REPORT_STATUS:
/* do nothing, default mode */
break;
- case RTE_FDIR_REPORT_STATUS:
+ case IXGBE_FDIR_REPORT_STATUS:
/* report status when the packet matches a fdir rule */
*fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
break;
- case RTE_FDIR_REPORT_STATUS_ALWAYS:
+ case IXGBE_FDIR_REPORT_STATUS_ALWAYS:
/* always report status */
*fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
break;
@@ -216,14 +216,14 @@ configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
*fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
IXGBE_FDIRCTRL_FLEX_SHIFT;
- if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
- conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ if (conf->mode >= IXGBE_FDIR_MODE_PERFECT &&
+ conf->mode <= IXGBE_FDIR_MODE_PERFECT_TUNNEL) {
*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
*fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
- if (conf->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+ if (conf->mode == IXGBE_FDIR_MODE_PERFECT_MAC_VLAN)
*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
<< IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
- else if (conf->mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ else if (conf->mode == IXGBE_FDIR_MODE_PERFECT_TUNNEL)
*fdirctrl |= (IXGBE_FDIRCTRL_FILTERMODE_CLOUD
<< IXGBE_FDIRCTRL_FILTERMODE_SHIFT);
}
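For perfect MAC VLAN mode with 128K pballoc and per-match status
reporting, the register value built above reduces to the following
(a symbolic sketch of the same ORs, not new logic):

	fdirctrl = IXGBE_FDIRCTRL_PBALLOC_128K |
		   IXGBE_FDIRCTRL_REPORT_STATUS |
		   ((IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t))
			<< IXGBE_FDIRCTRL_FLEX_SHIFT) |
		   IXGBE_FDIRCTRL_PERFECT_MATCH |
		   (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
		   (IXGBE_FDIRCTRL_FILTERMODE_MACVLAN
			<< IXGBE_FDIRCTRL_FILTERMODE_SHIFT);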
@@ -264,6 +264,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
/*
* mask VM pool and DIPv6 since they are currently not supported;
* mask FLEX byte, it will be set in flex_conf
@@ -325,7 +326,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
*reg = ~(info->mask.dst_ipv4_mask);
- if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
+ if (adapter->fdir_conf.mode == IXGBE_FDIR_MODE_SIGNATURE) {
/*
* Store source and destination IPv6 masks (bit reversed)
*/
@@ -348,19 +349,20 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
/* mask VM pool and DIPv6 since they are currently not supported;
* mask FLEX byte, it will be set in flex_conf
*/
uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
IXGBE_FDIRM_FLEX;
uint32_t fdiripv6m;
- enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+ enum ixgbe_fdir_mode mode = adapter->fdir_conf.mode;
uint16_t mac_mask;
PMD_INIT_FUNC_TRACE();
/* set the default UDP port for VxLAN */
- if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+ if (mode == IXGBE_FDIR_MODE_PERFECT_TUNNEL)
IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, RTE_VXLAN_DEFAULT_PORT);
/* some bits must be set for mac vlan or tunnel mode */
@@ -384,11 +386,11 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
fdiripv6m |= IXGBE_FDIRIP6M_ALWAYS_MASK;
- if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+ if (mode == IXGBE_FDIR_MODE_PERFECT_MAC_VLAN)
fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE |
IXGBE_FDIRIP6M_TNI_VNI;
- if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ if (mode == IXGBE_FDIR_MODE_PERFECT_TUNNEL) {
fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
mac_mask = info->mask.mac_addr_byte_mask &
(IXGBE_FDIRIP6M_INNER_MAC >>
@@ -436,7 +438,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
static int
ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+ const struct ixgbe_fdir_masks *input_mask)
{
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
@@ -459,7 +461,7 @@ ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
static int
ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+ const struct ixgbe_fdir_masks *input_mask)
{
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
@@ -475,15 +477,16 @@ ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
static int
ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+ const struct ixgbe_fdir_masks *input_mask)
{
- enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
+ enum ixgbe_fdir_mode mode = adapter->fdir_conf.mode;
- if (mode >= RTE_FDIR_MODE_SIGNATURE &&
- mode <= RTE_FDIR_MODE_PERFECT)
+ if (mode >= IXGBE_FDIR_MODE_SIGNATURE &&
+ mode <= IXGBE_FDIR_MODE_PERFECT)
return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
- else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
- mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ else if (mode >= IXGBE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ mode <= IXGBE_FDIR_MODE_PERFECT_TUNNEL)
return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
@@ -493,13 +496,14 @@ ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
int
ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
{
- enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
+ enum ixgbe_fdir_mode mode = adapter->fdir_conf.mode;
- if (mode >= RTE_FDIR_MODE_SIGNATURE &&
- mode <= RTE_FDIR_MODE_PERFECT)
+ if (mode >= IXGBE_FDIR_MODE_SIGNATURE &&
+ mode <= IXGBE_FDIR_MODE_PERFECT)
return fdir_set_input_mask_82599(dev);
- else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
- mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ else if (mode >= IXGBE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ mode <= IXGBE_FDIR_MODE_PERFECT_TUNNEL)
return fdir_set_input_mask_x550(dev);
PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
@@ -533,7 +537,7 @@ ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
static int
fdir_set_input_mask(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+ const struct ixgbe_fdir_masks *input_mask)
{
int ret;
@@ -550,13 +554,13 @@ fdir_set_input_mask(struct rte_eth_dev *dev,
*/
static int
ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl)
+ const struct ixgbe_fdir_flex_conf *conf, uint32_t *fdirctrl)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
- const struct rte_eth_flex_payload_cfg *flex_cfg;
- const struct rte_eth_fdir_flex_mask *flex_mask;
+ const struct ixgbe_flex_payload_cfg *flex_cfg;
+ const struct ixgbe_fdir_flex_mask *flex_mask;
uint32_t fdirm;
uint16_t flexbytes = 0;
uint16_t i;
@@ -570,7 +574,7 @@ ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
for (i = 0; i < conf->nb_payloads; i++) {
flex_cfg = &conf->flex_set[i];
- if (flex_cfg->type != RTE_ETH_RAW_PAYLOAD) {
+ if (flex_cfg->type != IXGBE_RAW_PAYLOAD) {
PMD_DRV_LOG(ERR, "unsupported payload type.");
return -EINVAL;
}
@@ -615,10 +619,11 @@ int
ixgbe_fdir_configure(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
int err;
uint32_t fdirctrl, pbsize;
int i;
- enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+ enum ixgbe_fdir_mode mode = adapter->fdir_conf.mode;
PMD_INIT_FUNC_TRACE();
@@ -633,11 +638,11 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
if (hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_X550EM_a &&
- mode != RTE_FDIR_MODE_SIGNATURE &&
- mode != RTE_FDIR_MODE_PERFECT)
+ mode != IXGBE_FDIR_MODE_SIGNATURE &&
+ mode != IXGBE_FDIR_MODE_PERFECT)
return -ENOSYS;
- err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
+ err = configure_fdir_flags(&adapter->fdir_conf, &fdirctrl);
if (err)
return err;
@@ -659,13 +664,13 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
for (i = 1; i < 8; i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
- err = fdir_set_input_mask(dev, &dev->data->dev_conf.fdir_conf.mask);
+ err = fdir_set_input_mask(dev, &adapter->fdir_conf.mask);
if (err < 0) {
PMD_INIT_LOG(ERR, " Error on setting FD mask");
return err;
}
err = ixgbe_set_fdir_flex_conf(dev,
- &dev->data->dev_conf.fdir_conf.flex_conf, &fdirctrl);
+ &adapter->fdir_conf.flex_conf, &fdirctrl);
if (err < 0) {
PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
return err;
@@ -894,13 +899,13 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
static uint32_t
atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc)
+ enum ixgbe_fdir_pballoc_type pballoc)
{
- if (pballoc == RTE_FDIR_PBALLOC_256K)
+ if (pballoc == IXGBE_FDIR_PBALLOC_256K)
return ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
PERFECT_BUCKET_256KB_HASH_MASK;
- else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ else if (pballoc == IXGBE_FDIR_PBALLOC_128K)
return ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
PERFECT_BUCKET_128KB_HASH_MASK;
@@ -937,15 +942,15 @@ ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, uint32_t *fdircmd)
*/
static uint32_t
atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
- enum rte_fdir_pballoc_type pballoc)
+ enum ixgbe_fdir_pballoc_type pballoc)
{
uint32_t bucket_hash, sig_hash;
- if (pballoc == RTE_FDIR_PBALLOC_256K)
+ if (pballoc == IXGBE_FDIR_PBALLOC_256K)
bucket_hash = ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
SIG_BUCKET_256KB_HASH_MASK;
- else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ else if (pballoc == IXGBE_FDIR_PBALLOC_128K)
bucket_hash = ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
SIG_BUCKET_128KB_HASH_MASK;
@@ -970,7 +975,7 @@ static int
fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input, uint8_t queue,
uint32_t fdircmd, uint32_t fdirhash,
- enum rte_fdir_mode mode)
+ enum ixgbe_fdir_mode mode)
{
uint32_t fdirport, fdirvlan;
u32 addr_low, addr_high;
@@ -978,7 +983,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
int err = 0;
volatile uint32_t *reg;
- if (mode == RTE_FDIR_MODE_PERFECT) {
+ if (mode == IXGBE_FDIR_MODE_PERFECT) {
/* record the IPv4 address (big-endian)
* can not use IXGBE_WRITE_REG.
*/
@@ -992,8 +997,8 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
fdirport |= IXGBE_NTOHS(input->formatted.src_port);
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
- } else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
- mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ } else if (mode >= IXGBE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ mode <= IXGBE_FDIR_MODE_PERFECT_TUNNEL) {
/* for mac vlan and tunnel modes */
addr_low = ((u32)input->formatted.inner_mac[0] |
((u32)input->formatted.inner_mac[1] << 8) |
@@ -1002,7 +1007,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
addr_high = ((u32)input->formatted.inner_mac[4] |
((u32)input->formatted.inner_mac[5] << 8));
- if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ if (mode == IXGBE_FDIR_MODE_PERFECT_MAC_VLAN) {
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), addr_high);
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
@@ -1226,6 +1231,7 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
bool update)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
uint32_t fdircmd_flags;
uint32_t fdirhash;
uint8_t queue;
@@ -1233,11 +1239,11 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
int err;
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
- enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ enum ixgbe_fdir_mode fdir_mode = adapter->fdir_conf.mode;
struct ixgbe_fdir_filter *node;
bool add_node = FALSE;
- if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ if (fdir_mode == IXGBE_FDIR_MODE_NONE ||
fdir_mode != rule->mode)
return -ENOTSUP;
@@ -1257,16 +1263,16 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
IXGBE_ATR_FLOW_TYPE_IPV6) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0) &&
- (rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
- rule->mode != RTE_FDIR_MODE_PERFECT_TUNNEL)) {
+ (rule->mode != IXGBE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ rule->mode != IXGBE_FDIR_MODE_PERFECT_TUNNEL)) {
PMD_DRV_LOG(ERR, "By this device,"
" IPv4 is not supported without"
" L4 protocol and ports masked!");
return -ENOTSUP;
}
- if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
- fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ if (fdir_mode >= IXGBE_FDIR_MODE_PERFECT &&
+ fdir_mode <= IXGBE_FDIR_MODE_PERFECT_TUNNEL)
is_perfect = TRUE;
if (is_perfect) {
@@ -1277,12 +1283,12 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
return -ENOTSUP;
}
fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
- dev->data->dev_conf.fdir_conf.pballoc);
+ adapter->fdir_conf.pballoc);
fdirhash |= rule->soft_id <<
IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
} else
fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
- dev->data->dev_conf.fdir_conf.pballoc);
+ adapter->fdir_conf.pballoc);
if (del) {
err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
@@ -1300,7 +1306,7 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
if (is_perfect) {
- queue = dev->data->dev_conf.fdir_conf.drop_queue;
+ queue = adapter->fdir_conf.drop_queue;
fdircmd_flags |= IXGBE_FDIRCMD_DROP;
} else {
PMD_DRV_LOG(ERR, "Drop option is not supported in"
@@ -1587,12 +1593,13 @@ ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *fdir_info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
+ enum ixgbe_fdir_mode fdir_mode = adapter->fdir_conf.mode;
struct ixgbe_fdir_filter *node;
bool is_perfect = FALSE;
- enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
- if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
- fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ if (fdir_mode >= IXGBE_FDIR_MODE_PERFECT &&
+ fdir_mode <= IXGBE_FDIR_MODE_PERFECT_TUNNEL)
is_perfect = TRUE;
if (is_perfect) {
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index b2a2bfc02..61bdca7e6 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -52,17 +52,17 @@
/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
- struct rte_eth_ntuple_filter filter_info;
+ struct ixgbe_ntuple_filter filter_info;
};
/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
- struct rte_eth_ethertype_filter filter_info;
+ struct ixgbe_flow_ethertype_filter filter_info;
};
/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
- struct rte_eth_syn_filter filter_info;
+ struct ixgbe_syn_filter filter_info;
};
/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
@@ -72,7 +72,7 @@ struct ixgbe_fdir_rule_ele {
/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
- struct rte_eth_l2_tunnel_conf filter_info;
+ struct ixgbe_l2_tunnel_cfg filter_info;
};
/* rss filter list structure */
struct ixgbe_rss_conf_ele {
@@ -172,7 +172,7 @@ static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_ntuple_filter *filter,
+ struct ixgbe_ntuple_filter *filter,
struct rte_flow_error *error)
{
const struct rte_flow_item *item;
@@ -225,7 +225,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
@@ -391,7 +391,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -406,7 +406,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* get the TCP/UDP/SCTP info */
if (item->type != RTE_FLOW_ITEM_TYPE_END &&
(!item->spec || !item->mask)) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ntuple mask");
@@ -415,7 +415,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/*Not supported last point for range*/
if (item->last) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
item, "Not supported last point for range");
@@ -437,7 +437,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
tcp_mask->hdr.cksum ||
tcp_mask->hdr.tcp_urp) {
memset(filter, 0,
- sizeof(struct rte_eth_ntuple_filter));
+ sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -456,11 +456,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->dst_port_mask = tcp_mask->hdr.dst_port;
filter->src_port_mask = tcp_mask->hdr.src_port;
if (tcp_mask->hdr.tcp_flags == 0xFF) {
- filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ filter->flags |= IXGBE_NTUPLE_FLAGS_TCP_FLAG;
} else if (!tcp_mask->hdr.tcp_flags) {
- filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ filter->flags &= ~IXGBE_NTUPLE_FLAGS_TCP_FLAG;
} else {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -481,7 +481,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
if (udp_mask->hdr.dgram_len ||
udp_mask->hdr.dgram_cksum) {
memset(filter, 0,
- sizeof(struct rte_eth_ntuple_filter));
+ sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -513,7 +513,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
if (sctp_mask->hdr.tag ||
sctp_mask->hdr.cksum) {
memset(filter, 0,
- sizeof(struct rte_eth_ntuple_filter));
+ sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -533,7 +533,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* check if the next not void item is END */
item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by ntuple filter");
@@ -548,7 +548,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
*/
act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
item, "Not supported action.");
@@ -560,7 +560,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
@@ -570,7 +570,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* parse attr */
/* must be input direction */
if (!attr->ingress) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
attr, "Only support ingress.");
@@ -579,7 +579,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* not supported */
if (attr->egress) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
attr, "Not support egress.");
@@ -588,7 +588,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* not supported */
if (attr->transfer) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
attr, "No support for transfer.");
@@ -596,7 +596,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
if (attr->priority > 0xFFFF) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
attr, "Error priority.");
@@ -616,7 +616,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_ntuple_filter *filter,
+ struct ixgbe_ntuple_filter *filter,
struct rte_flow_error *error)
{
int ret;
@@ -636,8 +636,8 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
#endif
/* Ixgbe doesn't support tcp flags. */
- if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ if (filter->flags & IXGBE_NTUPLE_FLAGS_TCP_FLAG) {
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "Not supported by ntuple filter");
@@ -647,7 +647,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
/* Ixgbe doesn't support many priorities. */
if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
- memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(filter, 0, sizeof(struct ixgbe_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "Priority not supported by ntuple filter");
@@ -658,7 +658,7 @@ ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
return -rte_errno;
/* fixed value for ixgbe */
- filter->flags = RTE_5TUPLE_FLAGS;
+ filter->flags = IXGBE_5TUPLE_FLAGS;
return 0;
}
@@ -682,7 +682,7 @@ static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
const struct rte_flow_action *actions,
- struct rte_eth_ethertype_filter *filter,
+ struct ixgbe_flow_ethertype_filter *filter,
struct rte_flow_error *error)
{
const struct rte_flow_item *item;
@@ -761,13 +761,13 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
}
/* If mask bits of destination MAC address
- * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ * are full of 1, set IXGBE_ETHTYPE_FLAGS_MAC.
*/
if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
filter->mac_addr = eth_spec->dst;
- filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ filter->flags |= IXGBE_ETHTYPE_FLAGS_MAC;
} else {
- filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ filter->flags &= ~IXGBE_ETHTYPE_FLAGS_MAC;
}
filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
@@ -795,7 +795,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
act_q = (const struct rte_flow_action_queue *)act->conf;
filter->queue = act_q->index;
} else {
- filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ filter->flags |= IXGBE_ETHTYPE_FLAGS_DROP;
}
/* Check if the next non-void item is END */
@@ -856,7 +856,7 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_ethertype_filter *filter,
+ struct ixgbe_flow_ethertype_filter *filter,
struct rte_flow_error *error)
{
int ret;
@@ -871,8 +871,8 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
return ret;
/* Ixgbe doesn't support MAC address. */
- if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
- memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ if (filter->flags & IXGBE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct ixgbe_flow_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "Not supported by ethertype filter");
@@ -880,7 +880,7 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
}
if (filter->queue >= dev->data->nb_rx_queues) {
- memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ memset(filter, 0, sizeof(struct ixgbe_flow_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "queue index much too big");
@@ -889,23 +889,23 @@ ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
filter->ether_type == RTE_ETHER_TYPE_IPV6) {
- memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ memset(filter, 0, sizeof(struct ixgbe_flow_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "IPv4/IPv6 not supported by ethertype filter");
return -rte_errno;
}
- if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
- memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ if (filter->flags & IXGBE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct ixgbe_flow_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "mac compare is unsupported");
return -rte_errno;
}
- if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
- memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ if (filter->flags & IXGBE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct ixgbe_flow_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "drop option is unsupported");
@@ -939,7 +939,7 @@ static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_syn_filter *filter,
+ struct ixgbe_syn_filter *filter,
struct rte_flow_error *error)
{
const struct rte_flow_item *item;
@@ -1058,7 +1058,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
tcp_mask->hdr.rx_win ||
tcp_mask->hdr.cksum ||
tcp_mask->hdr.tcp_urp) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct ixgbe_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by syn filter");
@@ -1068,7 +1068,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
/* check if the next not void item is END */
item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct ixgbe_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by syn filter");
@@ -1078,7 +1078,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
/* check if the first not void action is QUEUE. */
act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct ixgbe_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
@@ -1088,7 +1088,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
act_q = (const struct rte_flow_action_queue *)act->conf;
filter->queue = act_q->index;
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct ixgbe_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
@@ -1098,7 +1098,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct ixgbe_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
@@ -1108,7 +1108,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
/* parse attr */
/* must be input direction */
if (!attr->ingress) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct ixgbe_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
attr, "Only support ingress.");
@@ -1117,7 +1117,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
/* not supported */
if (attr->egress) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct ixgbe_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
attr, "Not support egress.");
@@ -1126,7 +1126,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
/* not supported */
if (attr->transfer) {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct ixgbe_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
attr, "No support for transfer.");
@@ -1139,7 +1139,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
} else if (attr->priority == (uint32_t)~0U) {
filter->hig_pri = 1;
} else {
- memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(filter, 0, sizeof(struct ixgbe_syn_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
attr, "Not support priority.");
@@ -1154,7 +1154,7 @@ ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_syn_filter *filter,
+ struct ixgbe_syn_filter *filter,
struct rte_flow_error *error)
{
int ret;
@@ -1197,7 +1197,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_l2_tunnel_conf *filter,
+ struct ixgbe_l2_tunnel_cfg *filter,
struct rte_flow_error *error)
{
const struct rte_flow_item *item;
@@ -1231,7 +1231,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
/* The first not void item should be e-tag. */
item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by L2 tunnel filter");
@@ -1239,7 +1239,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
}
if (!item->spec || !item->mask) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by L2 tunnel filter");
return -rte_errno;
@@ -1261,7 +1261,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
e_tag_mask->in_ecid_e ||
e_tag_mask->ecid_e ||
e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by L2 tunnel filter");
@@ -1278,7 +1278,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
/* check if the next not void item is END */
item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by L2 tunnel filter");
@@ -1288,7 +1288,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
/* parse attr */
/* must be input direction */
if (!attr->ingress) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
attr, "Only support ingress.");
@@ -1297,7 +1297,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
/* not supported */
if (attr->egress) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
attr, "Not support egress.");
@@ -1306,7 +1306,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
/* not supported */
if (attr->transfer) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
attr, "No support for transfer.");
@@ -1315,7 +1315,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
/* not supported */
if (attr->priority) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
attr, "Not support priority.");
@@ -1326,7 +1326,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
act->type != RTE_FLOW_ACTION_TYPE_PF) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
@@ -1343,7 +1343,7 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
@@ -1358,7 +1358,7 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_l2_tunnel_conf *l2_tn_filter,
+ struct ixgbe_l2_tunnel_cfg *l2_tn_filter,
struct rte_flow_error *error)
{
int ret = 0;
@@ -1372,7 +1372,7 @@ ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
if (hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_X550EM_a) {
- memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "Not supported by L2 tunnel filter");
@@ -1451,7 +1451,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
rule->queue = act_q->index;
} else { /* drop */
/* signature mode does not support drop action. */
- if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
+ if (rule->mode == IXGBE_FDIR_MODE_SIGNATURE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1674,9 +1674,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
}
if (signature_match(pattern))
- rule->mode = RTE_FDIR_MODE_SIGNATURE;
+ rule->mode = IXGBE_FDIR_MODE_SIGNATURE;
else
- rule->mode = RTE_FDIR_MODE_PERFECT;
+ rule->mode = IXGBE_FDIR_MODE_PERFECT;
/*Not supported last point for range*/
if (item->last) {
@@ -1719,7 +1719,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
/* Ether type should be masked. */
if (eth_mask->type ||
- rule->mode == RTE_FDIR_MODE_SIGNATURE) {
+ rule->mode == IXGBE_FDIR_MODE_SIGNATURE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1728,7 +1728,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
}
/* If ethernet has meaning, it means MAC VLAN mode. */
- rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+ rule->mode = IXGBE_FDIR_MODE_PERFECT_MAC_VLAN;
/**
* src MAC address must be masked,
@@ -1759,7 +1759,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
* IPv6 is not supported.
*/
item = next_no_fuzzy_pattern(pattern, item);
- if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ if (rule->mode == IXGBE_FDIR_MODE_PERFECT_MAC_VLAN) {
if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1902,7 +1902,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
* 2. not support last
* 3. mask must not null
*/
- if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
+ if (rule->mode != IXGBE_FDIR_MODE_SIGNATURE ||
item->last ||
!item->mask) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2372,7 +2372,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
return -rte_errno;
}
- rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
+ rule->mode = IXGBE_FDIR_MODE_PERFECT_TUNNEL;
/* Skip MAC. */
if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
@@ -2767,7 +2767,8 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
{
int ret;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
+ enum ixgbe_fdir_mode fdir_mode = adapter->fdir_conf.mode;
if (hw->mac.type != ixgbe_mac_82599EB &&
hw->mac.type != ixgbe_mac_X540 &&
@@ -2796,7 +2797,7 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
rule->ixgbe_fdir.formatted.dst_port != 0))
return -ENOTSUP;
- if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ if (fdir_mode == IXGBE_FDIR_MODE_NONE ||
fdir_mode != rule->mode)
return -ENOTSUP;
@@ -3019,11 +3020,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
int ret;
- struct rte_eth_ntuple_filter ntuple_filter;
- struct rte_eth_ethertype_filter ethertype_filter;
- struct rte_eth_syn_filter syn_filter;
+ struct ixgbe_ntuple_filter ntuple_filter;
+ struct ixgbe_flow_ethertype_filter ethertype_filter;
+ struct ixgbe_syn_filter syn_filter;
struct ixgbe_fdir_rule fdir_rule;
- struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_l2_tunnel_cfg l2_tn_filter;
struct ixgbe_hw_fdir_info *fdir_info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
struct ixgbe_rte_flow_rss_conf rss_conf;
@@ -3053,7 +3054,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
TAILQ_INSERT_TAIL(&ixgbe_flow_list,
ixgbe_flow_mem_ptr, entries);
- memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(&ntuple_filter, 0, sizeof(struct ixgbe_ntuple_filter));
ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
@@ -3074,17 +3075,18 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
}
rte_memcpy(&ntuple_filter_ptr->filter_info,
&ntuple_filter,
- sizeof(struct rte_eth_ntuple_filter));
+ sizeof(struct ixgbe_ntuple_filter));
TAILQ_INSERT_TAIL(&filter_ntuple_list,
ntuple_filter_ptr, entries);
flow->rule = ntuple_filter_ptr;
- flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ flow->filter_type = IXGBE_FILTER_NTUPLE;
return flow;
}
goto out;
}
- memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ memset(ðertype_filter, 0,
+ sizeof(struct ixgbe_flow_ethertype_filter));
ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
actions, ðertype_filter, error);
if (!ret) {
@@ -3100,17 +3102,17 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
}
rte_memcpy(ðertype_filter_ptr->filter_info,
ðertype_filter,
- sizeof(struct rte_eth_ethertype_filter));
+ sizeof(struct ixgbe_flow_ethertype_filter));
TAILQ_INSERT_TAIL(&filter_ethertype_list,
ethertype_filter_ptr, entries);
flow->rule = ethertype_filter_ptr;
- flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ flow->filter_type = IXGBE_FILTER_ETHERTYPE;
return flow;
}
goto out;
}
- memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(&syn_filter, 0, sizeof(struct ixgbe_syn_filter));
ret = ixgbe_parse_syn_filter(dev, attr, pattern,
actions, &syn_filter, error);
if (!ret) {
@@ -3124,12 +3126,12 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
}
rte_memcpy(&syn_filter_ptr->filter_info,
&syn_filter,
- sizeof(struct rte_eth_syn_filter));
+ sizeof(struct ixgbe_syn_filter));
TAILQ_INSERT_TAIL(&filter_syn_list,
syn_filter_ptr,
entries);
flow->rule = syn_filter_ptr;
- flow->filter_type = RTE_ETH_FILTER_SYN;
+ flow->filter_type = IXGBE_FILTER_SYN;
return flow;
}
goto out;
@@ -3192,7 +3194,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
TAILQ_INSERT_TAIL(&filter_fdir_list,
fdir_rule_ptr, entries);
flow->rule = fdir_rule_ptr;
- flow->filter_type = RTE_ETH_FILTER_FDIR;
+ flow->filter_type = IXGBE_FILTER_FDIR;
return flow;
}
@@ -3211,7 +3213,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
goto out;
}
- memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(&l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
actions, &l2_tn_filter, error);
if (!ret) {
@@ -3225,11 +3227,11 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
}
rte_memcpy(&l2_tn_filter_ptr->filter_info,
&l2_tn_filter,
- sizeof(struct rte_eth_l2_tunnel_conf));
+ sizeof(struct ixgbe_l2_tunnel_cfg));
TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
l2_tn_filter_ptr, entries);
flow->rule = l2_tn_filter_ptr;
- flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
+ flow->filter_type = IXGBE_FILTER_L2_TUNNEL;
return flow;
}
}
@@ -3251,7 +3253,7 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
TAILQ_INSERT_TAIL(&filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;
- flow->filter_type = RTE_ETH_FILTER_HASH;
+ flow->filter_type = IXGBE_FILTER_HASH;
return flow;
}
}
@@ -3279,27 +3281,28 @@ ixgbe_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct rte_eth_ntuple_filter ntuple_filter;
- struct rte_eth_ethertype_filter ethertype_filter;
- struct rte_eth_syn_filter syn_filter;
- struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_ntuple_filter ntuple_filter;
+ struct ixgbe_flow_ethertype_filter ethertype_filter;
+ struct ixgbe_syn_filter syn_filter;
+ struct ixgbe_l2_tunnel_cfg l2_tn_filter;
struct ixgbe_fdir_rule fdir_rule;
struct ixgbe_rte_flow_rss_conf rss_conf;
int ret;
- memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ memset(&ntuple_filter, 0, sizeof(struct ixgbe_ntuple_filter));
ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
if (!ret)
return 0;
- memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ memset(&ethertype_filter, 0,
+ sizeof(struct ixgbe_flow_ethertype_filter));
ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
actions, &ethertype_filter, error);
if (!ret)
return 0;
- memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ memset(&syn_filter, 0, sizeof(struct ixgbe_syn_filter));
ret = ixgbe_parse_syn_filter(dev, attr, pattern,
actions, &syn_filter, error);
if (!ret)
@@ -3311,7 +3314,7 @@ ixgbe_flow_validate(struct rte_eth_dev *dev,
if (!ret)
return 0;
- memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(&l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_cfg));
ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
actions, &l2_tn_filter, error);
if (!ret)
@@ -3332,12 +3335,12 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
{
int ret;
struct rte_flow *pmd_flow = flow;
- enum rte_filter_type filter_type = pmd_flow->filter_type;
- struct rte_eth_ntuple_filter ntuple_filter;
- struct rte_eth_ethertype_filter ethertype_filter;
- struct rte_eth_syn_filter syn_filter;
+ enum ixgbe_filter_type filter_type = pmd_flow->filter_type;
+ struct ixgbe_ntuple_filter ntuple_filter;
+ struct ixgbe_flow_ethertype_filter ethertype_filter;
+ struct ixgbe_syn_filter syn_filter;
struct ixgbe_fdir_rule fdir_rule;
- struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_l2_tunnel_cfg l2_tn_filter;
struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
@@ -3349,12 +3352,12 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
struct ixgbe_rss_conf_ele *rss_filter_ptr;
switch (filter_type) {
- case RTE_ETH_FILTER_NTUPLE:
+ case IXGBE_FILTER_NTUPLE:
ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
pmd_flow->rule;
rte_memcpy(&ntuple_filter,
&ntuple_filter_ptr->filter_info,
- sizeof(struct rte_eth_ntuple_filter));
+ sizeof(struct ixgbe_ntuple_filter));
ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
if (!ret) {
TAILQ_REMOVE(&filter_ntuple_list,
@@ -3362,12 +3365,12 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
rte_free(ntuple_filter_ptr);
}
break;
- case RTE_ETH_FILTER_ETHERTYPE:
+ case IXGBE_FILTER_ETHERTYPE:
ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
pmd_flow->rule;
rte_memcpy(&ethertype_filter,
&ethertype_filter_ptr->filter_info,
- sizeof(struct rte_eth_ethertype_filter));
+ sizeof(struct ixgbe_flow_ethertype_filter));
ret = ixgbe_add_del_ethertype_filter(dev,
&ethertype_filter, FALSE);
if (!ret) {
@@ -3376,12 +3379,12 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
rte_free(ethertype_filter_ptr);
}
break;
- case RTE_ETH_FILTER_SYN:
+ case IXGBE_FILTER_SYN:
syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
pmd_flow->rule;
rte_memcpy(&syn_filter,
&syn_filter_ptr->filter_info,
- sizeof(struct rte_eth_syn_filter));
+ sizeof(struct ixgbe_syn_filter));
ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
if (!ret) {
TAILQ_REMOVE(&filter_syn_list,
@@ -3389,7 +3392,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
rte_free(syn_filter_ptr);
}
break;
- case RTE_ETH_FILTER_FDIR:
+ case IXGBE_FILTER_FDIR:
fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
rte_memcpy(&fdir_rule,
&fdir_rule_ptr->filter_info,
@@ -3403,11 +3406,11 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
fdir_info->mask_added = false;
}
break;
- case RTE_ETH_FILTER_L2_TUNNEL:
+ case IXGBE_FILTER_L2_TUNNEL:
l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
pmd_flow->rule;
rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
- sizeof(struct rte_eth_l2_tunnel_conf));
+ sizeof(struct ixgbe_l2_tunnel_cfg));
ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
if (!ret) {
TAILQ_REMOVE(&filter_l2_tunnel_list,
@@ -3415,7 +3418,7 @@ ixgbe_flow_destroy(struct rte_eth_dev *dev,
rte_free(l2_tn_filter_ptr);
}
break;
- case RTE_ETH_FILTER_HASH:
+ case IXGBE_FILTER_HASH:
rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
pmd_flow->rule;
ret = ixgbe_config_rss_filter(dev,
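
Note: the ixgbe_flow.c changes above only rename the internal structures
behind each rte_flow rule; the public rte_flow API used to reach this code
is unchanged. As a minimal sketch (not part of this patch; the port id,
queue index, addresses and ports are assumptions for illustration), a rule
that would exercise the ntuple path in ixgbe_flow_create() could look like:

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_flow.h>

static struct rte_flow *
example_create_ntuple_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* The ixgbe ntuple parser expects spec/mask on the IP and L4 items. */
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = RTE_BE16(53),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = RTE_BE16(UINT16_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* Validation walks the same parse helpers as ixgbe_flow_create(). */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}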
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index a97c27189..e6713a728 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -278,10 +278,10 @@ static inline int
ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+ struct ixgbe_adapter *adapter = dev->data->dev_private;
/* no fdir support */
- if (fconf->mode != RTE_FDIR_MODE_NONE)
+ if (adapter->fdir_conf.mode != IXGBE_FDIR_MODE_NONE)
return -1;
return 0;
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index d2f708242..c83ab7eaa 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -1172,3 +1172,75 @@ rte_pmd_ixgbe_get_fdir_stats(uint16_t port,
return 0;
}
+
+int
+rte_pmd_ixgbe_update_fdir_conf(uint16_t port,
+ struct rte_pmd_ixgbe_fdir_conf conf)
+{
+ struct ixgbe_adapter *ad;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ ad = dev->data->dev_private;
+
+ switch (conf.mode) {
+ case RTE_PMD_IXGBE_FDIR_MODE_NONE:
+ ad->fdir_conf.mode = IXGBE_FDIR_MODE_NONE;
+ break;
+ case RTE_PMD_IXGBE_FDIR_MODE_SIGNATURE:
+ ad->fdir_conf.mode = IXGBE_FDIR_MODE_SIGNATURE;
+ break;
+ case RTE_PMD_IXGBE_FDIR_MODE_PERFECT:
+ ad->fdir_conf.mode = IXGBE_FDIR_MODE_PERFECT;
+ break;
+ case RTE_PMD_IXGBE_FDIR_MODE_PERFECT_MAC_VLAN:
+ ad->fdir_conf.mode = IXGBE_FDIR_MODE_PERFECT_MAC_VLAN;
+ break;
+ case RTE_PMD_IXGBE_FDIR_MODE_PERFECT_TUNNEL:
+ ad->fdir_conf.mode = IXGBE_FDIR_MODE_PERFECT_TUNNEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (conf.status) {
+ case RTE_PMD_IXGBE_FDIR_NO_REPORT_STATUS:
+ ad->fdir_conf.status = IXGBE_FDIR_NO_REPORT_STATUS;
+ break;
+ case RTE_PMD_IXGBE_FDIR_REPORT_STATUS:
+ ad->fdir_conf.status = IXGBE_FDIR_REPORT_STATUS;
+ break;
+ case RTE_PMD_IXGBE_FDIR_REPORT_STATUS_ALWAYS:
+ ad->fdir_conf.status = IXGBE_FDIR_REPORT_STATUS_ALWAYS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (conf.pballoc) {
+ case RTE_PMD_IXGBE_FDIR_PBALLOC_64K:
+ ad->fdir_conf.pballoc = IXGBE_FDIR_PBALLOC_64K;
+ break;
+ case RTE_PMD_IXGBE_FDIR_PBALLOC_128K:
+ ad->fdir_conf.pballoc = IXGBE_FDIR_PBALLOC_128K;
+ break;
+ case RTE_PMD_IXGBE_FDIR_PBALLOC_256K:
+ ad->fdir_conf.pballoc = IXGBE_FDIR_PBALLOC_256K;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ad->fdir_conf.drop_queue = conf.drop_queue;
+ rte_memcpy(&ad->fdir_conf.mask, &conf.mask,
+ sizeof(struct ixgbe_fdir_masks));
+ rte_memcpy(&ad->fdir_conf.flex_conf, &conf.flex_conf,
+ sizeof(struct ixgbe_fdir_flex_conf));
+
+ return 0;
+}
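
For context, a usage sketch of the new entry point, replacing what an
application used to fill into rte_eth_conf.fdir_conf (not part of this
patch; the port id, drop queue and masks are illustrative assumptions):

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>
#include <rte_pmd_ixgbe.h>

static int
example_enable_fdir(uint16_t port_id)
{
	struct rte_pmd_ixgbe_fdir_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.mode = RTE_PMD_IXGBE_FDIR_MODE_PERFECT;
	conf.pballoc = RTE_PMD_IXGBE_FDIR_PBALLOC_64K;
	conf.status = RTE_PMD_IXGBE_FDIR_REPORT_STATUS;
	conf.drop_queue = 127; /* assumed drop queue for this sketch */
	conf.mask.vlan_tci_mask = rte_cpu_to_be_16(0x0fff);
	conf.mask.ipv4_mask.src_ip = rte_cpu_to_be_32(UINT32_MAX);
	conf.mask.ipv4_mask.dst_ip = rte_cpu_to_be_32(UINT32_MAX);
	conf.mask.src_port_mask = rte_cpu_to_be_16(UINT16_MAX);
	conf.mask.dst_port_mask = rte_cpu_to_be_16(UINT16_MAX);

	/* The PMD consumes the stored copy when FDIR is (re)configured;
	 * calling this before device setup is assumed here.
	 */
	return rte_pmd_ixgbe_update_fdir_conf(port_id, conf);
}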
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 90fc8160b..79bd8c8da 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -729,6 +729,138 @@ __rte_experimental
int
rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable);
+enum rte_pmd_ixgbe_fdir_mode {
+ /* Disable FDIR support. */
+ RTE_PMD_IXGBE_FDIR_MODE_NONE = 0,
+ /* Enable FDIR signature filter mode. */
+ RTE_PMD_IXGBE_FDIR_MODE_SIGNATURE,
+ /* Enable FDIR perfect filter mode. */
+ RTE_PMD_IXGBE_FDIR_MODE_PERFECT,
+ /* Enable FDIR filter mode - MAC VLAN. */
+ RTE_PMD_IXGBE_FDIR_MODE_PERFECT_MAC_VLAN,
+ /* Enable FDIR filter mode - tunnel. */
+ RTE_PMD_IXGBE_FDIR_MODE_PERFECT_TUNNEL,
+};
+
+/* A structure used to define the input for an IPv4 flow. */
+struct rte_pmd_ixgbe_ipv4_flow {
+ uint32_t src_ip; /* IPv4 source address in big endian. */
+ uint32_t dst_ip; /* IPv4 destination address in big endian. */
+ uint8_t tos; /* Type of service to match. */
+ uint8_t ttl; /* Time to live to match. */
+ uint8_t proto; /* Protocol, next header in big endian. */
+};
+
+/* A structure used to define the input for an IPv6 flow. */
+struct rte_pmd_ixgbe_ipv6_flow {
+ uint32_t src_ip[4]; /* IPv6 source address in big endian. */
+ uint32_t dst_ip[4]; /* IPv6 destination address in big endian. */
+ uint8_t tc; /* Traffic class to match. */
+ uint8_t proto; /* Protocol, next header to match. */
+ uint8_t hop_limits; /* Hop limits to match. */
+};
+struct rte_pmd_ixgbe_fdir_masks {
+ /* Bit mask for vlan_tci in big endian */
+ uint16_t vlan_tci_mask;
+ /* Bit mask for ipv4 flow in big endian. */
+ struct rte_pmd_ixgbe_ipv4_flow ipv4_mask;
+ /* Bit mask for ipv6 flow in big endian. */
+ struct rte_pmd_ixgbe_ipv6_flow ipv6_mask;
+ /* Bit mask for L4 source port in big endian. */
+ uint16_t src_port_mask;
+ /* Bit mask for L4 destination port in big endian. */
+ uint16_t dst_port_mask;
+ /* Per-byte bit mask for the 6 bytes of the MAC address;
+ * bit 0 matches the first byte on the wire.
+ */
+ uint8_t mac_addr_byte_mask;
+ /* Bit mask for tunnel ID in big endian. */
+ uint32_t tunnel_id_mask;
+ /* 1 - Match tunnel type, 0 - Ignore tunnel type. */
+ uint8_t tunnel_type_mask;
+};
+
+#define RTE_PMD_IXGBE_FDIR_MAX_FLEXLEN 16 /* Max length of flexbytes. */
+
+/* Payload type */
+enum rte_pmd_ixgbe_payload_type {
+ RTE_PMD_IXGBE_PAYLOAD_UNKNOWN = 0,
+ RTE_PMD_IXGBE_RAW_PAYLOAD,
+ RTE_PMD_IXGBE_L2_PAYLOAD,
+ RTE_PMD_IXGBE_L3_PAYLOAD,
+ RTE_PMD_IXGBE_L4_PAYLOAD,
+ RTE_PMD_IXGBE_PAYLOAD_MAX = 8,
+};
+
+/* A structure used to select bytes extracted from the protocol layers to
+ * flexible payload for filter
+ */
+struct rte_pmd_ixgbe_flex_payload_cfg {
+ enum rte_pmd_ixgbe_payload_type type; /* Payload type */
+ uint16_t src_offset[RTE_PMD_IXGBE_FDIR_MAX_FLEXLEN];
+ /* Offset in bytes from the beginning of the packet's payload;
+ * src_offset[i] indicates the offset of flexbyte i within the
+ * original packet payload.
+ */
+};
+
+/* A structure used to define FDIR masks for flexible payload
+ * for each flow type
+ */
+struct rte_pmd_ixgbe_fdir_flex_mask {
+ uint16_t flow_type;
+ uint8_t mask[RTE_PMD_IXGBE_FDIR_MAX_FLEXLEN];
+ /* Mask for the whole flexible payload */
+};
+
+/* A structure used to define all flexible payload related setting
+ * include flex payload and flex mask
+ */
+struct rte_pmd_ixgbe_fdir_flex_conf {
+ uint16_t nb_payloads; /* Number of payload configurations that follow. */
+ uint16_t nb_flexmasks; /* Number of flex masks that follow. */
+ struct rte_pmd_ixgbe_flex_payload_cfg flex_set[RTE_PMD_IXGBE_PAYLOAD_MAX];
+ /* Flex payload configuration for each payload type */
+ struct rte_pmd_ixgbe_fdir_flex_mask flex_mask[RTE_ETH_FLOW_MAX];
+ /* Flex mask configuration for each flow type */
+};
+
+#define RTE_PMD_IXGBE_UINT64_BIT (CHAR_BIT * sizeof(uint64_t))
+#define RTE_PMD_IXGBE_FLOW_MASK_ARRAY_SIZE \
+ (RTE_ALIGN(RTE_ETH_FLOW_MAX, RTE_PMD_IXGBE_UINT64_BIT) \
+ / RTE_PMD_IXGBE_UINT64_BIT)
+
+struct rte_pmd_ixgbe_fdir_info {
+ enum rte_pmd_ixgbe_fdir_mode mode; /* Flow director mode */
+ struct rte_pmd_ixgbe_fdir_masks mask;
+ /* Flex payload configuration information */
+ struct rte_pmd_ixgbe_fdir_flex_conf flex_conf;
+ uint32_t guarant_spc; /* Guaranteed spaces. */
+ uint32_t best_spc; /* Best effort spaces. */
+ /* Bit mask for every supported flow type. */
+ uint64_t flow_types_mask[RTE_PMD_IXGBE_FLOW_MASK_ARRAY_SIZE];
+ uint32_t max_flexpayload; /* Total flex payload in bytes. */
+ /* Flexible payload unit in bytes. Sizes and alignments of all flex
+ * payload segments should be multiples of this value.
+ */
+ uint32_t flex_payload_unit;
+ /* Max number of flexible payload continuous segments.
+ * Each segment should be a multiple of flex_payload_unit.
+ */
+ uint32_t max_flex_payload_segment_num;
+ /* Maximum src_offset in bytes allowed. It indicates that
+ * src_offset[i] in struct rte_pmd_ixgbe_flex_payload_cfg should be
+ * less than this value.
+ */
+ uint16_t flex_payload_limit;
+ /* Flex bitmask unit in bytes. The size of flex bitmasks should be a
+ * multiple of this value.
+ */
+ uint32_t flex_bitmask_unit;
+ /* Max supported size of flex bitmasks, in units of flex_bitmask_unit. */
+ uint32_t max_flex_bitmask_num;
+};
+
/**
* Get port fdir info
*
@@ -761,4 +893,50 @@ __rte_experimental
int
rte_pmd_ixgbe_get_fdir_stats(uint16_t port,
struct rte_eth_fdir_stats *fdir_stats);
+
+
+enum rte_pmd_ixgbe_fdir_status_mode {
+ RTE_PMD_IXGBE_FDIR_NO_REPORT_STATUS = 0, /* Never report FDIR hash. */
+ /* Only report FDIR hash for matching pkts. */
+ RTE_PMD_IXGBE_FDIR_REPORT_STATUS,
+ RTE_PMD_IXGBE_FDIR_REPORT_STATUS_ALWAYS, /* Always report FDIR hash. */
+};
+
+enum rte_pmd_ixgbe_fdir_pballoc_type {
+ RTE_PMD_IXGBE_FDIR_PBALLOC_NONE = 0,
+ RTE_PMD_IXGBE_FDIR_PBALLOC_64K = 1,
+ RTE_PMD_IXGBE_FDIR_PBALLOC_128K = 2,
+ RTE_PMD_IXGBE_FDIR_PBALLOC_256K = 3,
+};
+
+struct rte_pmd_ixgbe_fdir_conf {
+ enum rte_pmd_ixgbe_fdir_mode mode; /* Flow Director mode. */
+ /* Space for FDIR filters. */
+ enum rte_pmd_ixgbe_fdir_pballoc_type pballoc;
+ /* How to report FDIR hash. */
+ enum rte_pmd_ixgbe_fdir_status_mode status;
+ /* RX queue of packets matching a "drop" filter in perfect mode. */
+ uint8_t drop_queue;
+ struct rte_pmd_ixgbe_fdir_masks mask;
+ /* Flex payload configuration. */
+ struct rte_pmd_ixgbe_fdir_flex_conf flex_conf;
+};
+
+/**
+ * Update the Flow Director configuration.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param conf
+ *   Flow Director configuration.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENODEV) if *port* is invalid.
+ *   - (-ENOTSUP) if the device is not an ixgbe port.
+ *   - (-EINVAL) if bad parameter.
+ */
+__rte_experimental
+int
+rte_pmd_ixgbe_update_fdir_conf(uint16_t port,
+ struct rte_pmd_ixgbe_fdir_conf conf);
#endif /* _PMD_IXGBE_H_ */
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 9402802b0..80bd47121 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -45,4 +45,5 @@ EXPERIMENTAL {
rte_pmd_ixgbe_mdio_unlocked_read;
rte_pmd_ixgbe_mdio_unlocked_write;
rte_pmd_ixgbe_upd_fctrl_sbp;
+ rte_pmd_ixgbe_update_fdir_conf;
};
--
2.17.1