From: Jie Wang <jie1x.wang@intel.com>
To: dev@dpdk.org
Cc: qiming.yang@intel.com, qi.z.zhang@intel.com,
jingjing.wu@intel.com, beilei.xing@intel.com,
stevex.yang@intel.com, Jie Wang <jie1x.wang@intel.com>
Subject: [PATCH v5 3/5] net/iavf: support flow subscription pattern
Date: Wed, 7 Sep 2022 13:10:38 +0800
Message-ID: <20220907051040.119588-4-jie1x.wang@intel.com>
In-Reply-To: <20220907051040.119588-1-jie1x.wang@intel.com>
Add flow subscription pattern support for AVF.
The supported patterns are listed below:
eth
eth/vlan/ipv4
eth/ipv4(6)
eth/ipv4(6)/udp
eth/ipv4(6)/tcp
Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
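For reference, a minimal sketch of how an application could request one of
the patterns above through the generic rte_flow API: it subscribes to
eth/ipv4/udp traffic and steers hits to a local queue. This is not part of
the patch; the helper name, port id, queue index and the matched
address/port values are illustrative assumptions only.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_flow.h>

/* Hypothetical helper: subscribe to UDP flows destined to 192.168.0.1:4000
 * and receive them on queue 3 of this VF. All values are placeholders.
 */
static struct rte_flow *
subscribe_udp_flow(uint16_t port_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };

	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = RTE_BE16(4000),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = RTE_BE16(UINT16_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	/* The port-representor action must name the VF's own port id;
	 * iavf_fsub_parse_action() rejects anything else.
	 */
	struct rte_flow_action_ethdev port = { .port_id = port_id };
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR, .conf = &port },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}

With this parser, the pattern above resolves to IAVF_SW_INSET_MAC_IPV4_UDP,
and the action pair (port representor plus queue) is the two-action
combination accepted by iavf_fsub_check_action().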
drivers/net/iavf/iavf.h | 7 +
drivers/net/iavf/iavf_fsub.c | 598 ++++++++++++++++++++++++++++++++++-
2 files changed, 597 insertions(+), 8 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 025ab3ff60..f79c7f9f6e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -148,6 +148,13 @@ struct iavf_fdir_info {
struct iavf_fdir_conf conf;
};
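+/* State kept for one flow-subscription rule: the virtchnl subscribe and
+ * unsubscribe messages, the matched input-set bits and the rule's flow id.
+ */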
+struct iavf_fsub_conf {
+ struct virtchnl_flow_sub sub_fltr;
+ struct virtchnl_flow_unsub unsub_fltr;
+ uint64_t input_set;
+ uint32_t flow_id;
+};
+
struct iavf_qv_map {
uint16_t queue_id;
uint16_t vector_id;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 17f9bb2976..66e403d585 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -22,9 +22,51 @@
#include "iavf_generic_flow.h"
+#define MAX_QGRP_NUM_TYPE 7	/* number of valid queue-region sizes */
+#define IAVF_IPV6_ADDR_LENGTH 16
+#define MAX_INPUT_SET_BYTE 32	/* max header bytes a rule may match on */
+
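+/* Input-set bitmaps allowed for each supported pattern; bits outside the
+ * matched entry's mask are rejected by iavf_fsub_parse_pattern().
+ */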
+#define IAVF_SW_INSET_ETHER ( \
+ IAVF_INSET_DMAC | IAVF_INSET_SMAC | IAVF_INSET_ETHERTYPE)
+#define IAVF_SW_INSET_MAC_IPV4 ( \
+ IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+ IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS)
+#define IAVF_SW_INSET_MAC_VLAN_IPV4 ( \
+ IAVF_SW_INSET_MAC_IPV4 | IAVF_INSET_VLAN_OUTER)
+#define IAVF_SW_INSET_MAC_IPV4_TCP ( \
+ IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+ IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+ IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV4_UDP ( \
+ IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+ IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+ IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6 ( \
+ IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+ IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+ IAVF_INSET_IPV6_NEXT_HDR)
+#define IAVF_SW_INSET_MAC_IPV6_TCP ( \
+ IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+ IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+ IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6_UDP ( \
+ IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+ IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+ IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+
static struct iavf_flow_parser iavf_fsub_parser;
-static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+static struct
+iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+ {iavf_pattern_ethertype, IAVF_SW_INSET_ETHER, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4, IAVF_SW_INSET_MAC_IPV4, IAVF_INSET_NONE},
+ {iavf_pattern_eth_vlan_ipv4, IAVF_SW_INSET_MAC_VLAN_IPV4, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4_udp, IAVF_SW_INSET_MAC_IPV4_UDP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv4_tcp, IAVF_SW_INSET_MAC_IPV4_TCP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6, IAVF_SW_INSET_MAC_IPV6, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6_udp, IAVF_SW_INSET_MAC_IPV6_UDP, IAVF_INSET_NONE},
+ {iavf_pattern_eth_ipv6_tcp, IAVF_SW_INSET_MAC_IPV6_TCP, IAVF_INSET_NONE},
+};
static int
iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
@@ -53,17 +95,557 @@ iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
};
static int
-iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
- __rte_unused struct iavf_pattern_match_item *array,
- __rte_unused uint32_t array_len,
- __rte_unused const struct rte_flow_item pattern[],
- __rte_unused const struct rte_flow_action actions[],
- __rte_unused void **meta,
- __rte_unused struct rte_flow_error *error)
+iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
+ const uint64_t input_set_mask,
+ struct rte_flow_error *error,
+ struct iavf_fsub_conf *filter)
+{
+ struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
+ enum rte_flow_item_type item_type;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item *item = pattern;
+ struct virtchnl_proto_hdr_w_msk *hdr, *hdr1 = NULL;
+ uint64_t outer_input_set = IAVF_INSET_NONE;
+ uint64_t *input = NULL;
+ uint16_t input_set_byte = 0;
+ uint16_t j;
+ uint32_t layer = 0;
+
+ for (item = pattern; item->type !=
+ RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not support range");
+			return -rte_errno;
+ }
+ item_type = item->type;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ hdr1 = &hdrs->proto_hdr_w_msk[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
+
+ if (eth_spec && eth_mask) {
+ input = &outer_input_set;
+
+				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+ *input |= IAVF_INSET_DMAC;
+ input_set_byte += 6;
+ } else {
+ /* flow subscribe filter will add dst mac in kernel */
+ input_set_byte += 6;
+ }
+
+				if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+ *input |= IAVF_INSET_SMAC;
+ input_set_byte += 6;
+ }
+
+ if (eth_mask->type) {
+ *input |= IAVF_INSET_ETHERTYPE;
+ input_set_byte += 2;
+ }
+
+ rte_memcpy(hdr1->buffer_spec, eth_spec,
+ sizeof(struct rte_ether_hdr));
+ rte_memcpy(hdr1->buffer_mask, eth_mask,
+ sizeof(struct rte_ether_hdr));
+ } else {
+ /* flow subscribe filter will add dst mac in kernel */
+ input_set_byte += 6;
+ }
+
+ hdrs->count = ++layer;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+
+ hdr = &hdrs->proto_hdr_w_msk[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+ if (ipv4_spec && ipv4_mask) {
+ input = &outer_input_set;
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid IPv4 mask.");
+					return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr) {
+ *input |= IAVF_INSET_IPV4_SRC;
+ input_set_byte += 2;
+ }
+ if (ipv4_mask->hdr.dst_addr) {
+ *input |= IAVF_INSET_IPV4_DST;
+ input_set_byte += 2;
+ }
+ if (ipv4_mask->hdr.time_to_live) {
+ *input |= IAVF_INSET_IPV4_TTL;
+ input_set_byte++;
+ }
+ if (ipv4_mask->hdr.next_proto_id) {
+ *input |= IAVF_INSET_IPV4_PROTO;
+ input_set_byte++;
+ }
+ if (ipv4_mask->hdr.type_of_service) {
+ *input |= IAVF_INSET_IPV4_TOS;
+ input_set_byte++;
+ }
+
+ rte_memcpy(hdr->buffer_spec, &ipv4_spec->hdr,
+ sizeof(ipv4_spec->hdr));
+ rte_memcpy(hdr->buffer_mask, &ipv4_mask->hdr,
+ sizeof(ipv4_spec->hdr));
+ }
+
+ hdrs->count = ++layer;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+
+ hdr = &hdrs->proto_hdr_w_msk[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+ if (ipv6_spec && ipv6_mask) {
+ input = &outer_input_set;
+
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid IPv6 mask");
+ return false;
+					return -rte_errno;
+
+ for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+ if (ipv6_mask->hdr.src_addr[j]) {
+ *input |= IAVF_INSET_IPV6_SRC;
+ break;
+ }
+ }
+ for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+ if (ipv6_mask->hdr.dst_addr[j]) {
+ *input |= IAVF_INSET_IPV6_DST;
+ break;
+ }
+ }
+
+ for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+ if (ipv6_mask->hdr.src_addr[j])
+ input_set_byte++;
+
+ if (ipv6_mask->hdr.dst_addr[j])
+ input_set_byte++;
+ }
+
+ if (ipv6_mask->hdr.proto) {
+ *input |= IAVF_INSET_IPV6_NEXT_HDR;
+ input_set_byte++;
+ }
+ if (ipv6_mask->hdr.hop_limits) {
+ *input |= IAVF_INSET_IPV6_HOP_LIMIT;
+ input_set_byte++;
+ }
+ if (ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) {
+ *input |= IAVF_INSET_IPV6_TC;
+ input_set_byte += 4;
+ }
+
+ rte_memcpy(hdr->buffer_spec, &ipv6_spec->hdr,
+ sizeof(ipv6_spec->hdr));
+ rte_memcpy(hdr->buffer_mask, &ipv6_mask->hdr,
+ sizeof(ipv6_spec->hdr));
+ }
+
+ hdrs->count = ++layer;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ hdr = &hdrs->proto_hdr_w_msk[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+ if (udp_spec && udp_mask) {
+ input = &outer_input_set;
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid UDP mask");
+					return -rte_errno;
+ }
+
+ if (udp_mask->hdr.src_port) {
+ *input |= IAVF_INSET_UDP_SRC_PORT;
+ input_set_byte += 2;
+ }
+ if (udp_mask->hdr.dst_port) {
+ *input |= IAVF_INSET_UDP_DST_PORT;
+ input_set_byte += 2;
+ }
+
+ rte_memcpy(hdr->buffer_spec, &udp_spec->hdr,
+ sizeof(udp_spec->hdr));
+ rte_memcpy(hdr->buffer_mask, &udp_mask->hdr,
+ sizeof(udp_mask->hdr));
+ }
+
+ hdrs->count = ++layer;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ hdr = &hdrs->proto_hdr_w_msk[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+ if (tcp_spec && tcp_mask) {
+ input = &outer_input_set;
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid TCP mask");
+					return -rte_errno;
+ }
+
+ if (tcp_mask->hdr.src_port) {
+ *input |= IAVF_INSET_TCP_SRC_PORT;
+ input_set_byte += 2;
+ }
+ if (tcp_mask->hdr.dst_port) {
+ *input |= IAVF_INSET_TCP_DST_PORT;
+ input_set_byte += 2;
+ }
+
+ rte_memcpy(hdr->buffer_spec, &tcp_spec->hdr,
+ sizeof(tcp_spec->hdr));
+ rte_memcpy(hdr->buffer_mask, &tcp_mask->hdr,
+ sizeof(tcp_mask->hdr));
+ }
+
+ hdrs->count = ++layer;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+
+ hdr = &hdrs->proto_hdr_w_msk[layer];
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, S_VLAN);
+
+			if (vlan_spec && vlan_mask) {
+ input = &outer_input_set;
+
+ *input |= IAVF_INSET_VLAN_OUTER;
+
+ if (vlan_mask->tci)
+ input_set_byte += 2;
+
+ if (vlan_mask->inner_type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VLAN input set.");
+					return -rte_errno;
+ }
+
+ rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
+ sizeof(vlan_spec->hdr));
+ rte_memcpy(hdr->buffer_mask, &vlan_mask->hdr,
+ sizeof(vlan_mask->hdr));
+ }
+
+ hdrs->count = ++layer;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+ "Invalid pattern item.");
+ return -rte_errno;
+ }
+ }
+
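+	/* Per the virtchnl convention for masked headers, a count above
+	 * VIRTCHNL_MAX_NUM_PROTO_HDRS tells the PF that the
+	 * proto_hdr_w_msk[] (spec plus mask) array is used instead of
+	 * proto_hdr[].
+	 */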
+ hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS;
+
+ if (input_set_byte > MAX_INPUT_SET_BYTE) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   item, "Too many input set bytes");
+ return -rte_errno;
+ }
+
+	if (!outer_input_set || (outer_input_set & ~input_set_mask)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+ return 0;
+}
+
+static int
+iavf_fsub_parse_action(struct iavf_adapter *ad,
+ const struct rte_flow_action *actions,
+ uint32_t priority,
+ struct rte_flow_error *error,
+ struct iavf_fsub_conf *filter)
{
+ const struct rte_flow_action *action;
+ const struct rte_flow_action_ethdev *act_ethdev;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_rss *act_qgrop;
+ struct virtchnl_filter_action *filter_action;
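+	/* An RSS action is mapped to a queue region; only these
+	 * region sizes are accepted by the checks below.
+	 */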
+ uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
+ 2, 4, 8, 16, 32, 64, 128};
+ uint16_t i, num = 0, dest_num = 0, vf_num = 0;
+ uint16_t rule_port_id;
+
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+ vf_num++;
+ filter_action = &filter->sub_fltr.actions.actions[num];
+
+ act_ethdev = action->conf;
+ rule_port_id = ad->dev_data->port_id;
+ if (rule_port_id != act_ethdev->port_id)
+ goto error1;
+
+ filter->sub_fltr.actions.count = ++num;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ dest_num++;
+ filter_action = &filter->sub_fltr.actions.actions[num];
+
+ act_q = action->conf;
+ if (act_q->index >= ad->dev_data->nb_rx_queues)
+ goto error2;
+
+ filter_action->type = VIRTCHNL_ACTION_QUEUE;
+ filter_action->act_conf.queue.index = act_q->index;
+ filter->sub_fltr.actions.count = ++num;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ dest_num++;
+ filter_action = &filter->sub_fltr.actions.actions[num];
+
+ act_qgrop = action->conf;
+ if (act_qgrop->queue_num <= 1)
+ goto error2;
+
+ filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+ filter_action->act_conf.queue.index =
+ act_qgrop->queue[0];
+ for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
+ if (act_qgrop->queue_num ==
+ valid_qgrop_number[i])
+ break;
+ }
+
+ if (i == MAX_QGRP_NUM_TYPE)
+ goto error2;
+
+ if ((act_qgrop->queue[0] + act_qgrop->queue_num) >
+ ad->dev_data->nb_rx_queues)
+ goto error3;
+
+ for (i = 0; i < act_qgrop->queue_num - 1; i++)
+ if (act_qgrop->queue[i + 1] !=
+ act_qgrop->queue[i] + 1)
+ goto error4;
+
+ filter_action->act_conf.queue.region = act_qgrop->queue_num;
+ filter->sub_fltr.actions.count = ++num;
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid action type");
+ return -rte_errno;
+ }
+ }
+
+ /* 0 denotes lowest priority of recipe and highest priority
+ * of rte_flow. Change rte_flow priority into recipe priority.
+ */
+ filter->sub_fltr.priority = priority;
+
+ if (num > VIRTCHNL_MAX_NUM_ACTIONS) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Action numbers exceed the maximum value");
+ return -rte_errno;
+ }
+
+ if (vf_num == 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid action, vf action must be added");
+ return -rte_errno;
+ }
+
+ if (dest_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Unsupported action combination");
+ return -rte_errno;
+ }
+
+ return 0;
+
+error1:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid ethdev_port_id");
+ return -rte_errno;
+
+error2:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid action type or queue number");
+ return -rte_errno;
+
+error3:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid queue region indexes");
+ return -rte_errno;
+
+error4:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Discontinuous queue region");
return -rte_errno;
}
+static int
+iavf_fsub_check_action(const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *action;
+ enum rte_flow_action_type action_type;
+ uint16_t actions_num = 0;
+ bool vf_valid = false;
+ bool queue_valid = false;
+
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ action_type = action->type;
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+ vf_valid = true;
+ actions_num++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ queue_valid = true;
+ actions_num++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ actions_num++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid action type");
+ return -rte_errno;
+ }
+ }
+
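+	/* Accept either a single non-queue action (port representor or
+	 * drop), or a port-representor action combined with exactly one
+	 * queue/RSS action.
+	 */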
+ if (!((actions_num == 1 && !queue_valid) ||
+ (actions_num == 2 && vf_valid && queue_valid))) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid action number");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+iavf_fsub_parse(struct iavf_adapter *ad,
+ struct iavf_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ void **meta,
+ struct rte_flow_error *error)
+{
+ struct iavf_fsub_conf *filter;
+ struct iavf_pattern_match_item *pattern_match_item = NULL;
+ int ret = 0;
+ uint32_t priority = 0;
+
+ filter = rte_zmalloc(NULL, sizeof(*filter), 0);
+ if (!filter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "No memory for iavf_fsub_conf_ptr");
+		ret = -rte_errno;
+		goto error;
+ }
+
+ /* search flow subscribe pattern */
+ pattern_match_item = iavf_search_pattern_match_item(pattern, array,
+ array_len, error);
+	if (!pattern_match_item) {
+		ret = -rte_errno;
+		goto error;
+	}
+
+ /* parse flow subscribe pattern */
+ ret = iavf_fsub_parse_pattern(pattern,
+ pattern_match_item->input_set_mask,
+ error, filter);
+ if (ret)
+ goto error;
+
+ /* check flow subscribe pattern action */
+ ret = iavf_fsub_check_action(actions, error);
+ if (ret)
+ goto error;
+
+ /* parse flow subscribe pattern action */
+	ret = iavf_fsub_parse_action(ad, actions, priority,
+ error, filter);
+ if (ret)
+ goto error;
+
+ if (meta)
+ *meta = filter;
+
+error:
+	rte_free(pattern_match_item);
+	if (ret)
+		rte_free(filter);
+	return ret;
+}
+
static int
iavf_fsub_init(struct iavf_adapter *ad)
{
--
2.25.1