From: vanshika.shukla@nxp.com
To: dev@dpdk.org, Hemant Agrawal <hemant.agrawal@nxp.com>,
Sachin Saxena <sachin.saxena@nxp.com>
Cc: Jun Yang <jun.yang@nxp.com>
Subject: [v2 24/43] net/dpaa2: flow API refactor
Date: Wed, 18 Sep 2024 13:20:37 +0530 [thread overview]
Message-ID: <20240918075056.1838654-25-vanshika.shukla@nxp.com> (raw)
In-Reply-To: <20240918075056.1838654-1-vanshika.shukla@nxp.com>
From: Jun Yang <jun.yang@nxp.com>
1) Gather redundant code with same logic from various protocol
handlers to create common functions.
2) struct dpaa2_key_profile is used to describe each extract's
offset within the rule and its size. This makes it easy to insert a
new extract before the IP address extract.
3) The IP address profile is used to describe the IPv4/IPv6 address
extracts located at the end of the rule.
4) The L4 ports profile is used to describe the positions and offsets
of the port fields within the rule.
5) Once the extracts of a QoS/FS table are updated, go through all
the existing flows of this table to update the rule data.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
drivers/net/dpaa2/dpaa2_ethdev.c | 27 +-
drivers/net/dpaa2/dpaa2_ethdev.h | 90 +-
drivers/net/dpaa2/dpaa2_flow.c | 4839 ++++++++++++------------------
3 files changed, 2030 insertions(+), 2926 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index f0b4843472..533effd72b 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -2805,39 +2805,20 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
/* Init fields w.r.t. classification */
memset(&priv->extract.qos_key_extract, 0,
sizeof(struct dpaa2_key_extract));
- priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
+ priv->extract.qos_extract_param = rte_malloc(NULL, 256, 64);
if (!priv->extract.qos_extract_param) {
- DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
- " classification ", ret);
+ DPAA2_PMD_ERR("Memory alloc failed");
goto init_err;
}
- priv->extract.qos_key_extract.key_info.ipv4_src_offset =
- IP_ADDRESS_OFFSET_INVALID;
- priv->extract.qos_key_extract.key_info.ipv4_dst_offset =
- IP_ADDRESS_OFFSET_INVALID;
- priv->extract.qos_key_extract.key_info.ipv6_src_offset =
- IP_ADDRESS_OFFSET_INVALID;
- priv->extract.qos_key_extract.key_info.ipv6_dst_offset =
- IP_ADDRESS_OFFSET_INVALID;
for (i = 0; i < MAX_TCS; i++) {
memset(&priv->extract.tc_key_extract[i], 0,
sizeof(struct dpaa2_key_extract));
- priv->extract.tc_extract_param[i] =
- (size_t)rte_malloc(NULL, 256, 64);
+ priv->extract.tc_extract_param[i] = rte_malloc(NULL, 256, 64);
if (!priv->extract.tc_extract_param[i]) {
- DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classification",
- ret);
+ DPAA2_PMD_ERR("Memory alloc failed");
goto init_err;
}
- priv->extract.tc_key_extract[i].key_info.ipv4_src_offset =
- IP_ADDRESS_OFFSET_INVALID;
- priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset =
- IP_ADDRESS_OFFSET_INVALID;
- priv->extract.tc_key_extract[i].key_info.ipv6_src_offset =
- IP_ADDRESS_OFFSET_INVALID;
- priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset =
- IP_ADDRESS_OFFSET_INVALID;
}
ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index 6625afaba3..ea1c1b5117 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -145,14 +145,6 @@ extern bool dpaa2_enable_ts[];
extern uint64_t dpaa2_timestamp_rx_dynflag;
extern int dpaa2_timestamp_dynfield_offset;
-#define DPAA2_QOS_TABLE_RECONFIGURE 1
-#define DPAA2_FS_TABLE_RECONFIGURE 2
-
-#define DPAA2_QOS_TABLE_IPADDR_EXTRACT 4
-#define DPAA2_FS_TABLE_IPADDR_EXTRACT 8
-
-#define DPAA2_FLOW_MAX_KEY_SIZE 16
-
/* Externally defined */
extern const struct rte_flow_ops dpaa2_flow_ops;
@@ -160,29 +152,85 @@ extern const struct rte_tm_ops dpaa2_tm_ops;
extern bool dpaa2_enable_err_queue;
-#define IP_ADDRESS_OFFSET_INVALID (-1)
+struct ipv4_sd_addr_extract_rule {
+ uint32_t ipv4_src;
+ uint32_t ipv4_dst;
+};
+
+struct ipv6_sd_addr_extract_rule {
+ uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE];
+ uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE];
+};
-struct dpaa2_key_info {
+struct ipv4_ds_addr_extract_rule {
+ uint32_t ipv4_dst;
+ uint32_t ipv4_src;
+};
+
+struct ipv6_ds_addr_extract_rule {
+ uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE];
+ uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE];
+};
+
+union ip_addr_extract_rule {
+ struct ipv4_sd_addr_extract_rule ipv4_sd_addr;
+ struct ipv6_sd_addr_extract_rule ipv6_sd_addr;
+ struct ipv4_ds_addr_extract_rule ipv4_ds_addr;
+ struct ipv6_ds_addr_extract_rule ipv6_ds_addr;
+};
+
+union ip_src_addr_extract_rule {
+ uint32_t ipv4_src;
+ uint8_t ipv6_src[NH_FLD_IPV6_ADDR_SIZE];
+};
+
+union ip_dst_addr_extract_rule {
+ uint32_t ipv4_dst;
+ uint8_t ipv6_dst[NH_FLD_IPV6_ADDR_SIZE];
+};
+
+enum ip_addr_extract_type {
+ IP_NONE_ADDR_EXTRACT,
+ IP_SRC_EXTRACT,
+ IP_DST_EXTRACT,
+ IP_SRC_DST_EXTRACT,
+ IP_DST_SRC_EXTRACT
+};
+
+struct key_prot_field {
+ enum net_prot prot;
+ uint32_t key_field;
+};
+
+struct dpaa2_key_profile {
+ uint8_t num;
uint8_t key_offset[DPKG_MAX_NUM_OF_EXTRACTS];
uint8_t key_size[DPKG_MAX_NUM_OF_EXTRACTS];
- /* Special for IP address. */
- int ipv4_src_offset;
- int ipv4_dst_offset;
- int ipv6_src_offset;
- int ipv6_dst_offset;
- uint8_t key_total_size;
+
+ enum ip_addr_extract_type ip_addr_type;
+ uint8_t ip_addr_extract_pos;
+ uint8_t ip_addr_extract_off;
+
+ uint8_t l4_src_port_present;
+ uint8_t l4_src_port_pos;
+ uint8_t l4_src_port_offset;
+ uint8_t l4_dst_port_present;
+ uint8_t l4_dst_port_pos;
+ uint8_t l4_dst_port_offset;
+ struct key_prot_field prot_field[DPKG_MAX_NUM_OF_EXTRACTS];
+ uint16_t key_max_size;
};
struct dpaa2_key_extract {
struct dpkg_profile_cfg dpkg;
- struct dpaa2_key_info key_info;
+ struct dpaa2_key_profile key_profile;
};
struct extract_s {
struct dpaa2_key_extract qos_key_extract;
struct dpaa2_key_extract tc_key_extract[MAX_TCS];
- uint64_t qos_extract_param;
- uint64_t tc_extract_param[MAX_TCS];
+ uint8_t *qos_extract_param;
+ uint8_t *tc_extract_param[MAX_TCS];
};
struct dpaa2_dev_priv {
@@ -233,7 +281,8 @@ struct dpaa2_dev_priv {
/* Stores correction offset for one step timestamping */
uint16_t ptp_correction_offset;
- LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
+ struct dpaa2_dev_flow *curr;
+ LIST_HEAD(, dpaa2_dev_flow) flows;
LIST_HEAD(nodes, dpaa2_tm_node) nodes;
LIST_HEAD(shaper_profiles, dpaa2_tm_shaper_profile) shaper_profiles;
};
@@ -292,7 +341,6 @@ uint16_t dpaa2_dev_tx_multi_txq_ordered(void **queue,
void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci, struct dpaa2_queue *dpaa2_q);
void dpaa2_flow_clean(struct rte_eth_dev *dev);
uint16_t dpaa2_dev_tx_conf(void *queue) __rte_unused;
-int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev);
int dpaa2_timesync_enable(struct rte_eth_dev *dev);
int dpaa2_timesync_disable(struct rte_eth_dev *dev);
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index c30c5225c7..0522fdb026 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018-2021 NXP
+ * Copyright 2018-2022 NXP
*/
#include <sys/queue.h>
@@ -27,41 +27,40 @@
* MC/WRIOP are not able to identify
* the l4 protocol with l4 ports.
*/
-int mc_l4_port_identification;
+static int mc_l4_port_identification;
static char *dpaa2_flow_control_log;
static uint16_t dpaa2_flow_miss_flow_id; /* Default miss flow id is 0. */
-#define FIXED_ENTRY_SIZE DPNI_MAX_KEY_SIZE
-
-enum flow_rule_ipaddr_type {
- FLOW_NONE_IPADDR,
- FLOW_IPV4_ADDR,
- FLOW_IPV6_ADDR
+enum dpaa2_flow_entry_size {
+ DPAA2_FLOW_ENTRY_MIN_SIZE = (DPNI_MAX_KEY_SIZE / 2),
+ DPAA2_FLOW_ENTRY_MAX_SIZE = DPNI_MAX_KEY_SIZE
};
-struct flow_rule_ipaddr {
- enum flow_rule_ipaddr_type ipaddr_type;
- int qos_ipsrc_offset;
- int qos_ipdst_offset;
- int fs_ipsrc_offset;
- int fs_ipdst_offset;
+enum dpaa2_flow_dist_type {
+ DPAA2_FLOW_QOS_TYPE = 1 << 0,
+ DPAA2_FLOW_FS_TYPE = 1 << 1
};
-struct rte_flow {
- LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+#define DPAA2_FLOW_RAW_OFFSET_FIELD_SHIFT 16
+#define DPAA2_FLOW_MAX_KEY_SIZE 16
+
+struct dpaa2_dev_flow {
+ LIST_ENTRY(dpaa2_dev_flow) next;
struct dpni_rule_cfg qos_rule;
+ uint8_t *qos_key_addr;
+ uint8_t *qos_mask_addr;
+ uint16_t qos_rule_size;
struct dpni_rule_cfg fs_rule;
uint8_t qos_real_key_size;
uint8_t fs_real_key_size;
+ uint8_t *fs_key_addr;
+ uint8_t *fs_mask_addr;
+ uint16_t fs_rule_size;
uint8_t tc_id; /** Traffic Class ID. */
uint8_t tc_index; /** index within this Traffic Class. */
- enum rte_flow_action_type action;
- /* Special for IP address to specify the offset
- * in key/mask.
- */
- struct flow_rule_ipaddr ipaddr_rule;
- struct dpni_fs_action_cfg action_cfg;
+ enum rte_flow_action_type action_type;
+ struct dpni_fs_action_cfg fs_action_cfg;
};
static const
@@ -94,9 +93,6 @@ enum rte_flow_action_type dpaa2_supported_fs_action_type[] = {
RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
};
-/* Max of enum rte_flow_item_type + 1, for both IPv4 and IPv6*/
-#define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
-
#ifndef __cplusplus
static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
.hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
@@ -155,11 +151,12 @@ static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
.protocol = RTE_BE16(0xffff),
};
-
#endif
-static inline void dpaa2_prot_field_string(
- enum net_prot prot, uint32_t field,
+#define DPAA2_FLOW_DUMP printf
+
+static inline void
+dpaa2_prot_field_string(uint32_t prot, uint32_t field,
char *string)
{
if (!dpaa2_flow_control_log)
@@ -234,60 +231,84 @@ static inline void dpaa2_prot_field_string(
}
}
-static inline void dpaa2_flow_qos_table_extracts_log(
- const struct dpaa2_dev_priv *priv, FILE *f)
+static inline void
+dpaa2_flow_qos_extracts_log(const struct dpaa2_dev_priv *priv)
{
int idx;
char string[32];
+ const struct dpkg_profile_cfg *dpkg =
+ &priv->extract.qos_key_extract.dpkg;
+ const struct dpkg_extract *extract;
+ enum dpkg_extract_type type;
+ enum net_prot prot;
+ uint32_t field;
if (!dpaa2_flow_control_log)
return;
- fprintf(f, "Setup QoS table: number of extracts: %d\r\n",
- priv->extract.qos_key_extract.dpkg.num_extracts);
- for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
- idx++) {
- dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
- .extracts[idx].extract.from_hdr.prot,
- priv->extract.qos_key_extract.dpkg.extracts[idx]
- .extract.from_hdr.field,
- string);
- fprintf(f, "%s", string);
- if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
- fprintf(f, " / ");
- }
- fprintf(f, "\r\n");
+ DPAA2_FLOW_DUMP("QoS table: %d extracts\r\n",
+ dpkg->num_extracts);
+ for (idx = 0; idx < dpkg->num_extracts; idx++) {
+ extract = &dpkg->extracts[idx];
+ type = extract->type;
+ if (type == DPKG_EXTRACT_FROM_HDR) {
+ prot = extract->extract.from_hdr.prot;
+ field = extract->extract.from_hdr.field;
+ dpaa2_prot_field_string(prot, field,
+ string);
+ } else if (type == DPKG_EXTRACT_FROM_DATA) {
+ sprintf(string, "raw offset/len: %d/%d",
+ extract->extract.from_data.offset,
+ extract->extract.from_data.size);
+ }
+ DPAA2_FLOW_DUMP("%s", string);
+ if ((idx + 1) < dpkg->num_extracts)
+ DPAA2_FLOW_DUMP(" / ");
+ }
+ DPAA2_FLOW_DUMP("\r\n");
}
-static inline void dpaa2_flow_fs_table_extracts_log(
- const struct dpaa2_dev_priv *priv, int tc_id, FILE *f)
+static inline void
+dpaa2_flow_fs_extracts_log(const struct dpaa2_dev_priv *priv,
+ int tc_id)
{
int idx;
char string[32];
+ const struct dpkg_profile_cfg *dpkg =
+ &priv->extract.tc_key_extract[tc_id].dpkg;
+ const struct dpkg_extract *extract;
+ enum dpkg_extract_type type;
+ enum net_prot prot;
+ uint32_t field;
if (!dpaa2_flow_control_log)
return;
- fprintf(f, "Setup FS table: number of extracts of TC[%d]: %d\r\n",
- tc_id, priv->extract.tc_key_extract[tc_id]
- .dpkg.num_extracts);
- for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
- .dpkg.num_extracts; idx++) {
- dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
- .dpkg.extracts[idx].extract.from_hdr.prot,
- priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
- .extract.from_hdr.field,
- string);
- fprintf(f, "%s", string);
- if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
- .dpkg.num_extracts)
- fprintf(f, " / ");
- }
- fprintf(f, "\r\n");
+ DPAA2_FLOW_DUMP("FS table: %d extracts in TC[%d]\r\n",
+ dpkg->num_extracts, tc_id);
+ for (idx = 0; idx < dpkg->num_extracts; idx++) {
+ extract = &dpkg->extracts[idx];
+ type = extract->type;
+ if (type == DPKG_EXTRACT_FROM_HDR) {
+ prot = extract->extract.from_hdr.prot;
+ field = extract->extract.from_hdr.field;
+ dpaa2_prot_field_string(prot, field,
+ string);
+ } else if (type == DPKG_EXTRACT_FROM_DATA) {
+ sprintf(string, "raw offset/len: %d/%d",
+ extract->extract.from_data.offset,
+ extract->extract.from_data.size);
+ }
+ DPAA2_FLOW_DUMP("%s", string);
+ if ((idx + 1) < dpkg->num_extracts)
+ DPAA2_FLOW_DUMP(" / ");
+ }
+ DPAA2_FLOW_DUMP("\r\n");
}
-static inline void dpaa2_flow_qos_entry_log(
- const char *log_info, const struct rte_flow *flow, int qos_index, FILE *f)
+static inline void
+dpaa2_flow_qos_entry_log(const char *log_info,
+ const struct dpaa2_dev_flow *flow, int qos_index)
{
int idx;
uint8_t *key, *mask;
@@ -295,27 +316,34 @@ static inline void dpaa2_flow_qos_entry_log(
if (!dpaa2_flow_control_log)
return;
- fprintf(f, "\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
- log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
-
- key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
- mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
+ if (qos_index >= 0) {
+ DPAA2_FLOW_DUMP("%s QoS entry[%d](size %d/%d) for TC[%d]\r\n",
+ log_info, qos_index, flow->qos_rule_size,
+ flow->qos_rule.key_size,
+ flow->tc_id);
+ } else {
+ DPAA2_FLOW_DUMP("%s QoS entry(size %d/%d) for TC[%d]\r\n",
+ log_info, flow->qos_rule_size,
+ flow->qos_rule.key_size,
+ flow->tc_id);
+ }
- fprintf(f, "key:\r\n");
- for (idx = 0; idx < flow->qos_real_key_size; idx++)
- fprintf(f, "%02x ", key[idx]);
+ key = flow->qos_key_addr;
+ mask = flow->qos_mask_addr;
- fprintf(f, "\r\nmask:\r\n");
- for (idx = 0; idx < flow->qos_real_key_size; idx++)
- fprintf(f, "%02x ", mask[idx]);
+ DPAA2_FLOW_DUMP("key:\r\n");
+ for (idx = 0; idx < flow->qos_rule_size; idx++)
+ DPAA2_FLOW_DUMP("%02x ", key[idx]);
- fprintf(f, "\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
- flow->ipaddr_rule.qos_ipsrc_offset,
- flow->ipaddr_rule.qos_ipdst_offset);
+ DPAA2_FLOW_DUMP("\r\nmask:\r\n");
+ for (idx = 0; idx < flow->qos_rule_size; idx++)
+ DPAA2_FLOW_DUMP("%02x ", mask[idx]);
+ DPAA2_FLOW_DUMP("\r\n");
}
-static inline void dpaa2_flow_fs_entry_log(
- const char *log_info, const struct rte_flow *flow, FILE *f)
+static inline void
+dpaa2_flow_fs_entry_log(const char *log_info,
+ const struct dpaa2_dev_flow *flow)
{
int idx;
uint8_t *key, *mask;
@@ -323,187 +351,432 @@ static inline void dpaa2_flow_fs_entry_log(
if (!dpaa2_flow_control_log)
return;
- fprintf(f, "\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
- log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
+ DPAA2_FLOW_DUMP("%s FS/TC entry[%d](size %d/%d) of TC[%d]\r\n",
+ log_info, flow->tc_index,
+ flow->fs_rule_size, flow->fs_rule.key_size,
+ flow->tc_id);
+
+ key = flow->fs_key_addr;
+ mask = flow->fs_mask_addr;
+
+ DPAA2_FLOW_DUMP("key:\r\n");
+ for (idx = 0; idx < flow->fs_rule_size; idx++)
+ DPAA2_FLOW_DUMP("%02x ", key[idx]);
+
+ DPAA2_FLOW_DUMP("\r\nmask:\r\n");
+ for (idx = 0; idx < flow->fs_rule_size; idx++)
+ DPAA2_FLOW_DUMP("%02x ", mask[idx]);
+ DPAA2_FLOW_DUMP("\r\n");
+}
- key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
- mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
+static int
+dpaa2_flow_ip_address_extract(enum net_prot prot,
+ uint32_t field)
+{
+ if (prot == NET_PROT_IPV4 &&
+ (field == NH_FLD_IPV4_SRC_IP ||
+ field == NH_FLD_IPV4_DST_IP))
+ return true;
+ else if (prot == NET_PROT_IPV6 &&
+ (field == NH_FLD_IPV6_SRC_IP ||
+ field == NH_FLD_IPV6_DST_IP))
+ return true;
+ else if (prot == NET_PROT_IP &&
+ (field == NH_FLD_IP_SRC ||
+ field == NH_FLD_IP_DST))
+ return true;
- fprintf(f, "key:\r\n");
- for (idx = 0; idx < flow->fs_real_key_size; idx++)
- fprintf(f, "%02x ", key[idx]);
+ return false;
+}
- fprintf(f, "\r\nmask:\r\n");
- for (idx = 0; idx < flow->fs_real_key_size; idx++)
- fprintf(f, "%02x ", mask[idx]);
+static int
+dpaa2_flow_l4_src_port_extract(enum net_prot prot,
+ uint32_t field)
+{
+ if (prot == NET_PROT_TCP &&
+ field == NH_FLD_TCP_PORT_SRC)
+ return true;
+ else if (prot == NET_PROT_UDP &&
+ field == NH_FLD_UDP_PORT_SRC)
+ return true;
+ else if (prot == NET_PROT_SCTP &&
+ field == NH_FLD_SCTP_PORT_SRC)
+ return true;
+
+ return false;
+}
- fprintf(f, "\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
- flow->ipaddr_rule.fs_ipsrc_offset,
- flow->ipaddr_rule.fs_ipdst_offset);
+static int
+dpaa2_flow_l4_dst_port_extract(enum net_prot prot,
+ uint32_t field)
+{
+ if (prot == NET_PROT_TCP &&
+ field == NH_FLD_TCP_PORT_DST)
+ return true;
+ else if (prot == NET_PROT_UDP &&
+ field == NH_FLD_UDP_PORT_DST)
+ return true;
+ else if (prot == NET_PROT_SCTP &&
+ field == NH_FLD_SCTP_PORT_DST)
+ return true;
+
+ return false;
}
-static inline void dpaa2_flow_extract_key_set(
- struct dpaa2_key_info *key_info, int index, uint8_t size)
+static int
+dpaa2_flow_add_qos_rule(struct dpaa2_dev_priv *priv,
+ struct dpaa2_dev_flow *flow)
{
- key_info->key_size[index] = size;
- if (index > 0) {
- key_info->key_offset[index] =
- key_info->key_offset[index - 1] +
- key_info->key_size[index - 1];
- } else {
- key_info->key_offset[index] = 0;
+ uint16_t qos_index;
+ int ret;
+ struct fsl_mc_io *dpni = priv->hw;
+
+ if (priv->num_rx_tc <= 1 &&
+ flow->action_type != RTE_FLOW_ACTION_TYPE_RSS) {
+ DPAA2_PMD_WARN("No QoS Table for FS");
+ return -EINVAL;
}
- key_info->key_total_size += size;
+
+ /* QoS entry added is only effective for multiple TCs.*/
+ qos_index = flow->tc_id * priv->fs_entries + flow->tc_index;
+ if (qos_index >= priv->qos_entries) {
+ DPAA2_PMD_ERR("QoS table full(%d >= %d)",
+ qos_index, priv->qos_entries);
+ return -EINVAL;
+ }
+
+ dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
+
+ ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
+ priv->token, &flow->qos_rule,
+ flow->tc_id, qos_index,
+ 0, 0);
+ if (ret < 0) {
+ DPAA2_PMD_ERR("Add entry(%d) to table(%d) failed",
+ qos_index, flow->tc_id);
+ return ret;
+ }
+
+ return 0;
}
-static int dpaa2_flow_extract_add(
- struct dpaa2_key_extract *key_extract,
- enum net_prot prot,
- uint32_t field, uint8_t field_size)
+static int
+dpaa2_flow_add_fs_rule(struct dpaa2_dev_priv *priv,
+ struct dpaa2_dev_flow *flow)
{
- int index, ip_src = -1, ip_dst = -1;
- struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
- struct dpaa2_key_info *key_info = &key_extract->key_info;
+ int ret;
+ struct fsl_mc_io *dpni = priv->hw;
- if (dpkg->num_extracts >=
- DPKG_MAX_NUM_OF_EXTRACTS) {
- DPAA2_PMD_WARN("Number of extracts overflows");
- return -1;
+ if (flow->tc_index >= priv->fs_entries) {
+ DPAA2_PMD_ERR("FS table full(%d >= %d)",
+ flow->tc_index, priv->fs_entries);
+ return -EINVAL;
}
- /* Before reorder, the IP SRC and IP DST are already last
- * extract(s).
- */
- for (index = 0; index < dpkg->num_extracts; index++) {
- if (dpkg->extracts[index].extract.from_hdr.prot ==
- NET_PROT_IP) {
- if (dpkg->extracts[index].extract.from_hdr.field ==
- NH_FLD_IP_SRC) {
- ip_src = index;
- }
- if (dpkg->extracts[index].extract.from_hdr.field ==
- NH_FLD_IP_DST) {
- ip_dst = index;
+
+ dpaa2_flow_fs_entry_log("Start add", flow);
+
+ ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
+ priv->token, flow->tc_id,
+ flow->tc_index, &flow->fs_rule,
+ &flow->fs_action_cfg);
+ if (ret < 0) {
+ DPAA2_PMD_ERR("Add rule(%d) to FS table(%d) failed",
+ flow->tc_index, flow->tc_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_flow_rule_insert_hole(struct dpaa2_dev_flow *flow,
+ int offset, int size,
+ enum dpaa2_flow_dist_type dist_type)
+{
+ int end;
+
+ if (dist_type & DPAA2_FLOW_QOS_TYPE) {
+ end = flow->qos_rule_size;
+ if (end > offset) {
+ memmove(flow->qos_key_addr + offset + size,
+ flow->qos_key_addr + offset,
+ end - offset);
+ memset(flow->qos_key_addr + offset,
+ 0, size);
+
+ memmove(flow->qos_mask_addr + offset + size,
+ flow->qos_mask_addr + offset,
+ end - offset);
+ memset(flow->qos_mask_addr + offset,
+ 0, size);
+ }
+ flow->qos_rule_size += size;
+ }
+
+ if (dist_type & DPAA2_FLOW_FS_TYPE) {
+ end = flow->fs_rule_size;
+ if (end > offset) {
+ memmove(flow->fs_key_addr + offset + size,
+ flow->fs_key_addr + offset,
+ end - offset);
+ memset(flow->fs_key_addr + offset,
+ 0, size);
+
+ memmove(flow->fs_mask_addr + offset + size,
+ flow->fs_mask_addr + offset,
+ end - offset);
+ memset(flow->fs_mask_addr + offset,
+ 0, size);
+ }
+ flow->fs_rule_size += size;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_flow_rule_add_all(struct dpaa2_dev_priv *priv,
+ enum dpaa2_flow_dist_type dist_type,
+ uint16_t entry_size, uint8_t tc_id)
+{
+ struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
+ int ret;
+
+ while (curr) {
+ if (dist_type & DPAA2_FLOW_QOS_TYPE) {
+ if (priv->num_rx_tc > 1 ||
+ curr->action_type ==
+ RTE_FLOW_ACTION_TYPE_RSS) {
+ curr->qos_rule.key_size = entry_size;
+ ret = dpaa2_flow_add_qos_rule(priv, curr);
+ if (ret)
+ return ret;
}
}
+ if (dist_type & DPAA2_FLOW_FS_TYPE &&
+ curr->tc_id == tc_id) {
+ curr->fs_rule.key_size = entry_size;
+ ret = dpaa2_flow_add_fs_rule(priv, curr);
+ if (ret)
+ return ret;
+ }
+ curr = LIST_NEXT(curr, next);
}
- if (ip_src >= 0)
- RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
+ return 0;
+}
- if (ip_dst >= 0)
- RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
+static int
+dpaa2_flow_qos_rule_insert_hole(struct dpaa2_dev_priv *priv,
+ int offset, int size)
+{
+ struct dpaa2_dev_flow *curr;
+ int ret;
- if (prot == NET_PROT_IP &&
- (field == NH_FLD_IP_SRC ||
- field == NH_FLD_IP_DST)) {
- index = dpkg->num_extracts;
+ curr = priv->curr;
+ if (!curr) {
+ DPAA2_PMD_ERR("Current qos flow insert hole failed.");
+ return -EINVAL;
} else {
- if (ip_src >= 0 && ip_dst >= 0)
- index = dpkg->num_extracts - 2;
- else if (ip_src >= 0 || ip_dst >= 0)
- index = dpkg->num_extracts - 1;
- else
- index = dpkg->num_extracts;
+ ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
+ DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
}
- dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
- dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
- dpkg->extracts[index].extract.from_hdr.prot = prot;
- dpkg->extracts[index].extract.from_hdr.field = field;
- if (prot == NET_PROT_IP &&
- (field == NH_FLD_IP_SRC ||
- field == NH_FLD_IP_DST)) {
- dpaa2_flow_extract_key_set(key_info, index, 0);
+ curr = LIST_FIRST(&priv->flows);
+ while (curr) {
+ ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
+ DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
+ curr = LIST_NEXT(curr, next);
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_flow_fs_rule_insert_hole(struct dpaa2_dev_priv *priv,
+ int offset, int size, int tc_id)
+{
+ struct dpaa2_dev_flow *curr;
+ int ret;
+
+ curr = priv->curr;
+ if (!curr || curr->tc_id != tc_id) {
+ DPAA2_PMD_ERR("Current flow insert hole failed.");
+ return -EINVAL;
} else {
- dpaa2_flow_extract_key_set(key_info, index, field_size);
+ ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
+ DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
}
- if (prot == NET_PROT_IP) {
- if (field == NH_FLD_IP_SRC) {
- if (key_info->ipv4_dst_offset >= 0) {
- key_info->ipv4_src_offset =
- key_info->ipv4_dst_offset +
- NH_FLD_IPV4_ADDR_SIZE;
- } else {
- key_info->ipv4_src_offset =
- key_info->key_offset[index - 1] +
- key_info->key_size[index - 1];
- }
- if (key_info->ipv6_dst_offset >= 0) {
- key_info->ipv6_src_offset =
- key_info->ipv6_dst_offset +
- NH_FLD_IPV6_ADDR_SIZE;
- } else {
- key_info->ipv6_src_offset =
- key_info->key_offset[index - 1] +
- key_info->key_size[index - 1];
- }
- } else if (field == NH_FLD_IP_DST) {
- if (key_info->ipv4_src_offset >= 0) {
- key_info->ipv4_dst_offset =
- key_info->ipv4_src_offset +
- NH_FLD_IPV4_ADDR_SIZE;
- } else {
- key_info->ipv4_dst_offset =
- key_info->key_offset[index - 1] +
- key_info->key_size[index - 1];
- }
- if (key_info->ipv6_src_offset >= 0) {
- key_info->ipv6_dst_offset =
- key_info->ipv6_src_offset +
- NH_FLD_IPV6_ADDR_SIZE;
- } else {
- key_info->ipv6_dst_offset =
- key_info->key_offset[index - 1] +
- key_info->key_size[index - 1];
- }
+ curr = LIST_FIRST(&priv->flows);
+
+ while (curr) {
+ if (curr->tc_id != tc_id) {
+ curr = LIST_NEXT(curr, next);
+ continue;
}
+ ret = dpaa2_flow_rule_insert_hole(curr, offset, size,
+ DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
+ curr = LIST_NEXT(curr, next);
}
- if (index == dpkg->num_extracts) {
- dpkg->num_extracts++;
- return 0;
+ return 0;
+}
+
+/* Move the IPv4/IPv6 address extracts to make room for a new extract
+ * inserted before them. Current MC/WRIOP only supports generic IP
+ * extracts whose address size is not fixed, so they must stay at the
+ * end of the extract list; otherwise the positions of the extracts
+ * following them cannot be identified.
+ */
+static int
+dpaa2_flow_key_profile_advance(enum net_prot prot,
+ uint32_t field, uint8_t field_size,
+ struct dpaa2_dev_priv *priv,
+ enum dpaa2_flow_dist_type dist_type, int tc_id,
+ int *insert_offset)
+{
+ int offset, ret;
+ struct dpaa2_key_profile *key_profile;
+ int num, pos;
+
+ if (dpaa2_flow_ip_address_extract(prot, field)) {
+ DPAA2_PMD_ERR("%s only for none IP address extract",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (dist_type == DPAA2_FLOW_QOS_TYPE)
+ key_profile = &priv->extract.qos_key_extract.key_profile;
+ else
+ key_profile = &priv->extract.tc_key_extract[tc_id].key_profile;
+
+ num = key_profile->num;
+
+ if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Number of extracts overflows");
+ return -EINVAL;
+ }
+
+ if (key_profile->ip_addr_type != IP_NONE_ADDR_EXTRACT) {
+ offset = key_profile->ip_addr_extract_off;
+ pos = key_profile->ip_addr_extract_pos;
+ key_profile->ip_addr_extract_pos++;
+ key_profile->ip_addr_extract_off += field_size;
+ if (dist_type == DPAA2_FLOW_QOS_TYPE) {
+ ret = dpaa2_flow_qos_rule_insert_hole(priv,
+ offset, field_size);
+ } else {
+ ret = dpaa2_flow_fs_rule_insert_hole(priv,
+ offset, field_size, tc_id);
+ }
+ if (ret)
+ return ret;
+ } else {
+ pos = num;
+ }
+
+ if (pos > 0) {
+ key_profile->key_offset[pos] =
+ key_profile->key_offset[pos - 1] +
+ key_profile->key_size[pos - 1];
+ } else {
+ key_profile->key_offset[pos] = 0;
+ }
+
+ key_profile->key_size[pos] = field_size;
+ key_profile->prot_field[pos].prot = prot;
+ key_profile->prot_field[pos].key_field = field;
+ key_profile->num++;
+
+ if (insert_offset)
+ *insert_offset = key_profile->key_offset[pos];
+
+ if (dpaa2_flow_l4_src_port_extract(prot, field)) {
+ key_profile->l4_src_port_present = 1;
+ key_profile->l4_src_port_pos = pos;
+ key_profile->l4_src_port_offset =
+ key_profile->key_offset[pos];
+ } else if (dpaa2_flow_l4_dst_port_extract(prot, field)) {
+ key_profile->l4_dst_port_present = 1;
+ key_profile->l4_dst_port_pos = pos;
+ key_profile->l4_dst_port_offset =
+ key_profile->key_offset[pos];
+ }
+ key_profile->key_max_size += field_size;
+
+ return pos;
+}
+
+static int
+dpaa2_flow_extract_add_hdr(enum net_prot prot,
+ uint32_t field, uint8_t field_size,
+ struct dpaa2_dev_priv *priv,
+ enum dpaa2_flow_dist_type dist_type, int tc_id,
+ int *insert_offset)
+{
+ int pos, i;
+ struct dpaa2_key_extract *key_extract;
+ struct dpkg_profile_cfg *dpkg;
+ struct dpkg_extract *extracts;
+
+ if (dist_type == DPAA2_FLOW_QOS_TYPE)
+ key_extract = &priv->extract.qos_key_extract;
+ else
+ key_extract = &priv->extract.tc_key_extract[tc_id];
+
+ dpkg = &key_extract->dpkg;
+ extracts = dpkg->extracts;
+
+ if (dpaa2_flow_ip_address_extract(prot, field)) {
+ DPAA2_PMD_ERR("%s only for none IP address extract",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (dpkg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Number of extracts overflows");
+ return -EINVAL;
}
- if (ip_src >= 0) {
- ip_src++;
- dpkg->extracts[ip_src].type =
- DPKG_EXTRACT_FROM_HDR;
- dpkg->extracts[ip_src].extract.from_hdr.type =
- DPKG_FULL_FIELD;
- dpkg->extracts[ip_src].extract.from_hdr.prot =
- NET_PROT_IP;
- dpkg->extracts[ip_src].extract.from_hdr.field =
- NH_FLD_IP_SRC;
- dpaa2_flow_extract_key_set(key_info, ip_src, 0);
- key_info->ipv4_src_offset += field_size;
- key_info->ipv6_src_offset += field_size;
- }
- if (ip_dst >= 0) {
- ip_dst++;
- dpkg->extracts[ip_dst].type =
- DPKG_EXTRACT_FROM_HDR;
- dpkg->extracts[ip_dst].extract.from_hdr.type =
- DPKG_FULL_FIELD;
- dpkg->extracts[ip_dst].extract.from_hdr.prot =
- NET_PROT_IP;
- dpkg->extracts[ip_dst].extract.from_hdr.field =
- NH_FLD_IP_DST;
- dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
- key_info->ipv4_dst_offset += field_size;
- key_info->ipv6_dst_offset += field_size;
+ pos = dpaa2_flow_key_profile_advance(prot,
+ field, field_size, priv,
+ dist_type, tc_id,
+ insert_offset);
+ if (pos < 0)
+ return pos;
+
+ if (pos != dpkg->num_extracts) {
+ /* Not the last pos, must have IP address extract.*/
+ for (i = dpkg->num_extracts - 1; i >= pos; i--) {
+ memcpy(&extracts[i + 1],
+ &extracts[i], sizeof(struct dpkg_extract));
+ }
}
+ extracts[pos].type = DPKG_EXTRACT_FROM_HDR;
+ extracts[pos].extract.from_hdr.prot = prot;
+ extracts[pos].extract.from_hdr.type = DPKG_FULL_FIELD;
+ extracts[pos].extract.from_hdr.field = field;
+
dpkg->num_extracts++;
return 0;
}
-static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
- int size)
+static int
+dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
+ int size)
{
struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
- struct dpaa2_key_info *key_info = &key_extract->key_info;
+ struct dpaa2_key_profile *key_info = &key_extract->key_profile;
int last_extract_size, index;
if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
@@ -531,83 +804,58 @@ static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
DPAA2_FLOW_MAX_KEY_SIZE * index;
}
- key_info->key_total_size = size;
+ key_info->key_max_size = size;
return 0;
}
-/* Protocol discrimination.
- * Discriminate IPv4/IPv6/vLan by Eth type.
- * Discriminate UDP/TCP/ICMP by next proto of IP.
- */
static inline int
-dpaa2_flow_proto_discrimination_extract(
- struct dpaa2_key_extract *key_extract,
- enum rte_flow_item_type type)
+dpaa2_flow_extract_search(struct dpaa2_key_profile *key_profile,
+ enum net_prot prot, uint32_t key_field)
{
- if (type == RTE_FLOW_ITEM_TYPE_ETH) {
- return dpaa2_flow_extract_add(
- key_extract, NET_PROT_ETH,
- NH_FLD_ETH_TYPE,
- sizeof(rte_be16_t));
- } else if (type == (enum rte_flow_item_type)
- DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
- return dpaa2_flow_extract_add(
- key_extract, NET_PROT_IP,
- NH_FLD_IP_PROTO,
- NH_FLD_IP_PROTO_SIZE);
- }
-
- return -1;
-}
+ int pos;
+ struct key_prot_field *prot_field;
-static inline int dpaa2_flow_extract_search(
- struct dpkg_profile_cfg *dpkg,
- enum net_prot prot, uint32_t field)
-{
- int i;
+ if (dpaa2_flow_ip_address_extract(prot, key_field)) {
+ DPAA2_PMD_ERR("%s only for none IP address extract",
+ __func__);
+ return -EINVAL;
+ }
- for (i = 0; i < dpkg->num_extracts; i++) {
- if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
- dpkg->extracts[i].extract.from_hdr.field == field) {
- return i;
+ prot_field = key_profile->prot_field;
+ for (pos = 0; pos < key_profile->num; pos++) {
+ if (prot_field[pos].prot == prot &&
+ prot_field[pos].key_field == key_field) {
+ return pos;
}
}
- return -1;
+ if (dpaa2_flow_l4_src_port_extract(prot, key_field)) {
+ if (key_profile->l4_src_port_present)
+ return key_profile->l4_src_port_pos;
+ } else if (dpaa2_flow_l4_dst_port_extract(prot, key_field)) {
+ if (key_profile->l4_dst_port_present)
+ return key_profile->l4_dst_port_pos;
+ }
+
+ return -ENXIO;
}
-static inline int dpaa2_flow_extract_key_offset(
- struct dpaa2_key_extract *key_extract,
- enum net_prot prot, uint32_t field)
+static inline int
+dpaa2_flow_extract_key_offset(struct dpaa2_key_profile *key_profile,
+ enum net_prot prot, uint32_t key_field)
{
int i;
- struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
- struct dpaa2_key_info *key_info = &key_extract->key_info;
- if (prot == NET_PROT_IPV4 ||
- prot == NET_PROT_IPV6)
- i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
+ i = dpaa2_flow_extract_search(key_profile, prot, key_field);
+
+ if (i >= 0)
+ return key_profile->key_offset[i];
else
- i = dpaa2_flow_extract_search(dpkg, prot, field);
-
- if (i >= 0) {
- if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
- return key_info->ipv4_src_offset;
- else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
- return key_info->ipv4_dst_offset;
- else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
- return key_info->ipv6_src_offset;
- else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
- return key_info->ipv6_dst_offset;
- else
- return key_info->key_offset[i];
- } else {
- return -1;
- }
+ return i;
}
-struct proto_discrimination {
- enum rte_flow_item_type type;
+struct prev_proto_field_id {
+ enum net_prot prot;
union {
rte_be16_t eth_type;
uint8_t ip_proto;
@@ -615,103 +863,134 @@ struct proto_discrimination {
};
static int
-dpaa2_flow_proto_discrimination_rule(
- struct dpaa2_dev_priv *priv, struct rte_flow *flow,
- struct proto_discrimination proto, int group)
+dpaa2_flow_prev_proto_rule(struct dpaa2_dev_priv *priv,
+ struct dpaa2_dev_flow *flow,
+ const struct prev_proto_field_id *prev_proto,
+ int group,
+ enum dpaa2_flow_dist_type dist_type)
{
- enum net_prot prot;
- uint32_t field;
int offset;
- size_t key_iova;
- size_t mask_iova;
+ uint8_t *key_addr;
+ uint8_t *mask_addr;
+ uint32_t field = 0;
rte_be16_t eth_type;
uint8_t ip_proto;
+ struct dpaa2_key_extract *key_extract;
+ struct dpaa2_key_profile *key_profile;
- if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
- prot = NET_PROT_ETH;
+ if (prev_proto->prot == NET_PROT_ETH) {
field = NH_FLD_ETH_TYPE;
- } else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
- prot = NET_PROT_IP;
+ } else if (prev_proto->prot == NET_PROT_IP) {
field = NH_FLD_IP_PROTO;
} else {
- DPAA2_PMD_ERR(
- "Only Eth and IP support to discriminate next proto.");
- return -1;
- }
-
- offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
- prot, field);
- if (offset < 0) {
- DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
- prot, field);
- return -1;
- }
- key_iova = flow->qos_rule.key_iova + offset;
- mask_iova = flow->qos_rule.mask_iova + offset;
- if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
- eth_type = proto.eth_type;
- memcpy((void *)key_iova, (const void *)(ð_type),
- sizeof(rte_be16_t));
- eth_type = 0xffff;
- memcpy((void *)mask_iova, (const void *)(ð_type),
- sizeof(rte_be16_t));
- } else {
- ip_proto = proto.ip_proto;
- memcpy((void *)key_iova, (const void *)(&ip_proto),
- sizeof(uint8_t));
- ip_proto = 0xff;
- memcpy((void *)mask_iova, (const void *)(&ip_proto),
- sizeof(uint8_t));
- }
-
- offset = dpaa2_flow_extract_key_offset(
- &priv->extract.tc_key_extract[group],
- prot, field);
- if (offset < 0) {
- DPAA2_PMD_ERR("FS prot %d field %d extract failed",
- prot, field);
- return -1;
+ DPAA2_PMD_ERR("Prev proto(%d) not support!",
+ prev_proto->prot);
+ return -EINVAL;
}
- key_iova = flow->fs_rule.key_iova + offset;
- mask_iova = flow->fs_rule.mask_iova + offset;
- if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
- eth_type = proto.eth_type;
- memcpy((void *)key_iova, (const void *)(ð_type),
- sizeof(rte_be16_t));
- eth_type = 0xffff;
- memcpy((void *)mask_iova, (const void *)(ð_type),
- sizeof(rte_be16_t));
- } else {
- ip_proto = proto.ip_proto;
- memcpy((void *)key_iova, (const void *)(&ip_proto),
- sizeof(uint8_t));
- ip_proto = 0xff;
- memcpy((void *)mask_iova, (const void *)(&ip_proto),
- sizeof(uint8_t));
+ if (dist_type & DPAA2_FLOW_QOS_TYPE) {
+ key_extract = &priv->extract.qos_key_extract;
+ key_profile = &key_extract->key_profile;
+
+ offset = dpaa2_flow_extract_key_offset(key_profile,
+ prev_proto->prot, field);
+ if (offset < 0) {
+ DPAA2_PMD_ERR("%s QoS key extract failed", __func__);
+ return -EINVAL;
+ }
+ key_addr = flow->qos_key_addr + offset;
+ mask_addr = flow->qos_mask_addr + offset;
+ if (prev_proto->prot == NET_PROT_ETH) {
+ eth_type = prev_proto->eth_type;
+ memcpy(key_addr, ð_type, sizeof(rte_be16_t));
+ eth_type = 0xffff;
+ memcpy(mask_addr, ð_type, sizeof(rte_be16_t));
+ flow->qos_rule_size += sizeof(rte_be16_t);
+ } else if (prev_proto->prot == NET_PROT_IP) {
+ ip_proto = prev_proto->ip_proto;
+ memcpy(key_addr, &ip_proto, sizeof(uint8_t));
+ ip_proto = 0xff;
+ memcpy(mask_addr, &ip_proto, sizeof(uint8_t));
+ flow->qos_rule_size += sizeof(uint8_t);
+ } else {
+ DPAA2_PMD_ERR("Invalid Prev proto(%d)",
+ prev_proto->prot);
+ return -EINVAL;
+ }
+ }
+
+ if (dist_type & DPAA2_FLOW_FS_TYPE) {
+ key_extract = &priv->extract.tc_key_extract[group];
+ key_profile = &key_extract->key_profile;
+
+ offset = dpaa2_flow_extract_key_offset(key_profile,
+ prev_proto->prot, field);
+ if (offset < 0) {
+ DPAA2_PMD_ERR("%s TC[%d] key extract failed",
+ __func__, group);
+ return -EINVAL;
+ }
+ key_addr = flow->fs_key_addr + offset;
+ mask_addr = flow->fs_mask_addr + offset;
+
+ if (prev_proto->prot == NET_PROT_ETH) {
+ eth_type = prev_proto->eth_type;
+ memcpy(key_addr, ð_type, sizeof(rte_be16_t));
+ eth_type = 0xffff;
+ memcpy(mask_addr, ð_type, sizeof(rte_be16_t));
+ flow->fs_rule_size += sizeof(rte_be16_t);
+ } else if (prev_proto->prot == NET_PROT_IP) {
+ ip_proto = prev_proto->ip_proto;
+ memcpy(key_addr, &ip_proto, sizeof(uint8_t));
+ ip_proto = 0xff;
+ memcpy(mask_addr, &ip_proto, sizeof(uint8_t));
+ flow->fs_rule_size += sizeof(uint8_t);
+ } else {
+ DPAA2_PMD_ERR("Invalid Prev proto(%d)",
+ prev_proto->prot);
+ return -EINVAL;
+ }
}
return 0;
}
static inline int
-dpaa2_flow_rule_data_set(
- struct dpaa2_key_extract *key_extract,
- struct dpni_rule_cfg *rule,
- enum net_prot prot, uint32_t field,
- const void *key, const void *mask, int size)
+dpaa2_flow_hdr_rule_data_set(struct dpaa2_dev_flow *flow,
+ struct dpaa2_key_profile *key_profile,
+ enum net_prot prot, uint32_t field, int size,
+ const void *key, const void *mask,
+ enum dpaa2_flow_dist_type dist_type)
{
- int offset = dpaa2_flow_extract_key_offset(key_extract,
- prot, field);
+ int offset;
+ if (dpaa2_flow_ip_address_extract(prot, field)) {
+ DPAA2_PMD_ERR("%s only for none IP address extract",
+ __func__);
+ return -EINVAL;
+ }
+
+ offset = dpaa2_flow_extract_key_offset(key_profile,
+ prot, field);
if (offset < 0) {
- DPAA2_PMD_ERR("prot %d, field %d extract failed",
+ DPAA2_PMD_ERR("P(%d)/F(%d) does not exist!",
prot, field);
- return -1;
+ return -EINVAL;
}
- memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
- memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
+ if (dist_type & DPAA2_FLOW_QOS_TYPE) {
+ memcpy((flow->qos_key_addr + offset), key, size);
+ memcpy((flow->qos_mask_addr + offset), mask, size);
+ if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
+ flow->qos_rule_size = offset + size;
+ }
+
+ if (dist_type & DPAA2_FLOW_FS_TYPE) {
+ memcpy((flow->fs_key_addr + offset), key, size);
+ memcpy((flow->fs_mask_addr + offset), mask, size);
+ if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT)
+ flow->fs_rule_size = offset + size;
+ }
return 0;
}
@@ -728,145 +1007,13 @@ dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
return 0;
}
-static inline int
-_dpaa2_flow_rule_move_ipaddr_tail(
- struct dpaa2_key_extract *key_extract,
- struct dpni_rule_cfg *rule, int src_offset,
- uint32_t field, bool ipv4)
-{
- size_t key_src;
- size_t mask_src;
- size_t key_dst;
- size_t mask_dst;
- int dst_offset, len;
- enum net_prot prot;
- char tmp[NH_FLD_IPV6_ADDR_SIZE];
-
- if (field != NH_FLD_IP_SRC &&
- field != NH_FLD_IP_DST) {
- DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
- return -1;
- }
- if (ipv4)
- prot = NET_PROT_IPV4;
- else
- prot = NET_PROT_IPV6;
- dst_offset = dpaa2_flow_extract_key_offset(key_extract,
- prot, field);
- if (dst_offset < 0) {
- DPAA2_PMD_ERR("Field %d reorder extract failed", field);
- return -1;
- }
- key_src = rule->key_iova + src_offset;
- mask_src = rule->mask_iova + src_offset;
- key_dst = rule->key_iova + dst_offset;
- mask_dst = rule->mask_iova + dst_offset;
- if (ipv4)
- len = sizeof(rte_be32_t);
- else
- len = NH_FLD_IPV6_ADDR_SIZE;
-
- memcpy(tmp, (char *)key_src, len);
- memset((char *)key_src, 0, len);
- memcpy((char *)key_dst, tmp, len);
-
- memcpy(tmp, (char *)mask_src, len);
- memset((char *)mask_src, 0, len);
- memcpy((char *)mask_dst, tmp, len);
-
- return 0;
-}
-
-static inline int
-dpaa2_flow_rule_move_ipaddr_tail(
- struct rte_flow *flow, struct dpaa2_dev_priv *priv,
- int fs_group)
+static int
+dpaa2_flow_extract_support(const uint8_t *mask_src,
+ enum rte_flow_item_type type)
{
- int ret;
- enum net_prot prot;
-
- if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
- return 0;
-
- if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
- prot = NET_PROT_IPV4;
- else
- prot = NET_PROT_IPV6;
-
- if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
- ret = _dpaa2_flow_rule_move_ipaddr_tail(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- flow->ipaddr_rule.qos_ipsrc_offset,
- NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
- if (ret) {
- DPAA2_PMD_ERR("QoS src address reorder failed");
- return -1;
- }
- flow->ipaddr_rule.qos_ipsrc_offset =
- dpaa2_flow_extract_key_offset(
- &priv->extract.qos_key_extract,
- prot, NH_FLD_IP_SRC);
- }
-
- if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
- ret = _dpaa2_flow_rule_move_ipaddr_tail(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- flow->ipaddr_rule.qos_ipdst_offset,
- NH_FLD_IP_DST, prot == NET_PROT_IPV4);
- if (ret) {
- DPAA2_PMD_ERR("QoS dst address reorder failed");
- return -1;
- }
- flow->ipaddr_rule.qos_ipdst_offset =
- dpaa2_flow_extract_key_offset(
- &priv->extract.qos_key_extract,
- prot, NH_FLD_IP_DST);
- }
-
- if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
- ret = _dpaa2_flow_rule_move_ipaddr_tail(
- &priv->extract.tc_key_extract[fs_group],
- &flow->fs_rule,
- flow->ipaddr_rule.fs_ipsrc_offset,
- NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
- if (ret) {
- DPAA2_PMD_ERR("FS src address reorder failed");
- return -1;
- }
- flow->ipaddr_rule.fs_ipsrc_offset =
- dpaa2_flow_extract_key_offset(
- &priv->extract.tc_key_extract[fs_group],
- prot, NH_FLD_IP_SRC);
- }
- if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
- ret = _dpaa2_flow_rule_move_ipaddr_tail(
- &priv->extract.tc_key_extract[fs_group],
- &flow->fs_rule,
- flow->ipaddr_rule.fs_ipdst_offset,
- NH_FLD_IP_DST, prot == NET_PROT_IPV4);
- if (ret) {
- DPAA2_PMD_ERR("FS dst address reorder failed");
- return -1;
- }
- flow->ipaddr_rule.fs_ipdst_offset =
- dpaa2_flow_extract_key_offset(
- &priv->extract.tc_key_extract[fs_group],
- prot, NH_FLD_IP_DST);
- }
-
- return 0;
-}
-
-static int
-dpaa2_flow_extract_support(
- const uint8_t *mask_src,
- enum rte_flow_item_type type)
-{
- char mask[64];
- int i, size = 0;
- const char *mask_support = 0;
+ char mask[64];
+ int i, size = 0;
+ const char *mask_support = 0;
switch (type) {
case RTE_FLOW_ITEM_TYPE_ETH:
@@ -906,7 +1053,7 @@ dpaa2_flow_extract_support(
size = sizeof(struct rte_flow_item_gre);
break;
default:
- return -1;
+ return -EINVAL;
}
memcpy(mask, mask_support, size);
@@ -921,491 +1068,444 @@ dpaa2_flow_extract_support(
}
static int
-dpaa2_configure_flow_eth(struct rte_flow *flow,
- struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item *pattern,
- const struct rte_flow_action actions[] __rte_unused,
- struct rte_flow_error *error __rte_unused,
- int *device_configured)
+dpaa2_flow_identify_by_prev_prot(struct dpaa2_dev_priv *priv,
+ struct dpaa2_dev_flow *flow,
+ const struct prev_proto_field_id *prev_prot,
+ enum dpaa2_flow_dist_type dist_type,
+ int group, int *recfg)
{
- int index, ret;
- int local_cfg = 0;
- uint32_t group;
- const struct rte_flow_item_eth *spec, *mask;
-
- /* TODO: Currently upper bound of range parameter is not implemented */
- const struct rte_flow_item_eth *last __rte_unused;
- struct dpaa2_dev_priv *priv = dev->data->dev_private;
- const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
-
- group = attr->group;
-
- /* Parse pattern list to get the matching parameters */
- spec = (const struct rte_flow_item_eth *)pattern->spec;
- last = (const struct rte_flow_item_eth *)pattern->last;
- mask = (const struct rte_flow_item_eth *)
- (pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
- if (!spec) {
- /* Don't care any field of eth header,
- * only care eth protocol.
- */
- DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
- return 0;
- }
-
- /* Get traffic class index and flow id to be configured */
- flow->tc_id = group;
- flow->tc_index = attr->priority;
-
- if (dpaa2_flow_extract_support((const uint8_t *)mask,
- RTE_FLOW_ITEM_TYPE_ETH)) {
- DPAA2_PMD_WARN("Extract field(s) of ethernet not support.");
-
- return -1;
- }
-
- if (memcmp((const char *)&mask->hdr.src_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_ETH, NH_FLD_ETH_SA);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_ETH, NH_FLD_ETH_SA,
- RTE_ETHER_ADDR_LEN);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_ETH, NH_FLD_ETH_SA);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_ETH, NH_FLD_ETH_SA,
- RTE_ETHER_ADDR_LEN);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
+ int ret, index, local_cfg = 0, size = 0;
+ struct dpaa2_key_extract *extract;
+ struct dpaa2_key_profile *key_profile;
+ enum net_prot prot = prev_prot->prot;
+ uint32_t key_field = 0;
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before ETH_SA rule set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_ETH,
- NH_FLD_ETH_SA,
- &spec->hdr.src_addr.addr_bytes,
- &mask->hdr.src_addr.addr_bytes,
- sizeof(struct rte_ether_addr));
- if (ret) {
- DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_ETH,
- NH_FLD_ETH_SA,
- &spec->hdr.src_addr.addr_bytes,
- &mask->hdr.src_addr.addr_bytes,
- sizeof(struct rte_ether_addr));
- if (ret) {
- DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
- return -1;
- }
+ if (prot == NET_PROT_ETH) {
+ key_field = NH_FLD_ETH_TYPE;
+ size = sizeof(rte_be16_t);
+ } else if (prot == NET_PROT_IP) {
+ key_field = NH_FLD_IP_PROTO;
+ size = sizeof(uint8_t);
+ } else if (prot == NET_PROT_IPV4) {
+ prot = NET_PROT_IP;
+ key_field = NH_FLD_IP_PROTO;
+ size = sizeof(uint8_t);
+ } else if (prot == NET_PROT_IPV6) {
+ prot = NET_PROT_IP;
+ key_field = NH_FLD_IP_PROTO;
+ size = sizeof(uint8_t);
+ } else {
+ DPAA2_PMD_ERR("Invalid Prev prot(%d)", prot);
+ return -EINVAL;
}
- if (memcmp((const char *)&mask->hdr.dst_addr, zero_cmp, RTE_ETHER_ADDR_LEN)) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_ETH, NH_FLD_ETH_DA);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_ETH, NH_FLD_ETH_DA,
- RTE_ETHER_ADDR_LEN);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
+ if (dist_type & DPAA2_FLOW_QOS_TYPE) {
+ extract = &priv->extract.qos_key_extract;
+ key_profile = &extract->key_profile;
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_ETH, NH_FLD_ETH_DA);
+ index = dpaa2_flow_extract_search(key_profile,
+ prot, key_field);
if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_ETH, NH_FLD_ETH_DA,
- RTE_ETHER_ADDR_LEN);
+ ret = dpaa2_flow_extract_add_hdr(prot,
+ key_field, size, priv,
+ DPAA2_FLOW_QOS_TYPE, group,
+ NULL);
if (ret) {
- DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
+ DPAA2_PMD_ERR("QOS prev extract add failed");
- return -1;
+ return -EINVAL;
}
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before ETH DA rule set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_ETH,
- NH_FLD_ETH_DA,
- &spec->hdr.dst_addr.addr_bytes,
- &mask->hdr.dst_addr.addr_bytes,
- sizeof(struct rte_ether_addr));
- if (ret) {
- DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
- return -1;
+ local_cfg |= DPAA2_FLOW_QOS_TYPE;
}
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_ETH,
- NH_FLD_ETH_DA,
- &spec->hdr.dst_addr.addr_bytes,
- &mask->hdr.dst_addr.addr_bytes,
- sizeof(struct rte_ether_addr));
+ ret = dpaa2_flow_prev_proto_rule(priv, flow, prev_prot, group,
+ DPAA2_FLOW_QOS_TYPE);
if (ret) {
- DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
- return -1;
+ DPAA2_PMD_ERR("QoS prev rule set failed");
+ return -EINVAL;
}
}
- if (memcmp((const char *)&mask->hdr.ether_type, zero_cmp, sizeof(rte_be16_t))) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_ETH, NH_FLD_ETH_TYPE);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_ETH, NH_FLD_ETH_TYPE,
- RTE_ETHER_TYPE_LEN);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
+ if (dist_type & DPAA2_FLOW_FS_TYPE) {
+ extract = &priv->extract.tc_key_extract[group];
+ key_profile = &extract->key_profile;
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ index = dpaa2_flow_extract_search(key_profile,
+ prot, key_field);
if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_ETH, NH_FLD_ETH_TYPE,
- RTE_ETHER_TYPE_LEN);
+ ret = dpaa2_flow_extract_add_hdr(prot,
+ key_field, size, priv,
+ DPAA2_FLOW_FS_TYPE, group,
+ NULL);
if (ret) {
- DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
+ DPAA2_PMD_ERR("FS[%d] prev extract add failed",
+ group);
- return -1;
+ return -EINVAL;
}
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before ETH TYPE rule set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_ETH,
- NH_FLD_ETH_TYPE,
- &spec->hdr.ether_type,
- &mask->hdr.ether_type,
- sizeof(rte_be16_t));
- if (ret) {
- DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
- return -1;
+ local_cfg |= DPAA2_FLOW_FS_TYPE;
}
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_ETH,
- NH_FLD_ETH_TYPE,
- &spec->hdr.ether_type,
- &mask->hdr.ether_type,
- sizeof(rte_be16_t));
+ ret = dpaa2_flow_prev_proto_rule(priv, flow, prev_prot, group,
+ DPAA2_FLOW_FS_TYPE);
if (ret) {
- DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
- return -1;
+ DPAA2_PMD_ERR("FS[%d] prev rule set failed",
+ group);
+ return -EINVAL;
}
}
- (*device_configured) |= local_cfg;
+ if (recfg)
+ *recfg = local_cfg;
return 0;
}
static int
-dpaa2_configure_flow_vlan(struct rte_flow *flow,
- struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item *pattern,
- const struct rte_flow_action actions[] __rte_unused,
- struct rte_flow_error *error __rte_unused,
- int *device_configured)
+dpaa2_flow_add_hdr_extract_rule(struct dpaa2_dev_flow *flow,
+ enum net_prot prot, uint32_t field,
+ const void *key, const void *mask, int size,
+ struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
+ enum dpaa2_flow_dist_type dist_type)
{
- int index, ret;
- int local_cfg = 0;
- uint32_t group;
- const struct rte_flow_item_vlan *spec, *mask;
-
- const struct rte_flow_item_vlan *last __rte_unused;
- struct dpaa2_dev_priv *priv = dev->data->dev_private;
-
- group = attr->group;
+ int index, ret, local_cfg = 0;
+ struct dpaa2_key_extract *key_extract;
+ struct dpaa2_key_profile *key_profile;
- /* Parse pattern list to get the matching parameters */
- spec = (const struct rte_flow_item_vlan *)pattern->spec;
- last = (const struct rte_flow_item_vlan *)pattern->last;
- mask = (const struct rte_flow_item_vlan *)
- (pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
+ if (dpaa2_flow_ip_address_extract(prot, field))
+ return -EINVAL;
- /* Get traffic class index and flow id to be configured */
- flow->tc_id = group;
- flow->tc_index = attr->priority;
+ if (dist_type == DPAA2_FLOW_QOS_TYPE)
+ key_extract = &priv->extract.qos_key_extract;
+ else
+ key_extract = &priv->extract.tc_key_extract[tc_id];
- if (!spec) {
- /* Don't care any field of vlan header,
- * only care vlan protocol.
- */
- /* Eth type is actually used for vLan classification.
- */
- struct proto_discrimination proto;
+ key_profile = &key_extract->key_profile;
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_ETH, NH_FLD_ETH_TYPE);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.qos_key_extract,
- RTE_FLOW_ITEM_TYPE_ETH);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS Ext ETH_TYPE to discriminate vLan failed");
+ index = dpaa2_flow_extract_search(key_profile,
+ prot, field);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add_hdr(prot,
+ field, size, priv,
+ dist_type, tc_id, NULL);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS Extract P(%d)/F(%d) failed",
+ prot, field);
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ return ret;
}
+ local_cfg |= dist_type;
+ }
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_ETH, NH_FLD_ETH_TYPE);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.tc_key_extract[group],
- RTE_FLOW_ITEM_TYPE_ETH);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS Ext ETH_TYPE to discriminate vLan failed.");
+ ret = dpaa2_flow_hdr_rule_data_set(flow, key_profile,
+ prot, field, size, key, mask, dist_type);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS P(%d)/F(%d) rule data set failed",
+ prot, field);
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
+ return ret;
+ }
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before vLan discrimination set failed");
- return -1;
- }
+ if (recfg)
+ *recfg |= local_cfg;
- proto.type = RTE_FLOW_ITEM_TYPE_ETH;
- proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
- ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
- proto, group);
- if (ret) {
- DPAA2_PMD_ERR("vLan discrimination rule set failed");
- return -1;
- }
+ return 0;
+}
- (*device_configured) |= local_cfg;
+static int
+dpaa2_flow_add_ipaddr_extract_rule(struct dpaa2_dev_flow *flow,
+ enum net_prot prot, uint32_t field,
+ const void *key, const void *mask, int size,
+ struct dpaa2_dev_priv *priv, int tc_id, int *recfg,
+ enum dpaa2_flow_dist_type dist_type)
+{
+ int local_cfg = 0, num, ipaddr_extract_len = 0;
+ struct dpaa2_key_extract *key_extract;
+ struct dpaa2_key_profile *key_profile;
+ struct dpkg_profile_cfg *dpkg;
+ uint8_t *key_addr, *mask_addr;
+ union ip_addr_extract_rule *ip_addr_data;
+ union ip_addr_extract_rule *ip_addr_mask;
+ enum net_prot orig_prot;
+ uint32_t orig_field;
+
+ if (prot != NET_PROT_IPV4 && prot != NET_PROT_IPV6)
+ return -EINVAL;
- return 0;
+ if (prot == NET_PROT_IPV4 && field != NH_FLD_IPV4_SRC_IP &&
+ field != NH_FLD_IPV4_DST_IP) {
+ return -EINVAL;
}
- if (dpaa2_flow_extract_support((const uint8_t *)mask,
- RTE_FLOW_ITEM_TYPE_VLAN)) {
- DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
-
- return -1;
+ if (prot == NET_PROT_IPV6 && field != NH_FLD_IPV6_SRC_IP &&
+ field != NH_FLD_IPV6_DST_IP) {
+ return -EINVAL;
}
- if (!mask->hdr.vlan_tci)
- return 0;
-
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_VLAN, NH_FLD_VLAN_TCI);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_VLAN,
- NH_FLD_VLAN_TCI,
- sizeof(rte_be16_t));
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
+ orig_prot = prot;
+ orig_field = field;
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ if (prot == NET_PROT_IPV4 &&
+ field == NH_FLD_IPV4_SRC_IP) {
+ prot = NET_PROT_IP;
+ field = NH_FLD_IP_SRC;
+ } else if (prot == NET_PROT_IPV4 &&
+ field == NH_FLD_IPV4_DST_IP) {
+ prot = NET_PROT_IP;
+ field = NH_FLD_IP_DST;
+ } else if (prot == NET_PROT_IPV6 &&
+ field == NH_FLD_IPV6_SRC_IP) {
+ prot = NET_PROT_IP;
+ field = NH_FLD_IP_SRC;
+ } else if (prot == NET_PROT_IPV6 &&
+ field == NH_FLD_IPV6_DST_IP) {
+ prot = NET_PROT_IP;
+ field = NH_FLD_IP_DST;
+ } else {
+ DPAA2_PMD_ERR("Inval P(%d)/F(%d) to extract ip address",
+ prot, field);
+ return -EINVAL;
}
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_VLAN, NH_FLD_VLAN_TCI);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_VLAN,
- NH_FLD_VLAN_TCI,
- sizeof(rte_be16_t));
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ if (dist_type == DPAA2_FLOW_QOS_TYPE) {
+ key_extract = &priv->extract.qos_key_extract;
+ key_profile = &key_extract->key_profile;
+ dpkg = &key_extract->dpkg;
+ num = key_profile->num;
+ key_addr = flow->qos_key_addr;
+ mask_addr = flow->qos_mask_addr;
+ } else {
+ key_extract = &priv->extract.tc_key_extract[tc_id];
+ key_profile = &key_extract->key_profile;
+ dpkg = &key_extract->dpkg;
+ num = key_profile->num;
+ key_addr = flow->fs_key_addr;
+ mask_addr = flow->fs_mask_addr;
}
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before VLAN TCI rule set failed");
- return -1;
+ if (num >= DPKG_MAX_NUM_OF_EXTRACTS) {
+ DPAA2_PMD_ERR("Number of extracts overflows");
+ return -EINVAL;
}
- ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_VLAN,
- NH_FLD_VLAN_TCI,
- &spec->hdr.vlan_tci,
- &mask->hdr.vlan_tci,
- sizeof(rte_be16_t));
- if (ret) {
- DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
- return -1;
+ if (key_profile->ip_addr_type == IP_NONE_ADDR_EXTRACT) {
+ if (field == NH_FLD_IP_SRC)
+ key_profile->ip_addr_type = IP_SRC_EXTRACT;
+ else
+ key_profile->ip_addr_type = IP_DST_EXTRACT;
+ ipaddr_extract_len = size;
+
+ key_profile->ip_addr_extract_pos = num;
+ if (num > 0) {
+ key_profile->ip_addr_extract_off =
+ key_profile->key_offset[num - 1] +
+ key_profile->key_size[num - 1];
+ } else {
+ key_profile->ip_addr_extract_off = 0;
+ }
+ key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
+ } else if (key_profile->ip_addr_type == IP_SRC_EXTRACT) {
+ if (field == NH_FLD_IP_SRC) {
+ ipaddr_extract_len = size;
+ goto rule_configure;
+ }
+ key_profile->ip_addr_type = IP_SRC_DST_EXTRACT;
+ ipaddr_extract_len = size * 2;
+ key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
+ } else if (key_profile->ip_addr_type == IP_DST_EXTRACT) {
+ if (field == NH_FLD_IP_DST) {
+ ipaddr_extract_len = size;
+ goto rule_configure;
+ }
+ key_profile->ip_addr_type = IP_DST_SRC_EXTRACT;
+ ipaddr_extract_len = size * 2;
+ key_profile->key_max_size += NH_FLD_IPV6_ADDR_SIZE;
+ }
+ key_profile->num++;
+
+ dpkg->extracts[num].extract.from_hdr.prot = prot;
+ dpkg->extracts[num].extract.from_hdr.field = field;
+ dpkg->extracts[num].extract.from_hdr.type = DPKG_FULL_FIELD;
+ dpkg->num_extracts++;
+
+ if (dist_type == DPAA2_FLOW_QOS_TYPE)
+ local_cfg = DPAA2_FLOW_QOS_TYPE;
+ else
+ local_cfg = DPAA2_FLOW_FS_TYPE;
+
+rule_configure:
+ key_addr += key_profile->ip_addr_extract_off;
+ ip_addr_data = (union ip_addr_extract_rule *)key_addr;
+ mask_addr += key_profile->ip_addr_extract_off;
+ ip_addr_mask = (union ip_addr_extract_rule *)mask_addr;
+
+ if (orig_prot == NET_PROT_IPV4 &&
+ orig_field == NH_FLD_IPV4_SRC_IP) {
+ if (key_profile->ip_addr_type == IP_SRC_EXTRACT ||
+ key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) {
+ memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_src,
+ key, size);
+ memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_src,
+ mask, size);
+ } else {
+ memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_src,
+ key, size);
+ memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_src,
+ mask, size);
+ }
+ } else if (orig_prot == NET_PROT_IPV4 &&
+ orig_field == NH_FLD_IPV4_DST_IP) {
+ if (key_profile->ip_addr_type == IP_DST_EXTRACT ||
+ key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) {
+ memcpy(&ip_addr_data->ipv4_ds_addr.ipv4_dst,
+ key, size);
+ memcpy(&ip_addr_mask->ipv4_ds_addr.ipv4_dst,
+ mask, size);
+ } else {
+ memcpy(&ip_addr_data->ipv4_sd_addr.ipv4_dst,
+ key, size);
+ memcpy(&ip_addr_mask->ipv4_sd_addr.ipv4_dst,
+ mask, size);
+ }
+ } else if (orig_prot == NET_PROT_IPV6 &&
+ orig_field == NH_FLD_IPV6_SRC_IP) {
+ if (key_profile->ip_addr_type == IP_SRC_EXTRACT ||
+ key_profile->ip_addr_type == IP_SRC_DST_EXTRACT) {
+ memcpy(ip_addr_data->ipv6_sd_addr.ipv6_src,
+ key, size);
+ memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_src,
+ mask, size);
+ } else {
+ memcpy(ip_addr_data->ipv6_ds_addr.ipv6_src,
+ key, size);
+ memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_src,
+ mask, size);
+ }
+ } else if (orig_prot == NET_PROT_IPV6 &&
+ orig_field == NH_FLD_IPV6_DST_IP) {
+ if (key_profile->ip_addr_type == IP_DST_EXTRACT ||
+ key_profile->ip_addr_type == IP_DST_SRC_EXTRACT) {
+ memcpy(ip_addr_data->ipv6_ds_addr.ipv6_dst,
+ key, size);
+ memcpy(ip_addr_mask->ipv6_ds_addr.ipv6_dst,
+ mask, size);
+ } else {
+ memcpy(ip_addr_data->ipv6_sd_addr.ipv6_dst,
+ key, size);
+ memcpy(ip_addr_mask->ipv6_sd_addr.ipv6_dst,
+ mask, size);
+ }
}
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_VLAN,
- NH_FLD_VLAN_TCI,
- &spec->hdr.vlan_tci,
- &mask->hdr.vlan_tci,
- sizeof(rte_be16_t));
- if (ret) {
- DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
- return -1;
+ if (dist_type == DPAA2_FLOW_QOS_TYPE) {
+ flow->qos_rule_size =
+ key_profile->ip_addr_extract_off + ipaddr_extract_len;
+ } else {
+ flow->fs_rule_size =
+ key_profile->ip_addr_extract_off + ipaddr_extract_len;
}
- (*device_configured) |= local_cfg;
+ if (recfg)
+ *recfg |= local_cfg;
return 0;
}
static int
-dpaa2_configure_flow_ip_discrimation(
- struct dpaa2_dev_priv *priv, struct rte_flow *flow,
+dpaa2_configure_flow_eth(struct dpaa2_dev_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
- int *local_cfg, int *device_configured,
- uint32_t group)
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
{
- int index, ret;
- struct proto_discrimination proto;
+ int ret, local_cfg = 0;
+ uint32_t group;
+ const struct rte_flow_item_eth *spec, *mask;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_ETH, NH_FLD_ETH_TYPE);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.qos_key_extract,
- RTE_FLOW_ITEM_TYPE_ETH);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS Extract ETH_TYPE to discriminate IP failed.");
- return -1;
- }
- (*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
+ group = attr->group;
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_ETH, NH_FLD_ETH_TYPE);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.tc_key_extract[group],
- RTE_FLOW_ITEM_TYPE_ETH);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS Extract ETH_TYPE to discriminate IP failed.");
- return -1;
- }
- (*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
+ /* Parse pattern list to get the matching parameters */
+ spec = pattern->spec;
+ mask = pattern->mask ?
+ pattern->mask : &dpaa2_flow_item_eth_mask;
+ if (!spec) {
+ DPAA2_PMD_WARN("No pattern spec for Eth flow");
+ return -EINVAL;
}
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before IP discrimination set failed");
- return -1;
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->tc_index = attr->priority;
+
+ if (dpaa2_flow_extract_support((const uint8_t *)mask,
+ RTE_FLOW_ITEM_TYPE_ETH)) {
+ DPAA2_PMD_WARN("Extract field(s) of Ethernet not support.");
+
+ return -EINVAL;
}
- proto.type = RTE_FLOW_ITEM_TYPE_ETH;
- if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
- proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
- else
- proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
- if (ret) {
- DPAA2_PMD_ERR("IP discrimination rule set failed");
- return -1;
+ if (memcmp((const char *)&mask->src,
+ zero_cmp, RTE_ETHER_ADDR_LEN)) {
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
+ NH_FLD_ETH_SA, &spec->src.addr_bytes,
+ &mask->src.addr_bytes, RTE_ETHER_ADDR_LEN,
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
+
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
+ NH_FLD_ETH_SA, &spec->src.addr_bytes,
+ &mask->src.addr_bytes, RTE_ETHER_ADDR_LEN,
+ priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
+ }
+
+ if (memcmp((const char *)&mask->dst,
+ zero_cmp, RTE_ETHER_ADDR_LEN)) {
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
+ NH_FLD_ETH_DA, &spec->dst.addr_bytes,
+ &mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN,
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
+
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
+ NH_FLD_ETH_DA, &spec->dst.addr_bytes,
+ &mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN,
+ priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
+ }
+
+ if (memcmp((const char *)&mask->type,
+ zero_cmp, sizeof(rte_be16_t))) {
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
+ NH_FLD_ETH_TYPE, &spec->type,
+ &mask->type, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
+
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ETH,
+ NH_FLD_ETH_TYPE, &spec->type,
+ &mask->type, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
}
- (*device_configured) |= (*local_cfg);
+ (*device_configured) |= local_cfg;
return 0;
}
-
static int
-dpaa2_configure_flow_generic_ip(
- struct rte_flow *flow,
+dpaa2_configure_flow_vlan(struct dpaa2_dev_flow *flow,
struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
@@ -1413,419 +1513,338 @@ dpaa2_configure_flow_generic_ip(
struct rte_flow_error *error __rte_unused,
int *device_configured)
{
- int index, ret;
- int local_cfg = 0;
+ int ret, local_cfg = 0;
uint32_t group;
- const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
- *mask_ipv4 = 0;
- const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
- *mask_ipv6 = 0;
- const void *key, *mask;
- enum net_prot prot;
-
+ const struct rte_flow_item_vlan *spec, *mask;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
- int size;
group = attr->group;
/* Parse pattern list to get the matching parameters */
- if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
- spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
- mask_ipv4 = (const struct rte_flow_item_ipv4 *)
- (pattern->mask ? pattern->mask :
- &dpaa2_flow_item_ipv4_mask);
- } else {
- spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
- mask_ipv6 = (const struct rte_flow_item_ipv6 *)
- (pattern->mask ? pattern->mask :
- &dpaa2_flow_item_ipv6_mask);
- }
+ spec = pattern->spec;
+ mask = pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask;
/* Get traffic class index and flow id to be configured */
flow->tc_id = group;
flow->tc_index = attr->priority;
- ret = dpaa2_configure_flow_ip_discrimation(priv,
- flow, pattern, &local_cfg,
- device_configured, group);
- if (ret) {
- DPAA2_PMD_ERR("IP discrimination failed!");
- return -1;
+ if (!spec) {
+ struct prev_proto_field_id prev_proto;
+
+ prev_proto.prot = NET_PROT_ETH;
+ prev_proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
+ ret = dpaa2_flow_identify_by_prev_prot(priv, flow, &prev_proto,
+ DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
+ group, &local_cfg);
+ if (ret)
+ return ret;
+ (*device_configured) |= local_cfg;
+ return 0;
+ }
+
+ if (dpaa2_flow_extract_support((const uint8_t *)mask,
+ RTE_FLOW_ITEM_TYPE_VLAN)) {
+ DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
+ return -EINVAL;
}
- if (!spec_ipv4 && !spec_ipv6)
+ if (!mask->tci)
return 0;
- if (mask_ipv4) {
- if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
- RTE_FLOW_ITEM_TYPE_IPV4)) {
- DPAA2_PMD_WARN("Extract field(s) of IPv4 not support.");
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN,
+ NH_FLD_VLAN_TCI, &spec->tci,
+ &mask->tci, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
- return -1;
- }
- }
-
- if (mask_ipv6) {
- if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
- RTE_FLOW_ITEM_TYPE_IPV6)) {
- DPAA2_PMD_WARN("Extract field(s) of IPv6 not support.");
-
- return -1;
- }
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_VLAN,
+ NH_FLD_VLAN_TCI, &spec->tci,
+ &mask->tci, sizeof(rte_be16_t),
+ priv, group, &local_cfg,
+ DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
- if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
- mask_ipv4->hdr.dst_addr)) {
- flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
- } else if (mask_ipv6 &&
- (memcmp((const char *)mask_ipv6->hdr.src_addr,
- zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
- memcmp((const char *)mask_ipv6->hdr.dst_addr,
- zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
- flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
- }
-
- if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
- (mask_ipv6 &&
- memcmp((const char *)mask_ipv6->hdr.src_addr,
- zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_IP, NH_FLD_IP_SRC);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_IP,
- NH_FLD_IP_SRC,
- 0);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
+ (*device_configured) |= local_cfg;
+ return 0;
+}
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
+static int
+dpaa2_configure_flow_ipv4(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
+{
+ int ret, local_cfg = 0;
+ uint32_t group;
+ const struct rte_flow_item_ipv4 *spec_ipv4 = 0, *mask_ipv4 = 0;
+ const void *key, *mask;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ int size;
+ struct prev_proto_field_id prev_prot;
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_IP, NH_FLD_IP_SRC);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_IP,
- NH_FLD_IP_SRC,
- 0);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
+ group = attr->group;
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
+ /* Parse pattern list to get the matching parameters */
+ spec_ipv4 = pattern->spec;
+ mask_ipv4 = pattern->mask ?
+ pattern->mask : &dpaa2_flow_item_ipv4_mask;
- if (spec_ipv4)
- key = &spec_ipv4->hdr.src_addr;
- else
- key = &spec_ipv6->hdr.src_addr[0];
- if (mask_ipv4) {
- mask = &mask_ipv4->hdr.src_addr;
- size = NH_FLD_IPV4_ADDR_SIZE;
- prot = NET_PROT_IPV4;
- } else {
- mask = &mask_ipv6->hdr.src_addr[0];
- size = NH_FLD_IPV6_ADDR_SIZE;
- prot = NET_PROT_IPV6;
- }
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->tc_index = attr->priority;
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- prot, NH_FLD_IP_SRC,
- key, mask, size);
- if (ret) {
- DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
- return -1;
- }
+ prev_prot.prot = NET_PROT_ETH;
+ prev_prot.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- prot, NH_FLD_IP_SRC,
- key, mask, size);
- if (ret) {
- DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
- return -1;
- }
+ ret = dpaa2_flow_identify_by_prev_prot(priv, flow, &prev_prot,
+ DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE, group,
+ &local_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("IPv4 identification failed!");
+ return ret;
+ }
- flow->ipaddr_rule.qos_ipsrc_offset =
- dpaa2_flow_extract_key_offset(
- &priv->extract.qos_key_extract,
- prot, NH_FLD_IP_SRC);
- flow->ipaddr_rule.fs_ipsrc_offset =
- dpaa2_flow_extract_key_offset(
- &priv->extract.tc_key_extract[group],
- prot, NH_FLD_IP_SRC);
- }
-
- if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
- (mask_ipv6 &&
- memcmp((const char *)mask_ipv6->hdr.dst_addr,
- zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_IP, NH_FLD_IP_DST);
- if (index < 0) {
- if (mask_ipv4)
- size = NH_FLD_IPV4_ADDR_SIZE;
- else
- size = NH_FLD_IPV6_ADDR_SIZE;
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_IP,
- NH_FLD_IP_DST,
- size);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
+ if (!spec_ipv4)
+ return 0;
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
+ if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
+ RTE_FLOW_ITEM_TYPE_IPV4)) {
+ DPAA2_PMD_WARN("Extract field(s) of IPv4 not support.");
+ return -EINVAL;
+ }
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_IP, NH_FLD_IP_DST);
- if (index < 0) {
- if (mask_ipv4)
- size = NH_FLD_IPV4_ADDR_SIZE;
- else
- size = NH_FLD_IPV6_ADDR_SIZE;
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_IP,
- NH_FLD_IP_DST,
- size);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
+ if (mask_ipv4->hdr.src_addr) {
+ key = &spec_ipv4->hdr.src_addr;
+ mask = &mask_ipv4->hdr.src_addr;
+ size = sizeof(rte_be32_t);
+
+ ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
+ NH_FLD_IPV4_SRC_IP,
+ key, mask, size, priv,
+ group, &local_cfg,
+ DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
+
+ ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
+ NH_FLD_IPV4_SRC_IP,
+ key, mask, size, priv,
+ group, &local_cfg,
+ DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
+ }
+
+ if (mask_ipv4->hdr.dst_addr) {
+ key = &spec_ipv4->hdr.dst_addr;
+ mask = &mask_ipv4->hdr.dst_addr;
+ size = sizeof(rte_be32_t);
+
+ ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
+ NH_FLD_IPV4_DST_IP,
+ key, mask, size, priv,
+ group, &local_cfg,
+ DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
+ ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV4,
+ NH_FLD_IPV4_DST_IP,
+ key, mask, size, priv,
+ group, &local_cfg,
+ DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
+ }
+
+ if (mask_ipv4->hdr.next_proto_id) {
+ key = &spec_ipv4->hdr.next_proto_id;
+ mask = &mask_ipv4->hdr.next_proto_id;
+ size = sizeof(uint8_t);
+
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
+ NH_FLD_IP_PROTO, key,
+ mask, size, priv, group,
+ &local_cfg,
+ DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
+
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
+ NH_FLD_IP_PROTO, key,
+ mask, size, priv, group,
+ &local_cfg,
+ DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
+ }
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
+ (*device_configured) |= local_cfg;
+ return 0;
+}
- if (spec_ipv4)
- key = &spec_ipv4->hdr.dst_addr;
- else
- key = spec_ipv6->hdr.dst_addr;
- if (mask_ipv4) {
- mask = &mask_ipv4->hdr.dst_addr;
- size = NH_FLD_IPV4_ADDR_SIZE;
- prot = NET_PROT_IPV4;
- } else {
- mask = &mask_ipv6->hdr.dst_addr[0];
- size = NH_FLD_IPV6_ADDR_SIZE;
- prot = NET_PROT_IPV6;
- }
+static int
+dpaa2_configure_flow_ipv6(struct dpaa2_dev_flow *flow, struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
+{
+ int ret, local_cfg = 0;
+ uint32_t group;
+ const struct rte_flow_item_ipv6 *spec_ipv6 = 0, *mask_ipv6 = 0;
+ const void *key, *mask;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
+ int size;
+ struct prev_proto_field_id prev_prot;
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- prot, NH_FLD_IP_DST,
- key, mask, size);
- if (ret) {
- DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
- return -1;
- }
+ group = attr->group;
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- prot, NH_FLD_IP_DST,
- key, mask, size);
- if (ret) {
- DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
- return -1;
- }
- flow->ipaddr_rule.qos_ipdst_offset =
- dpaa2_flow_extract_key_offset(
- &priv->extract.qos_key_extract,
- prot, NH_FLD_IP_DST);
- flow->ipaddr_rule.fs_ipdst_offset =
- dpaa2_flow_extract_key_offset(
- &priv->extract.tc_key_extract[group],
- prot, NH_FLD_IP_DST);
- }
-
- if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
- (mask_ipv6 && mask_ipv6->hdr.proto)) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_IP, NH_FLD_IP_PROTO);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_IP,
- NH_FLD_IP_PROTO,
- NH_FLD_IP_PROTO_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
+ /* Parse pattern list to get the matching parameters */
+ spec_ipv6 = pattern->spec;
+ mask_ipv6 = pattern->mask ? pattern->mask : &dpaa2_flow_item_ipv6_mask;
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->tc_index = attr->priority;
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_IP, NH_FLD_IP_PROTO);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_IP,
- NH_FLD_IP_PROTO,
- NH_FLD_IP_PROTO_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
+ prev_prot.prot = NET_PROT_ETH;
+ prev_prot.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
+ ret = dpaa2_flow_identify_by_prev_prot(priv, flow, &prev_prot,
+ DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
+ group, &local_cfg);
+ if (ret) {
+ DPAA2_PMD_ERR("IPv6 identification failed!");
+ return ret;
+ }
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr after NH_FLD_IP_PROTO rule set failed");
- return -1;
- }
+ if (!spec_ipv6)
+ return 0;
- if (spec_ipv4)
- key = &spec_ipv4->hdr.next_proto_id;
- else
- key = &spec_ipv6->hdr.proto;
- if (mask_ipv4)
- mask = &mask_ipv4->hdr.next_proto_id;
- else
- mask = &mask_ipv6->hdr.proto;
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_IP,
- NH_FLD_IP_PROTO,
- key, mask, NH_FLD_IP_PROTO_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
- return -1;
- }
+ if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
+ RTE_FLOW_ITEM_TYPE_IPV6)) {
+ DPAA2_PMD_WARN("Extract field(s) of IPv6 not support.");
+ return -EINVAL;
+ }
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_IP,
- NH_FLD_IP_PROTO,
- key, mask, NH_FLD_IP_PROTO_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
- return -1;
- }
+ if (memcmp(mask_ipv6->hdr.src_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) {
+ key = &spec_ipv6->hdr.src_addr[0];
+ mask = &mask_ipv6->hdr.src_addr[0];
+ size = NH_FLD_IPV6_ADDR_SIZE;
+
+ ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
+ NH_FLD_IPV6_SRC_IP,
+ key, mask, size, priv,
+ group, &local_cfg,
+ DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
+
+ ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
+ NH_FLD_IPV6_SRC_IP,
+ key, mask, size, priv,
+ group, &local_cfg,
+ DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
+ }
+
+ if (memcmp(mask_ipv6->hdr.dst_addr, zero_cmp, NH_FLD_IPV6_ADDR_SIZE)) {
+ key = &spec_ipv6->hdr.dst_addr[0];
+ mask = &mask_ipv6->hdr.dst_addr[0];
+ size = NH_FLD_IPV6_ADDR_SIZE;
+
+ ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
+ NH_FLD_IPV6_DST_IP,
+ key, mask, size, priv,
+ group, &local_cfg,
+ DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
+
+ ret = dpaa2_flow_add_ipaddr_extract_rule(flow, NET_PROT_IPV6,
+ NH_FLD_IPV6_DST_IP,
+ key, mask, size, priv,
+ group, &local_cfg,
+ DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
+ }
+
+ if (mask_ipv6->hdr.proto) {
+ key = &spec_ipv6->hdr.proto;
+ mask = &mask_ipv6->hdr.proto;
+ size = sizeof(uint8_t);
+
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
+ NH_FLD_IP_PROTO, key,
+ mask, size, priv, group,
+ &local_cfg,
+ DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
+
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_IP,
+ NH_FLD_IP_PROTO, key,
+ mask, size, priv, group,
+ &local_cfg,
+ DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
}
(*device_configured) |= local_cfg;
-
return 0;
}
static int
-dpaa2_configure_flow_icmp(struct rte_flow *flow,
- struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item *pattern,
- const struct rte_flow_action actions[] __rte_unused,
- struct rte_flow_error *error __rte_unused,
- int *device_configured)
+dpaa2_configure_flow_icmp(struct dpaa2_dev_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
{
- int index, ret;
- int local_cfg = 0;
+ int ret, local_cfg = 0;
uint32_t group;
const struct rte_flow_item_icmp *spec, *mask;
-
- const struct rte_flow_item_icmp *last __rte_unused;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
group = attr->group;
/* Parse pattern list to get the matching parameters */
- spec = (const struct rte_flow_item_icmp *)pattern->spec;
- last = (const struct rte_flow_item_icmp *)pattern->last;
- mask = (const struct rte_flow_item_icmp *)
- (pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
+ spec = pattern->spec;
+ mask = pattern->mask ?
+ pattern->mask : &dpaa2_flow_item_icmp_mask;
/* Get traffic class index and flow id to be configured */
flow->tc_id = group;
flow->tc_index = attr->priority;
if (!spec) {
- /* Don't care any field of ICMP header,
- * only care ICMP protocol.
- * Example: flow create 0 ingress pattern icmp /
- */
/* Next proto of Generical IP is actually used
* for ICMP identification.
+ * Example: flow create 0 ingress pattern icmp
*/
- struct proto_discrimination proto;
-
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_IP, NH_FLD_IP_PROTO);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.qos_key_extract,
- DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS Extract IP protocol to discriminate ICMP failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_IP, NH_FLD_IP_PROTO);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.tc_key_extract[group],
- DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS Extract IP protocol to discriminate ICMP failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move IP addr before ICMP discrimination set failed");
- return -1;
- }
+ struct prev_proto_field_id prev_proto;
- proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
- proto.ip_proto = IPPROTO_ICMP;
- ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
- proto, group);
- if (ret) {
- DPAA2_PMD_ERR("ICMP discrimination rule set failed");
- return -1;
- }
+ prev_proto.prot = NET_PROT_IP;
+ prev_proto.ip_proto = IPPROTO_ICMP;
+ ret = dpaa2_flow_identify_by_prev_prot(priv,
+ flow, &prev_proto,
+ DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
+ group, &local_cfg);
+ if (ret)
+ return ret;
(*device_configured) |= local_cfg;
-
return 0;
}
@@ -1833,145 +1852,39 @@ dpaa2_configure_flow_icmp(struct rte_flow *flow,
RTE_FLOW_ITEM_TYPE_ICMP)) {
DPAA2_PMD_WARN("Extract field(s) of ICMP not support.");
- return -1;
+ return -EINVAL;
}
if (mask->hdr.icmp_type) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_ICMP,
- NH_FLD_ICMP_TYPE,
- NH_FLD_ICMP_TYPE_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_ICMP,
- NH_FLD_ICMP_TYPE,
- NH_FLD_ICMP_TYPE_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before ICMP TYPE set failed");
- return -1;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
+ NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type,
+ &mask->hdr.icmp_type, sizeof(uint8_t),
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_ICMP,
- NH_FLD_ICMP_TYPE,
- &spec->hdr.icmp_type,
- &mask->hdr.icmp_type,
- NH_FLD_ICMP_TYPE_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_ICMP,
- NH_FLD_ICMP_TYPE,
- &spec->hdr.icmp_type,
- &mask->hdr.icmp_type,
- NH_FLD_ICMP_TYPE_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
- return -1;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
+ NH_FLD_ICMP_TYPE, &spec->hdr.icmp_type,
+ &mask->hdr.icmp_type, sizeof(uint8_t),
+ priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
}
if (mask->hdr.icmp_code) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_ICMP, NH_FLD_ICMP_CODE);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_ICMP,
- NH_FLD_ICMP_CODE,
- NH_FLD_ICMP_CODE_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_ICMP, NH_FLD_ICMP_CODE);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_ICMP,
- NH_FLD_ICMP_CODE,
- NH_FLD_ICMP_CODE_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
+ NH_FLD_ICMP_CODE, &spec->hdr.icmp_code,
+ &mask->hdr.icmp_code, sizeof(uint8_t),
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr after ICMP CODE set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_ICMP,
- NH_FLD_ICMP_CODE,
- &spec->hdr.icmp_code,
- &mask->hdr.icmp_code,
- NH_FLD_ICMP_CODE_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_ICMP,
- NH_FLD_ICMP_CODE,
- &spec->hdr.icmp_code,
- &mask->hdr.icmp_code,
- NH_FLD_ICMP_CODE_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
- return -1;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_ICMP,
+ NH_FLD_ICMP_CODE, &spec->hdr.icmp_code,
+ &mask->hdr.icmp_code, sizeof(uint8_t),
+ priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
}
(*device_configured) |= local_cfg;
@@ -1980,84 +1893,41 @@ dpaa2_configure_flow_icmp(struct rte_flow *flow,
}
static int
-dpaa2_configure_flow_udp(struct rte_flow *flow,
- struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item *pattern,
- const struct rte_flow_action actions[] __rte_unused,
- struct rte_flow_error *error __rte_unused,
- int *device_configured)
+dpaa2_configure_flow_udp(struct dpaa2_dev_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
{
- int index, ret;
- int local_cfg = 0;
+ int ret, local_cfg = 0;
uint32_t group;
const struct rte_flow_item_udp *spec, *mask;
-
- const struct rte_flow_item_udp *last __rte_unused;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
group = attr->group;
/* Parse pattern list to get the matching parameters */
- spec = (const struct rte_flow_item_udp *)pattern->spec;
- last = (const struct rte_flow_item_udp *)pattern->last;
- mask = (const struct rte_flow_item_udp *)
- (pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
+ spec = pattern->spec;
+ mask = pattern->mask ?
+ pattern->mask : &dpaa2_flow_item_udp_mask;
/* Get traffic class index and flow id to be configured */
flow->tc_id = group;
flow->tc_index = attr->priority;
if (!spec || !mc_l4_port_identification) {
- struct proto_discrimination proto;
-
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_IP, NH_FLD_IP_PROTO);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.qos_key_extract,
- DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS Extract IP protocol to discriminate UDP failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_IP, NH_FLD_IP_PROTO);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.tc_key_extract[group],
- DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS Extract IP protocol to discriminate UDP failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move IP addr before UDP discrimination set failed");
- return -1;
- }
+ struct prev_proto_field_id prev_proto;
- proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
- proto.ip_proto = IPPROTO_UDP;
- ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
- proto, group);
- if (ret) {
- DPAA2_PMD_ERR("UDP discrimination rule set failed");
- return -1;
- }
+ prev_proto.prot = NET_PROT_IP;
+ prev_proto.ip_proto = IPPROTO_UDP;
+ ret = dpaa2_flow_identify_by_prev_prot(priv,
+ flow, &prev_proto,
+ DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
+ group, &local_cfg);
+ if (ret)
+ return ret;
(*device_configured) |= local_cfg;
@@ -2069,149 +1939,40 @@ dpaa2_configure_flow_udp(struct rte_flow *flow,
RTE_FLOW_ITEM_TYPE_UDP)) {
DPAA2_PMD_WARN("Extract field(s) of UDP not support.");
- return -1;
+ return -EINVAL;
}
if (mask->hdr.src_port) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_UDP,
- NH_FLD_UDP_PORT_SRC,
- NH_FLD_UDP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
+ NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port,
+ &mask->hdr.src_port, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
+ NH_FLD_UDP_PORT_SRC, &spec->hdr.src_port,
+ &mask->hdr.src_port, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
+ }
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_UDP,
- NH_FLD_UDP_PORT_SRC,
- NH_FLD_UDP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
+ if (mask->hdr.dst_port) {
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
+ NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port,
+ &mask->hdr.dst_port, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before UDP_PORT_SRC set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_UDP,
- NH_FLD_UDP_PORT_SRC,
- &spec->hdr.src_port,
- &mask->hdr.src_port,
- NH_FLD_UDP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS NH_FLD_UDP_PORT_SRC rule data set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_UDP,
- NH_FLD_UDP_PORT_SRC,
- &spec->hdr.src_port,
- &mask->hdr.src_port,
- NH_FLD_UDP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS NH_FLD_UDP_PORT_SRC rule data set failed");
- return -1;
- }
- }
-
- if (mask->hdr.dst_port) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_UDP,
- NH_FLD_UDP_PORT_DST,
- NH_FLD_UDP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_UDP,
- NH_FLD_UDP_PORT_DST,
- NH_FLD_UDP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before UDP_PORT_DST set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_UDP,
- NH_FLD_UDP_PORT_DST,
- &spec->hdr.dst_port,
- &mask->hdr.dst_port,
- NH_FLD_UDP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS NH_FLD_UDP_PORT_DST rule data set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_UDP,
- NH_FLD_UDP_PORT_DST,
- &spec->hdr.dst_port,
- &mask->hdr.dst_port,
- NH_FLD_UDP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS NH_FLD_UDP_PORT_DST rule data set failed");
- return -1;
- }
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_UDP,
+ NH_FLD_UDP_PORT_DST, &spec->hdr.dst_port,
+ &mask->hdr.dst_port, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
+ }
(*device_configured) |= local_cfg;
@@ -2219,84 +1980,41 @@ dpaa2_configure_flow_udp(struct rte_flow *flow,
}
static int
-dpaa2_configure_flow_tcp(struct rte_flow *flow,
- struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item *pattern,
- const struct rte_flow_action actions[] __rte_unused,
- struct rte_flow_error *error __rte_unused,
- int *device_configured)
+dpaa2_configure_flow_tcp(struct dpaa2_dev_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
{
- int index, ret;
- int local_cfg = 0;
+ int ret, local_cfg = 0;
uint32_t group;
const struct rte_flow_item_tcp *spec, *mask;
-
- const struct rte_flow_item_tcp *last __rte_unused;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
group = attr->group;
/* Parse pattern list to get the matching parameters */
- spec = (const struct rte_flow_item_tcp *)pattern->spec;
- last = (const struct rte_flow_item_tcp *)pattern->last;
- mask = (const struct rte_flow_item_tcp *)
- (pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
+ spec = pattern->spec;
+ mask = pattern->mask ?
+ pattern->mask : &dpaa2_flow_item_tcp_mask;
/* Get traffic class index and flow id to be configured */
flow->tc_id = group;
flow->tc_index = attr->priority;
if (!spec || !mc_l4_port_identification) {
- struct proto_discrimination proto;
+ struct prev_proto_field_id prev_proto;
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_IP, NH_FLD_IP_PROTO);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.qos_key_extract,
- DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS Extract IP protocol to discriminate TCP failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_IP, NH_FLD_IP_PROTO);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.tc_key_extract[group],
- DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS Extract IP protocol to discriminate TCP failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move IP addr before TCP discrimination set failed");
- return -1;
- }
-
- proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
- proto.ip_proto = IPPROTO_TCP;
- ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
- proto, group);
- if (ret) {
- DPAA2_PMD_ERR("TCP discrimination rule set failed");
- return -1;
- }
+ prev_proto.prot = NET_PROT_IP;
+ prev_proto.ip_proto = IPPROTO_TCP;
+ ret = dpaa2_flow_identify_by_prev_prot(priv,
+ flow, &prev_proto,
+ DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
+ group, &local_cfg);
+ if (ret)
+ return ret;
(*device_configured) |= local_cfg;
@@ -2308,149 +2026,39 @@ dpaa2_configure_flow_tcp(struct rte_flow *flow,
RTE_FLOW_ITEM_TYPE_TCP)) {
DPAA2_PMD_WARN("Extract field(s) of TCP not support.");
- return -1;
+ return -EINVAL;
}
if (mask->hdr.src_port) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_TCP,
- NH_FLD_TCP_PORT_SRC,
- NH_FLD_TCP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_TCP,
- NH_FLD_TCP_PORT_SRC,
- NH_FLD_TCP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
+ NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port,
+ &mask->hdr.src_port, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before TCP_PORT_SRC set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_TCP,
- NH_FLD_TCP_PORT_SRC,
- &spec->hdr.src_port,
- &mask->hdr.src_port,
- NH_FLD_TCP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS NH_FLD_TCP_PORT_SRC rule data set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_TCP,
- NH_FLD_TCP_PORT_SRC,
- &spec->hdr.src_port,
- &mask->hdr.src_port,
- NH_FLD_TCP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS NH_FLD_TCP_PORT_SRC rule data set failed");
- return -1;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
+ NH_FLD_TCP_PORT_SRC, &spec->hdr.src_port,
+ &mask->hdr.src_port, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
}
if (mask->hdr.dst_port) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_TCP,
- NH_FLD_TCP_PORT_DST,
- NH_FLD_TCP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_TCP,
- NH_FLD_TCP_PORT_DST,
- NH_FLD_TCP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before TCP_PORT_DST set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_TCP,
- NH_FLD_TCP_PORT_DST,
- &spec->hdr.dst_port,
- &mask->hdr.dst_port,
- NH_FLD_TCP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS NH_FLD_TCP_PORT_DST rule data set failed");
- return -1;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
+ NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port,
+ &mask->hdr.dst_port, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_TCP,
- NH_FLD_TCP_PORT_DST,
- &spec->hdr.dst_port,
- &mask->hdr.dst_port,
- NH_FLD_TCP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS NH_FLD_TCP_PORT_DST rule data set failed");
- return -1;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_TCP,
+ NH_FLD_TCP_PORT_DST, &spec->hdr.dst_port,
+ &mask->hdr.dst_port, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
}
(*device_configured) |= local_cfg;
@@ -2459,85 +2067,41 @@ dpaa2_configure_flow_tcp(struct rte_flow *flow,
}
static int
-dpaa2_configure_flow_sctp(struct rte_flow *flow,
- struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item *pattern,
- const struct rte_flow_action actions[] __rte_unused,
- struct rte_flow_error *error __rte_unused,
- int *device_configured)
+dpaa2_configure_flow_sctp(struct dpaa2_dev_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
{
- int index, ret;
- int local_cfg = 0;
+ int ret, local_cfg = 0;
uint32_t group;
const struct rte_flow_item_sctp *spec, *mask;
-
- const struct rte_flow_item_sctp *last __rte_unused;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
group = attr->group;
/* Parse pattern list to get the matching parameters */
- spec = (const struct rte_flow_item_sctp *)pattern->spec;
- last = (const struct rte_flow_item_sctp *)pattern->last;
- mask = (const struct rte_flow_item_sctp *)
- (pattern->mask ? pattern->mask :
- &dpaa2_flow_item_sctp_mask);
+ spec = pattern->spec;
+ mask = pattern->mask ?
+ pattern->mask : &dpaa2_flow_item_sctp_mask;
/* Get traffic class index and flow id to be configured */
flow->tc_id = group;
flow->tc_index = attr->priority;
if (!spec || !mc_l4_port_identification) {
- struct proto_discrimination proto;
+ struct prev_proto_field_id prev_proto;
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_IP, NH_FLD_IP_PROTO);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.qos_key_extract,
- DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS Extract IP protocol to discriminate SCTP failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_IP, NH_FLD_IP_PROTO);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.tc_key_extract[group],
- DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS Extract IP protocol to discriminate SCTP failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before SCTP discrimination set failed");
- return -1;
- }
-
- proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
- proto.ip_proto = IPPROTO_SCTP;
- ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
- proto, group);
- if (ret) {
- DPAA2_PMD_ERR("SCTP discrimination rule set failed");
- return -1;
- }
+ prev_proto.prot = NET_PROT_IP;
+ prev_proto.ip_proto = IPPROTO_SCTP;
+ ret = dpaa2_flow_identify_by_prev_prot(priv,
+ flow, &prev_proto,
+ DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
+ group, &local_cfg);
+ if (ret)
+ return ret;
(*device_configured) |= local_cfg;
@@ -2553,145 +2117,35 @@ dpaa2_configure_flow_sctp(struct rte_flow *flow,
}
if (mask->hdr.src_port) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_SCTP,
- NH_FLD_SCTP_PORT_SRC,
- NH_FLD_SCTP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_SCTP,
- NH_FLD_SCTP_PORT_SRC,
- NH_FLD_SCTP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
+ NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port,
+ &mask->hdr.src_port, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before SCTP_PORT_SRC set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_SCTP,
- NH_FLD_SCTP_PORT_SRC,
- &spec->hdr.src_port,
- &mask->hdr.src_port,
- NH_FLD_SCTP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_SCTP,
- NH_FLD_SCTP_PORT_SRC,
- &spec->hdr.src_port,
- &mask->hdr.src_port,
- NH_FLD_SCTP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS NH_FLD_SCTP_PORT_SRC rule data set failed");
- return -1;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
+ NH_FLD_SCTP_PORT_SRC, &spec->hdr.src_port,
+ &mask->hdr.src_port, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
}
if (mask->hdr.dst_port) {
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_SCTP,
- NH_FLD_SCTP_PORT_DST,
- NH_FLD_SCTP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_SCTP,
- NH_FLD_SCTP_PORT_DST,
- NH_FLD_SCTP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before SCTP_PORT_DST set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_SCTP,
- NH_FLD_SCTP_PORT_DST,
- &spec->hdr.dst_port,
- &mask->hdr.dst_port,
- NH_FLD_SCTP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS NH_FLD_SCTP_PORT_DST rule data set failed");
- return -1;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
+ NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port,
+ &mask->hdr.dst_port, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_SCTP,
- NH_FLD_SCTP_PORT_DST,
- &spec->hdr.dst_port,
- &mask->hdr.dst_port,
- NH_FLD_SCTP_PORT_SIZE);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS NH_FLD_SCTP_PORT_DST rule data set failed");
- return -1;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_SCTP,
+ NH_FLD_SCTP_PORT_DST, &spec->hdr.dst_port,
+ &mask->hdr.dst_port, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
}
(*device_configured) |= local_cfg;
@@ -2700,88 +2154,46 @@ dpaa2_configure_flow_sctp(struct rte_flow *flow,
}
static int
-dpaa2_configure_flow_gre(struct rte_flow *flow,
- struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item *pattern,
- const struct rte_flow_action actions[] __rte_unused,
- struct rte_flow_error *error __rte_unused,
- int *device_configured)
+dpaa2_configure_flow_gre(struct dpaa2_dev_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
{
- int index, ret;
- int local_cfg = 0;
+ int ret, local_cfg = 0;
uint32_t group;
const struct rte_flow_item_gre *spec, *mask;
-
- const struct rte_flow_item_gre *last __rte_unused;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
group = attr->group;
/* Parse pattern list to get the matching parameters */
- spec = (const struct rte_flow_item_gre *)pattern->spec;
- last = (const struct rte_flow_item_gre *)pattern->last;
- mask = (const struct rte_flow_item_gre *)
- (pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
+ spec = pattern->spec;
+ mask = pattern->mask ?
+ pattern->mask : &dpaa2_flow_item_gre_mask;
/* Get traffic class index and flow id to be configured */
flow->tc_id = group;
flow->tc_index = attr->priority;
if (!spec) {
- struct proto_discrimination proto;
+ struct prev_proto_field_id prev_proto;
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_IP, NH_FLD_IP_PROTO);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.qos_key_extract,
- DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS Extract IP protocol to discriminate GRE failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_IP, NH_FLD_IP_PROTO);
- if (index < 0) {
- ret = dpaa2_flow_proto_discrimination_extract(
- &priv->extract.tc_key_extract[group],
- DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
- if (ret) {
- DPAA2_PMD_ERR(
- "FS Extract IP protocol to discriminate GRE failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move IP addr before GRE discrimination set failed");
- return -1;
- }
-
- proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
- proto.ip_proto = IPPROTO_GRE;
- ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
- proto, group);
- if (ret) {
- DPAA2_PMD_ERR("GRE discrimination rule set failed");
- return -1;
- }
+ prev_proto.prot = NET_PROT_IP;
+ prev_proto.ip_proto = IPPROTO_GRE;
+ ret = dpaa2_flow_identify_by_prev_prot(priv,
+ flow, &prev_proto,
+ DPAA2_FLOW_QOS_TYPE | DPAA2_FLOW_FS_TYPE,
+ group, &local_cfg);
+ if (ret)
+ return ret;
(*device_configured) |= local_cfg;
- return 0;
+ if (!spec)
+ return 0;
}
if (dpaa2_flow_extract_support((const uint8_t *)mask,
@@ -2794,74 +2206,19 @@ dpaa2_configure_flow_gre(struct rte_flow *flow,
if (!mask->protocol)
return 0;
- index = dpaa2_flow_extract_search(
- &priv->extract.qos_key_extract.dpkg,
- NET_PROT_GRE, NH_FLD_GRE_TYPE);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.qos_key_extract,
- NET_PROT_GRE,
- NH_FLD_GRE_TYPE,
- sizeof(rte_be16_t));
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
- }
-
- index = dpaa2_flow_extract_search(
- &priv->extract.tc_key_extract[group].dpkg,
- NET_PROT_GRE, NH_FLD_GRE_TYPE);
- if (index < 0) {
- ret = dpaa2_flow_extract_add(
- &priv->extract.tc_key_extract[group],
- NET_PROT_GRE,
- NH_FLD_GRE_TYPE,
- sizeof(rte_be16_t));
- if (ret) {
- DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
-
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
- if (ret) {
- DPAA2_PMD_ERR(
- "Move ipaddr before GRE_TYPE set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.qos_key_extract,
- &flow->qos_rule,
- NET_PROT_GRE,
- NH_FLD_GRE_TYPE,
- &spec->protocol,
- &mask->protocol,
- sizeof(rte_be16_t));
- if (ret) {
- DPAA2_PMD_ERR(
- "QoS NH_FLD_GRE_TYPE rule data set failed");
- return -1;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE,
+ NH_FLD_GRE_TYPE, &spec->protocol,
+ &mask->protocol, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_QOS_TYPE);
+ if (ret)
+ return ret;
- ret = dpaa2_flow_rule_data_set(
- &priv->extract.tc_key_extract[group],
- &flow->fs_rule,
- NET_PROT_GRE,
- NH_FLD_GRE_TYPE,
- &spec->protocol,
- &mask->protocol,
- sizeof(rte_be16_t));
- if (ret) {
- DPAA2_PMD_ERR(
- "FS NH_FLD_GRE_TYPE rule data set failed");
- return -1;
- }
+ ret = dpaa2_flow_add_hdr_extract_rule(flow, NET_PROT_GRE,
+ NH_FLD_GRE_TYPE, &spec->protocol,
+ &mask->protocol, sizeof(rte_be16_t),
+ priv, group, &local_cfg, DPAA2_FLOW_FS_TYPE);
+ if (ret)
+ return ret;
(*device_configured) |= local_cfg;
@@ -2869,404 +2226,109 @@ dpaa2_configure_flow_gre(struct rte_flow *flow,
}
static int
-dpaa2_configure_flow_raw(struct rte_flow *flow,
- struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item *pattern,
- const struct rte_flow_action actions[] __rte_unused,
- struct rte_flow_error *error __rte_unused,
- int *device_configured)
+dpaa2_configure_flow_raw(struct dpaa2_dev_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
const struct rte_flow_item_raw *spec = pattern->spec;
const struct rte_flow_item_raw *mask = pattern->mask;
int prev_key_size =
- priv->extract.qos_key_extract.key_info.key_total_size;
+ priv->extract.qos_key_extract.key_profile.key_max_size;
int local_cfg = 0, ret;
uint32_t group;
/* Need both spec and mask */
if (!spec || !mask) {
- DPAA2_PMD_ERR("spec or mask not present.");
- return -EINVAL;
- }
- /* Only supports non-relative with offset 0 */
- if (spec->relative || spec->offset != 0 ||
- spec->search || spec->limit) {
- DPAA2_PMD_ERR("relative and non zero offset not supported.");
- return -EINVAL;
- }
- /* Spec len and mask len should be same */
- if (spec->length != mask->length) {
- DPAA2_PMD_ERR("Spec len and mask len mismatch.");
- return -EINVAL;
- }
-
- /* Get traffic class index and flow id to be configured */
- group = attr->group;
- flow->tc_id = group;
- flow->tc_index = attr->priority;
-
- if (prev_key_size <= spec->length) {
- ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
- spec->length);
- if (ret) {
- DPAA2_PMD_ERR("QoS Extract RAW add failed.");
- return -1;
- }
- local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
-
- ret = dpaa2_flow_extract_add_raw(
- &priv->extract.tc_key_extract[group],
- spec->length);
- if (ret) {
- DPAA2_PMD_ERR("FS Extract RAW add failed.");
- return -1;
- }
- local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
- }
-
- ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
- mask->pattern, spec->length);
- if (ret) {
- DPAA2_PMD_ERR("QoS RAW rule data set failed");
- return -1;
- }
-
- ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
- mask->pattern, spec->length);
- if (ret) {
- DPAA2_PMD_ERR("FS RAW rule data set failed");
- return -1;
- }
-
- (*device_configured) |= local_cfg;
-
- return 0;
-}
-
-static inline int
-dpaa2_fs_action_supported(enum rte_flow_action_type action)
-{
- int i;
-
- for (i = 0; i < (int)(sizeof(dpaa2_supported_fs_action_type) /
- sizeof(enum rte_flow_action_type)); i++) {
- if (action == dpaa2_supported_fs_action_type[i])
- return 1;
- }
-
- return 0;
-}
-/* The existing QoS/FS entry with IP address(es)
- * needs update after
- * new extract(s) are inserted before IP
- * address(es) extract(s).
- */
-static int
-dpaa2_flow_entry_update(
- struct dpaa2_dev_priv *priv, uint8_t tc_id)
-{
- struct rte_flow *curr = LIST_FIRST(&priv->flows);
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
- int ret;
- int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
- int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
- struct dpaa2_key_extract *qos_key_extract =
- &priv->extract.qos_key_extract;
- struct dpaa2_key_extract *tc_key_extract =
- &priv->extract.tc_key_extract[tc_id];
- char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
- char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
- char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
- char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
- int extend = -1, extend1, size = -1;
- uint16_t qos_index;
-
- while (curr) {
- if (curr->ipaddr_rule.ipaddr_type ==
- FLOW_NONE_IPADDR) {
- curr = LIST_NEXT(curr, next);
- continue;
- }
-
- if (curr->ipaddr_rule.ipaddr_type ==
- FLOW_IPV4_ADDR) {
- qos_ipsrc_offset =
- qos_key_extract->key_info.ipv4_src_offset;
- qos_ipdst_offset =
- qos_key_extract->key_info.ipv4_dst_offset;
- fs_ipsrc_offset =
- tc_key_extract->key_info.ipv4_src_offset;
- fs_ipdst_offset =
- tc_key_extract->key_info.ipv4_dst_offset;
- size = NH_FLD_IPV4_ADDR_SIZE;
- } else {
- qos_ipsrc_offset =
- qos_key_extract->key_info.ipv6_src_offset;
- qos_ipdst_offset =
- qos_key_extract->key_info.ipv6_dst_offset;
- fs_ipsrc_offset =
- tc_key_extract->key_info.ipv6_src_offset;
- fs_ipdst_offset =
- tc_key_extract->key_info.ipv6_dst_offset;
- size = NH_FLD_IPV6_ADDR_SIZE;
- }
-
- qos_index = curr->tc_id * priv->fs_entries +
- curr->tc_index;
-
- dpaa2_flow_qos_entry_log("Before update", curr, qos_index, stdout);
-
- if (priv->num_rx_tc > 1) {
- ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
- priv->token, &curr->qos_rule);
- if (ret) {
- DPAA2_PMD_ERR("Qos entry remove failed.");
- return -1;
- }
- }
-
- extend = -1;
-
- if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
- RTE_ASSERT(qos_ipsrc_offset >=
- curr->ipaddr_rule.qos_ipsrc_offset);
- extend1 = qos_ipsrc_offset -
- curr->ipaddr_rule.qos_ipsrc_offset;
- if (extend >= 0)
- RTE_ASSERT(extend == extend1);
- else
- extend = extend1;
-
- RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
- (size == NH_FLD_IPV6_ADDR_SIZE));
-
- memcpy(ipsrc_key,
- (char *)(size_t)curr->qos_rule.key_iova +
- curr->ipaddr_rule.qos_ipsrc_offset,
- size);
- memset((char *)(size_t)curr->qos_rule.key_iova +
- curr->ipaddr_rule.qos_ipsrc_offset,
- 0, size);
-
- memcpy(ipsrc_mask,
- (char *)(size_t)curr->qos_rule.mask_iova +
- curr->ipaddr_rule.qos_ipsrc_offset,
- size);
- memset((char *)(size_t)curr->qos_rule.mask_iova +
- curr->ipaddr_rule.qos_ipsrc_offset,
- 0, size);
-
- curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
- }
-
- if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
- RTE_ASSERT(qos_ipdst_offset >=
- curr->ipaddr_rule.qos_ipdst_offset);
- extend1 = qos_ipdst_offset -
- curr->ipaddr_rule.qos_ipdst_offset;
- if (extend >= 0)
- RTE_ASSERT(extend == extend1);
- else
- extend = extend1;
-
- RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
- (size == NH_FLD_IPV6_ADDR_SIZE));
-
- memcpy(ipdst_key,
- (char *)(size_t)curr->qos_rule.key_iova +
- curr->ipaddr_rule.qos_ipdst_offset,
- size);
- memset((char *)(size_t)curr->qos_rule.key_iova +
- curr->ipaddr_rule.qos_ipdst_offset,
- 0, size);
-
- memcpy(ipdst_mask,
- (char *)(size_t)curr->qos_rule.mask_iova +
- curr->ipaddr_rule.qos_ipdst_offset,
- size);
- memset((char *)(size_t)curr->qos_rule.mask_iova +
- curr->ipaddr_rule.qos_ipdst_offset,
- 0, size);
-
- curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
- }
-
- if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
- RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
- (size == NH_FLD_IPV6_ADDR_SIZE));
- memcpy((char *)(size_t)curr->qos_rule.key_iova +
- curr->ipaddr_rule.qos_ipsrc_offset,
- ipsrc_key,
- size);
- memcpy((char *)(size_t)curr->qos_rule.mask_iova +
- curr->ipaddr_rule.qos_ipsrc_offset,
- ipsrc_mask,
- size);
- }
- if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
- RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
- (size == NH_FLD_IPV6_ADDR_SIZE));
- memcpy((char *)(size_t)curr->qos_rule.key_iova +
- curr->ipaddr_rule.qos_ipdst_offset,
- ipdst_key,
- size);
- memcpy((char *)(size_t)curr->qos_rule.mask_iova +
- curr->ipaddr_rule.qos_ipdst_offset,
- ipdst_mask,
- size);
- }
-
- if (extend >= 0)
- curr->qos_real_key_size += extend;
-
- curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
-
- dpaa2_flow_qos_entry_log("Start update", curr, qos_index, stdout);
-
- if (priv->num_rx_tc > 1) {
- ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
- priv->token, &curr->qos_rule,
- curr->tc_id, qos_index,
- 0, 0);
- if (ret) {
- DPAA2_PMD_ERR("Qos entry update failed.");
- return -1;
- }
- }
-
- if (!dpaa2_fs_action_supported(curr->action)) {
- curr = LIST_NEXT(curr, next);
- continue;
- }
+ DPAA2_PMD_ERR("spec or mask not present.");
+ return -EINVAL;
+ }
+ /* Only supports non-relative with offset 0 */
+ if (spec->relative || spec->offset != 0 ||
+ spec->search || spec->limit) {
+ DPAA2_PMD_ERR("relative and non zero offset not supported.");
+ return -EINVAL;
+ }
+ /* Spec len and mask len should be same */
+ if (spec->length != mask->length) {
+ DPAA2_PMD_ERR("Spec len and mask len mismatch.");
+ return -EINVAL;
+ }
- dpaa2_flow_fs_entry_log("Before update", curr, stdout);
- extend = -1;
+ /* Get traffic class index and flow id to be configured */
+ group = attr->group;
+ flow->tc_id = group;
+ flow->tc_index = attr->priority;
- ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
- priv->token, curr->tc_id, &curr->fs_rule);
+ if (prev_key_size <= spec->length) {
+ ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
+ spec->length);
if (ret) {
- DPAA2_PMD_ERR("FS entry remove failed.");
+ DPAA2_PMD_ERR("QoS Extract RAW add failed.");
return -1;
}
+ local_cfg |= DPAA2_FLOW_QOS_TYPE;
- if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
- tc_id == curr->tc_id) {
- RTE_ASSERT(fs_ipsrc_offset >=
- curr->ipaddr_rule.fs_ipsrc_offset);
- extend1 = fs_ipsrc_offset -
- curr->ipaddr_rule.fs_ipsrc_offset;
- if (extend >= 0)
- RTE_ASSERT(extend == extend1);
- else
- extend = extend1;
-
- memcpy(ipsrc_key,
- (char *)(size_t)curr->fs_rule.key_iova +
- curr->ipaddr_rule.fs_ipsrc_offset,
- size);
- memset((char *)(size_t)curr->fs_rule.key_iova +
- curr->ipaddr_rule.fs_ipsrc_offset,
- 0, size);
-
- memcpy(ipsrc_mask,
- (char *)(size_t)curr->fs_rule.mask_iova +
- curr->ipaddr_rule.fs_ipsrc_offset,
- size);
- memset((char *)(size_t)curr->fs_rule.mask_iova +
- curr->ipaddr_rule.fs_ipsrc_offset,
- 0, size);
-
- curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
+ ret = dpaa2_flow_extract_add_raw(&priv->extract.tc_key_extract[group],
+ spec->length);
+ if (ret) {
+ DPAA2_PMD_ERR("FS Extract RAW add failed.");
+ return -1;
}
+ local_cfg |= DPAA2_FLOW_FS_TYPE;
+ }
- if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
- tc_id == curr->tc_id) {
- RTE_ASSERT(fs_ipdst_offset >=
- curr->ipaddr_rule.fs_ipdst_offset);
- extend1 = fs_ipdst_offset -
- curr->ipaddr_rule.fs_ipdst_offset;
- if (extend >= 0)
- RTE_ASSERT(extend == extend1);
- else
- extend = extend1;
-
- memcpy(ipdst_key,
- (char *)(size_t)curr->fs_rule.key_iova +
- curr->ipaddr_rule.fs_ipdst_offset,
- size);
- memset((char *)(size_t)curr->fs_rule.key_iova +
- curr->ipaddr_rule.fs_ipdst_offset,
- 0, size);
-
- memcpy(ipdst_mask,
- (char *)(size_t)curr->fs_rule.mask_iova +
- curr->ipaddr_rule.fs_ipdst_offset,
- size);
- memset((char *)(size_t)curr->fs_rule.mask_iova +
- curr->ipaddr_rule.fs_ipdst_offset,
- 0, size);
-
- curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
- }
+ ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
+ mask->pattern, spec->length);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS RAW rule data set failed");
+ return -1;
+ }
- if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
- memcpy((char *)(size_t)curr->fs_rule.key_iova +
- curr->ipaddr_rule.fs_ipsrc_offset,
- ipsrc_key,
- size);
- memcpy((char *)(size_t)curr->fs_rule.mask_iova +
- curr->ipaddr_rule.fs_ipsrc_offset,
- ipsrc_mask,
- size);
- }
- if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
- memcpy((char *)(size_t)curr->fs_rule.key_iova +
- curr->ipaddr_rule.fs_ipdst_offset,
- ipdst_key,
- size);
- memcpy((char *)(size_t)curr->fs_rule.mask_iova +
- curr->ipaddr_rule.fs_ipdst_offset,
- ipdst_mask,
- size);
- }
+ ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
+ mask->pattern, spec->length);
+ if (ret) {
+ DPAA2_PMD_ERR("FS RAW rule data set failed");
+ return -1;
+ }
- if (extend >= 0)
- curr->fs_real_key_size += extend;
- curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
+ (*device_configured) |= local_cfg;
- dpaa2_flow_fs_entry_log("Start update", curr, stdout);
+ return 0;
+}
- ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
- priv->token, curr->tc_id, curr->tc_index,
- &curr->fs_rule, &curr->action_cfg);
- if (ret) {
- DPAA2_PMD_ERR("FS entry update failed.");
- return -1;
- }
+static inline int
+dpaa2_fs_action_supported(enum rte_flow_action_type action)
+{
+ int i;
+ int action_num = sizeof(dpaa2_supported_fs_action_type) /
+ sizeof(enum rte_flow_action_type);
- curr = LIST_NEXT(curr, next);
+ for (i = 0; i < action_num; i++) {
+ if (action == dpaa2_supported_fs_action_type[i])
+ return true;
}
- return 0;
+ return false;
}
static inline int
-dpaa2_flow_verify_attr(
- struct dpaa2_dev_priv *priv,
+dpaa2_flow_verify_attr(struct dpaa2_dev_priv *priv,
const struct rte_flow_attr *attr)
{
- struct rte_flow *curr = LIST_FIRST(&priv->flows);
+ struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
while (curr) {
if (curr->tc_id == attr->group &&
curr->tc_index == attr->priority) {
- DPAA2_PMD_ERR(
- "Flow with group %d and priority %d already exists.",
+ DPAA2_PMD_ERR("Flow(TC[%d].entry[%d] exists",
attr->group, attr->priority);
- return -1;
+ return -EINVAL;
}
curr = LIST_NEXT(curr, next);
}
@@ -3279,18 +2341,16 @@ dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
const struct rte_flow_action *action)
{
const struct rte_flow_action_port_id *port_id;
+ const struct rte_flow_action_ethdev *ethdev;
int idx = -1;
struct rte_eth_dev *dest_dev;
if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
- port_id = (const struct rte_flow_action_port_id *)
- action->conf;
+ port_id = action->conf;
if (!port_id->original)
idx = port_id->id;
} else if (action->type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
- const struct rte_flow_action_ethdev *ethdev;
-
- ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+ ethdev = action->conf;
idx = ethdev->port_id;
} else {
return NULL;
@@ -3310,8 +2370,7 @@ dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
}
static inline int
-dpaa2_flow_verify_action(
- struct dpaa2_dev_priv *priv,
+dpaa2_flow_verify_action(struct dpaa2_dev_priv *priv,
const struct rte_flow_attr *attr,
const struct rte_flow_action actions[])
{
@@ -3323,15 +2382,14 @@ dpaa2_flow_verify_action(
while (!end_of_list) {
switch (actions[j].type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
- dest_queue = (const struct rte_flow_action_queue *)
- (actions[j].conf);
+ dest_queue = actions[j].conf;
rxq = priv->rx_vq[dest_queue->index];
if (attr->group != rxq->tc_index) {
- DPAA2_PMD_ERR(
- "RXQ[%d] does not belong to the group %d",
- dest_queue->index, attr->group);
+ DPAA2_PMD_ERR("FSQ(%d.%d) not in TC[%d]",
+ rxq->tc_index, rxq->flow_id,
+ attr->group);
- return -1;
+ return -ENOTSUP;
}
break;
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
@@ -3345,20 +2403,17 @@ dpaa2_flow_verify_action(
rss_conf = (const struct rte_flow_action_rss *)
(actions[j].conf);
if (rss_conf->queue_num > priv->dist_queues) {
- DPAA2_PMD_ERR(
- "RSS number exceeds the distribution size");
+ DPAA2_PMD_ERR("RSS number too large");
return -ENOTSUP;
}
for (i = 0; i < (int)rss_conf->queue_num; i++) {
if (rss_conf->queue[i] >= priv->nb_rx_queues) {
- DPAA2_PMD_ERR(
- "RSS queue index exceeds the number of RXQs");
+ DPAA2_PMD_ERR("RSS queue not in range");
return -ENOTSUP;
}
rxq = priv->rx_vq[rss_conf->queue[i]];
if (rxq->tc_index != attr->group) {
- DPAA2_PMD_ERR(
- "Queue/Group combination are not supported\n");
+ DPAA2_PMD_ERR("RSS queue not in group");
return -ENOTSUP;
}
}
@@ -3378,28 +2433,248 @@ dpaa2_flow_verify_action(
}
static int
-dpaa2_generic_flow_set(struct rte_flow *flow,
- struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+dpaa2_configure_flow_fs_action(struct dpaa2_dev_priv *priv,
+ struct dpaa2_dev_flow *flow,
+ const struct rte_flow_action *rte_action)
{
+ struct rte_eth_dev *dest_dev;
+ struct dpaa2_dev_priv *dest_priv;
const struct rte_flow_action_queue *dest_queue;
+ struct dpaa2_queue *dest_q;
+
+ memset(&flow->fs_action_cfg, 0,
+ sizeof(struct dpni_fs_action_cfg));
+ flow->action_type = rte_action->type;
+
+ if (flow->action_type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ dest_queue = rte_action->conf;
+ dest_q = priv->rx_vq[dest_queue->index];
+ flow->fs_action_cfg.flow_id = dest_q->flow_id;
+ } else if (flow->action_type == RTE_FLOW_ACTION_TYPE_PORT_ID ||
+ flow->action_type == RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT) {
+ dest_dev = dpaa2_flow_redirect_dev(priv, rte_action);
+ if (!dest_dev) {
+ DPAA2_PMD_ERR("Invalid device to redirect");
+ return -EINVAL;
+ }
+
+ dest_priv = dest_dev->data->dev_private;
+ dest_q = dest_priv->tx_vq[0];
+ flow->fs_action_cfg.options =
+ DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
+ flow->fs_action_cfg.redirect_obj_token =
+ dest_priv->token;
+ flow->fs_action_cfg.flow_id = dest_q->flow_id;
+ }
+
+ return 0;
+}
+
+static inline uint16_t
+dpaa2_flow_entry_size(uint16_t key_max_size)
+{
+ if (key_max_size > DPAA2_FLOW_ENTRY_MAX_SIZE) {
+ DPAA2_PMD_ERR("Key size(%d) > max(%d)",
+ key_max_size,
+ DPAA2_FLOW_ENTRY_MAX_SIZE);
+
+ return 0;
+ }
+
+ if (key_max_size > DPAA2_FLOW_ENTRY_MIN_SIZE)
+ return DPAA2_FLOW_ENTRY_MAX_SIZE;
+
+ /* Current MC only supports fixed entry size(56)*/
+ return DPAA2_FLOW_ENTRY_MAX_SIZE;
+}
+
+static inline int
+dpaa2_flow_clear_fs_table(struct dpaa2_dev_priv *priv,
+ uint8_t tc_id)
+{
+ struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
+ int need_clear = 0, ret;
+ struct fsl_mc_io *dpni = priv->hw;
+
+ while (curr) {
+ if (curr->tc_id == tc_id) {
+ need_clear = 1;
+ break;
+ }
+ curr = LIST_NEXT(curr, next);
+ }
+
+ if (need_clear) {
+ ret = dpni_clear_fs_entries(dpni, CMD_PRI_LOW,
+ priv->token, tc_id);
+ if (ret) {
+ DPAA2_PMD_ERR("TC[%d] clear failed", tc_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_configure_fs_rss_table(struct dpaa2_dev_priv *priv,
+ uint8_t tc_id, uint16_t dist_size, int rss_dist)
+{
+ struct dpaa2_key_extract *tc_extract;
+ uint8_t *key_cfg_buf;
+ uint64_t key_cfg_iova;
+ int ret;
+ struct dpni_rx_dist_cfg tc_cfg;
+ struct fsl_mc_io *dpni = priv->hw;
+ uint16_t entry_size;
+ uint16_t key_max_size;
+
+ ret = dpaa2_flow_clear_fs_table(priv, tc_id);
+ if (ret < 0) {
+ DPAA2_PMD_ERR("TC[%d] clear failed", tc_id);
+ return ret;
+ }
+
+ tc_extract = &priv->extract.tc_key_extract[tc_id];
+ key_cfg_buf = priv->extract.tc_extract_param[tc_id];
+ key_cfg_iova = DPAA2_VADDR_TO_IOVA(key_cfg_buf);
+
+ key_max_size = tc_extract->key_profile.key_max_size;
+ entry_size = dpaa2_flow_entry_size(key_max_size);
+
+ dpaa2_flow_fs_extracts_log(priv, tc_id);
+ ret = dpkg_prepare_key_cfg(&tc_extract->dpkg,
+ key_cfg_buf);
+ if (ret < 0) {
+ DPAA2_PMD_ERR("TC[%d] prepare key failed", tc_id);
+ return ret;
+ }
+
+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
+ tc_cfg.dist_size = dist_size;
+ tc_cfg.key_cfg_iova = key_cfg_iova;
+ if (rss_dist)
+ tc_cfg.enable = true;
+ else
+ tc_cfg.enable = false;
+ tc_cfg.tc = tc_id;
+ ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
+ priv->token, &tc_cfg);
+ if (ret < 0) {
+ if (rss_dist) {
+ DPAA2_PMD_ERR("RSS TC[%d] set failed",
+ tc_id);
+ } else {
+ DPAA2_PMD_ERR("FS TC[%d] hash disable failed",
+ tc_id);
+ }
+
+ return ret;
+ }
+
+ if (rss_dist)
+ return 0;
+
+ tc_cfg.enable = true;
+ tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
+ ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
+ priv->token, &tc_cfg);
+ if (ret < 0) {
+ DPAA2_PMD_ERR("TC[%d] FS configured failed", tc_id);
+ return ret;
+ }
+
+ ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_FS_TYPE,
+ entry_size, tc_id);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+dpaa2_configure_qos_table(struct dpaa2_dev_priv *priv,
+ int rss_dist)
+{
+ struct dpaa2_key_extract *qos_extract;
+ uint8_t *key_cfg_buf;
+ uint64_t key_cfg_iova;
+ int ret;
+ struct dpni_qos_tbl_cfg qos_cfg;
+ struct fsl_mc_io *dpni = priv->hw;
+ uint16_t entry_size;
+ uint16_t key_max_size;
+
+ if (!rss_dist && priv->num_rx_tc <= 1) {
+ /* QoS table is effective for FS multiple TCs or RSS.*/
+ return 0;
+ }
+
+ if (LIST_FIRST(&priv->flows)) {
+ ret = dpni_clear_qos_table(dpni, CMD_PRI_LOW,
+ priv->token);
+ if (ret < 0) {
+ DPAA2_PMD_ERR("QoS table clear failed");
+ return ret;
+ }
+ }
+
+ qos_extract = &priv->extract.qos_key_extract;
+ key_cfg_buf = priv->extract.qos_extract_param;
+ key_cfg_iova = DPAA2_VADDR_TO_IOVA(key_cfg_buf);
+
+ key_max_size = qos_extract->key_profile.key_max_size;
+ entry_size = dpaa2_flow_entry_size(key_max_size);
+
+ dpaa2_flow_qos_extracts_log(priv);
+
+ ret = dpkg_prepare_key_cfg(&qos_extract->dpkg,
+ key_cfg_buf);
+ if (ret < 0) {
+ DPAA2_PMD_ERR("QoS prepare extract failed");
+ return ret;
+ }
+ memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
+ qos_cfg.keep_entries = true;
+ qos_cfg.key_cfg_iova = key_cfg_iova;
+ if (rss_dist) {
+ qos_cfg.discard_on_miss = true;
+ } else {
+ qos_cfg.discard_on_miss = false;
+ qos_cfg.default_tc = 0;
+ }
+
+ ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
+ priv->token, &qos_cfg);
+ if (ret < 0) {
+ DPAA2_PMD_ERR("QoS table set failed");
+ return ret;
+ }
+
+ ret = dpaa2_flow_rule_add_all(priv, DPAA2_FLOW_QOS_TYPE,
+ entry_size, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+dpaa2_generic_flow_set(struct dpaa2_dev_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
const struct rte_flow_action_rss *rss_conf;
int is_keycfg_configured = 0, end_of_list = 0;
int ret = 0, i = 0, j = 0;
- struct dpni_rx_dist_cfg tc_cfg;
- struct dpni_qos_tbl_cfg qos_cfg;
- struct dpni_fs_action_cfg action;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct dpaa2_queue *dest_q;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
- size_t param;
- struct rte_flow *curr = LIST_FIRST(&priv->flows);
- uint16_t qos_index;
- struct rte_eth_dev *dest_dev;
- struct dpaa2_dev_priv *dest_priv;
+ struct dpaa2_dev_flow *curr = LIST_FIRST(&priv->flows);
+ uint16_t dist_size, key_size;
+ struct dpaa2_key_extract *qos_key_extract;
+ struct dpaa2_key_extract *tc_key_extract;
ret = dpaa2_flow_verify_attr(priv, attr);
if (ret)
@@ -3417,7 +2692,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
dev, attr, &pattern[i], actions, error,
&is_keycfg_configured);
if (ret) {
- DPAA2_PMD_ERR("ETH flow configuration failed!");
+ DPAA2_PMD_ERR("ETH flow config failed!");
return ret;
}
break;
@@ -3426,17 +2701,25 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
dev, attr, &pattern[i], actions, error,
&is_keycfg_configured);
if (ret) {
- DPAA2_PMD_ERR("vLan flow configuration failed!");
+ DPAA2_PMD_ERR("vLan flow config failed!");
return ret;
}
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
+ ret = dpaa2_configure_flow_ipv4(flow,
+ dev, attr, &pattern[i], actions, error,
+ &is_keycfg_configured);
+ if (ret) {
+ DPAA2_PMD_ERR("IPV4 flow config failed!");
+ return ret;
+ }
+ break;
case RTE_FLOW_ITEM_TYPE_IPV6:
- ret = dpaa2_configure_flow_generic_ip(flow,
+ ret = dpaa2_configure_flow_ipv6(flow,
dev, attr, &pattern[i], actions, error,
&is_keycfg_configured);
if (ret) {
- DPAA2_PMD_ERR("IP flow configuration failed!");
+ DPAA2_PMD_ERR("IPV6 flow config failed!");
return ret;
}
break;
@@ -3445,7 +2728,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
dev, attr, &pattern[i], actions, error,
&is_keycfg_configured);
if (ret) {
- DPAA2_PMD_ERR("ICMP flow configuration failed!");
+ DPAA2_PMD_ERR("ICMP flow config failed!");
return ret;
}
break;
@@ -3454,7 +2737,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
dev, attr, &pattern[i], actions, error,
&is_keycfg_configured);
if (ret) {
- DPAA2_PMD_ERR("UDP flow configuration failed!");
+ DPAA2_PMD_ERR("UDP flow config failed!");
return ret;
}
break;
@@ -3463,7 +2746,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
dev, attr, &pattern[i], actions, error,
&is_keycfg_configured);
if (ret) {
- DPAA2_PMD_ERR("TCP flow configuration failed!");
+ DPAA2_PMD_ERR("TCP flow config failed!");
return ret;
}
break;
@@ -3472,7 +2755,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
dev, attr, &pattern[i], actions, error,
&is_keycfg_configured);
if (ret) {
- DPAA2_PMD_ERR("SCTP flow configuration failed!");
+ DPAA2_PMD_ERR("SCTP flow config failed!");
return ret;
}
break;
@@ -3481,17 +2764,17 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
dev, attr, &pattern[i], actions, error,
&is_keycfg_configured);
if (ret) {
- DPAA2_PMD_ERR("GRE flow configuration failed!");
+ DPAA2_PMD_ERR("GRE flow config failed!");
return ret;
}
break;
case RTE_FLOW_ITEM_TYPE_RAW:
ret = dpaa2_configure_flow_raw(flow,
- dev, attr, &pattern[i],
- actions, error,
- &is_keycfg_configured);
+ dev, attr, &pattern[i],
+ actions, error,
+ &is_keycfg_configured);
if (ret) {
- DPAA2_PMD_ERR("RAW flow configuration failed!");
+ DPAA2_PMD_ERR("RAW flow config failed!");
return ret;
}
break;
@@ -3506,6 +2789,14 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
i++;
}
+ qos_key_extract = &priv->extract.qos_key_extract;
+ key_size = qos_key_extract->key_profile.key_max_size;
+ flow->qos_rule.key_size = dpaa2_flow_entry_size(key_size);
+
+ tc_key_extract = &priv->extract.tc_key_extract[flow->tc_id];
+ key_size = tc_key_extract->key_profile.key_max_size;
+ flow->fs_rule.key_size = dpaa2_flow_entry_size(key_size);
+
/* Let's parse action on matching traffic */
end_of_list = 0;
while (!end_of_list) {
@@ -3513,150 +2804,33 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
case RTE_FLOW_ACTION_TYPE_QUEUE:
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
case RTE_FLOW_ACTION_TYPE_PORT_ID:
- memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
- flow->action = actions[j].type;
-
- if (actions[j].type == RTE_FLOW_ACTION_TYPE_QUEUE) {
- dest_queue = (const struct rte_flow_action_queue *)
- (actions[j].conf);
- dest_q = priv->rx_vq[dest_queue->index];
- action.flow_id = dest_q->flow_id;
- } else {
- dest_dev = dpaa2_flow_redirect_dev(priv,
- &actions[j]);
- if (!dest_dev) {
- DPAA2_PMD_ERR("Invalid destination device to redirect!");
- return -1;
- }
-
- dest_priv = dest_dev->data->dev_private;
- dest_q = dest_priv->tx_vq[0];
- action.options =
- DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
- action.redirect_obj_token = dest_priv->token;
- action.flow_id = dest_q->flow_id;
- }
+ ret = dpaa2_configure_flow_fs_action(priv, flow,
+ &actions[j]);
+ if (ret)
+ return ret;
/* Configure FS table first*/
- if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
- dpaa2_flow_fs_table_extracts_log(priv,
- flow->tc_id, stdout);
- if (dpkg_prepare_key_cfg(
- &priv->extract.tc_key_extract[flow->tc_id].dpkg,
- (uint8_t *)(size_t)priv->extract
- .tc_extract_param[flow->tc_id]) < 0) {
- DPAA2_PMD_ERR(
- "Unable to prepare extract parameters");
- return -1;
- }
-
- memset(&tc_cfg, 0,
- sizeof(struct dpni_rx_dist_cfg));
- tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
- tc_cfg.key_cfg_iova =
- (uint64_t)priv->extract.tc_extract_param[flow->tc_id];
- tc_cfg.tc = flow->tc_id;
- tc_cfg.enable = false;
- ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
- priv->token, &tc_cfg);
- if (ret < 0) {
- DPAA2_PMD_ERR(
- "TC hash cannot be disabled.(%d)",
- ret);
- return -1;
- }
- tc_cfg.enable = true;
- tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
- ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
- priv->token, &tc_cfg);
- if (ret < 0) {
- DPAA2_PMD_ERR(
- "TC distribution cannot be configured.(%d)",
- ret);
- return -1;
- }
+ dist_size = priv->nb_rx_queues / priv->num_rx_tc;
+ if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) {
+ ret = dpaa2_configure_fs_rss_table(priv,
+ flow->tc_id,
+ dist_size,
+ false);
+ if (ret)
+ return ret;
}
/* Configure QoS table then.*/
- if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
- dpaa2_flow_qos_table_extracts_log(priv, stdout);
- if (dpkg_prepare_key_cfg(
- &priv->extract.qos_key_extract.dpkg,
- (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
- DPAA2_PMD_ERR(
- "Unable to prepare extract parameters");
- return -1;
- }
-
- memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
- qos_cfg.discard_on_miss = false;
- qos_cfg.default_tc = 0;
- qos_cfg.keep_entries = true;
- qos_cfg.key_cfg_iova =
- (size_t)priv->extract.qos_extract_param;
- /* QoS table is effective for multiple TCs. */
- if (priv->num_rx_tc > 1) {
- ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
- priv->token, &qos_cfg);
- if (ret < 0) {
- DPAA2_PMD_ERR(
- "RSS QoS table can not be configured(%d)\n",
- ret);
- return -1;
- }
- }
- }
-
- flow->qos_real_key_size = priv->extract
- .qos_key_extract.key_info.key_total_size;
- if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
- if (flow->ipaddr_rule.qos_ipdst_offset >=
- flow->ipaddr_rule.qos_ipsrc_offset) {
- flow->qos_real_key_size =
- flow->ipaddr_rule.qos_ipdst_offset +
- NH_FLD_IPV4_ADDR_SIZE;
- } else {
- flow->qos_real_key_size =
- flow->ipaddr_rule.qos_ipsrc_offset +
- NH_FLD_IPV4_ADDR_SIZE;
- }
- } else if (flow->ipaddr_rule.ipaddr_type ==
- FLOW_IPV6_ADDR) {
- if (flow->ipaddr_rule.qos_ipdst_offset >=
- flow->ipaddr_rule.qos_ipsrc_offset) {
- flow->qos_real_key_size =
- flow->ipaddr_rule.qos_ipdst_offset +
- NH_FLD_IPV6_ADDR_SIZE;
- } else {
- flow->qos_real_key_size =
- flow->ipaddr_rule.qos_ipsrc_offset +
- NH_FLD_IPV6_ADDR_SIZE;
- }
+ if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
+ ret = dpaa2_configure_qos_table(priv, false);
+ if (ret)
+ return ret;
}
- /* QoS entry added is only effective for multiple TCs.*/
if (priv->num_rx_tc > 1) {
- qos_index = flow->tc_id * priv->fs_entries +
- flow->tc_index;
- if (qos_index >= priv->qos_entries) {
- DPAA2_PMD_ERR("QoS table with %d entries full",
- priv->qos_entries);
- return -1;
- }
- flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
-
- dpaa2_flow_qos_entry_log("Start add", flow,
- qos_index, stdout);
-
- ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
- priv->token, &flow->qos_rule,
- flow->tc_id, qos_index,
- 0, 0);
- if (ret < 0) {
- DPAA2_PMD_ERR(
- "Error in adding entry to QoS table(%d)", ret);
+ ret = dpaa2_flow_add_qos_rule(priv, flow);
+ if (ret)
return ret;
- }
}
if (flow->tc_index >= priv->fs_entries) {
@@ -3665,140 +2839,47 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
return -1;
}
- flow->fs_real_key_size =
- priv->extract.tc_key_extract[flow->tc_id]
- .key_info.key_total_size;
-
- if (flow->ipaddr_rule.ipaddr_type ==
- FLOW_IPV4_ADDR) {
- if (flow->ipaddr_rule.fs_ipdst_offset >=
- flow->ipaddr_rule.fs_ipsrc_offset) {
- flow->fs_real_key_size =
- flow->ipaddr_rule.fs_ipdst_offset +
- NH_FLD_IPV4_ADDR_SIZE;
- } else {
- flow->fs_real_key_size =
- flow->ipaddr_rule.fs_ipsrc_offset +
- NH_FLD_IPV4_ADDR_SIZE;
- }
- } else if (flow->ipaddr_rule.ipaddr_type ==
- FLOW_IPV6_ADDR) {
- if (flow->ipaddr_rule.fs_ipdst_offset >=
- flow->ipaddr_rule.fs_ipsrc_offset) {
- flow->fs_real_key_size =
- flow->ipaddr_rule.fs_ipdst_offset +
- NH_FLD_IPV6_ADDR_SIZE;
- } else {
- flow->fs_real_key_size =
- flow->ipaddr_rule.fs_ipsrc_offset +
- NH_FLD_IPV6_ADDR_SIZE;
- }
- }
-
- flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
-
- dpaa2_flow_fs_entry_log("Start add", flow, stdout);
-
- ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
- flow->tc_id, flow->tc_index,
- &flow->fs_rule, &action);
- if (ret < 0) {
- DPAA2_PMD_ERR(
- "Error in adding entry to FS table(%d)", ret);
+ ret = dpaa2_flow_add_fs_rule(priv, flow);
+ if (ret)
return ret;
- }
- memcpy(&flow->action_cfg, &action,
- sizeof(struct dpni_fs_action_cfg));
+
break;
case RTE_FLOW_ACTION_TYPE_RSS:
- rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
+ rss_conf = actions[j].conf;
+ flow->action_type = RTE_FLOW_ACTION_TYPE_RSS;
- flow->action = RTE_FLOW_ACTION_TYPE_RSS;
ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
- &priv->extract.tc_key_extract[flow->tc_id].dpkg);
+ &tc_key_extract->dpkg);
if (ret < 0) {
- DPAA2_PMD_ERR(
- "unable to set flow distribution.please check queue config\n");
+ DPAA2_PMD_ERR("TC[%d] distset RSS failed",
+ flow->tc_id);
return ret;
}
- /* Allocate DMA'ble memory to write the rules */
- param = (size_t)rte_malloc(NULL, 256, 64);
- if (!param) {
- DPAA2_PMD_ERR("Memory allocation failure\n");
- return -1;
- }
-
- if (dpkg_prepare_key_cfg(
- &priv->extract.tc_key_extract[flow->tc_id].dpkg,
- (uint8_t *)param) < 0) {
- DPAA2_PMD_ERR(
- "Unable to prepare extract parameters");
- rte_free((void *)param);
- return -1;
- }
-
- memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
- tc_cfg.dist_size = rss_conf->queue_num;
- tc_cfg.key_cfg_iova = (size_t)param;
- tc_cfg.enable = true;
- tc_cfg.tc = flow->tc_id;
- ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
- priv->token, &tc_cfg);
- if (ret < 0) {
- DPAA2_PMD_ERR(
- "RSS TC table cannot be configured: %d\n",
- ret);
- rte_free((void *)param);
- return -1;
+ dist_size = rss_conf->queue_num;
+ if (is_keycfg_configured & DPAA2_FLOW_FS_TYPE) {
+ ret = dpaa2_configure_fs_rss_table(priv,
+ flow->tc_id,
+ dist_size,
+ true);
+ if (ret)
+ return ret;
}
- rte_free((void *)param);
- if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
- if (dpkg_prepare_key_cfg(
- &priv->extract.qos_key_extract.dpkg,
- (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
- DPAA2_PMD_ERR(
- "Unable to prepare extract parameters");
- return -1;
- }
- memset(&qos_cfg, 0,
- sizeof(struct dpni_qos_tbl_cfg));
- qos_cfg.discard_on_miss = true;
- qos_cfg.keep_entries = true;
- qos_cfg.key_cfg_iova =
- (size_t)priv->extract.qos_extract_param;
- ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
- priv->token, &qos_cfg);
- if (ret < 0) {
- DPAA2_PMD_ERR(
- "RSS QoS dist can't be configured-%d\n",
- ret);
- return -1;
- }
+ if (is_keycfg_configured & DPAA2_FLOW_QOS_TYPE) {
+ ret = dpaa2_configure_qos_table(priv, true);
+ if (ret)
+ return ret;
}
- /* Add Rule into QoS table */
- qos_index = flow->tc_id * priv->fs_entries +
- flow->tc_index;
- if (qos_index >= priv->qos_entries) {
- DPAA2_PMD_ERR("QoS table with %d entries full",
- priv->qos_entries);
- return -1;
- }
+ ret = dpaa2_flow_add_qos_rule(priv, flow);
+ if (ret)
+ return ret;
- flow->qos_real_key_size =
- priv->extract.qos_key_extract.key_info.key_total_size;
- flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
- ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
- &flow->qos_rule, flow->tc_id,
- qos_index, 0, 0);
- if (ret < 0) {
- DPAA2_PMD_ERR(
- "Error in entry addition in QoS table(%d)",
- ret);
+ ret = dpaa2_flow_add_fs_rule(priv, flow);
+ if (ret)
return ret;
- }
+
break;
case RTE_FLOW_ACTION_TYPE_END:
end_of_list = 1;
@@ -3812,16 +2893,6 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
}
if (!ret) {
- if (is_keycfg_configured &
- (DPAA2_QOS_TABLE_RECONFIGURE |
- DPAA2_FS_TABLE_RECONFIGURE)) {
- ret = dpaa2_flow_entry_update(priv, flow->tc_id);
- if (ret) {
- DPAA2_PMD_ERR("Flow entry update failed.");
-
- return -1;
- }
- }
/* New rules are inserted. */
if (!curr) {
LIST_INSERT_HEAD(&priv->flows, flow, next);
@@ -3836,7 +2907,7 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
static inline int
dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
- const struct rte_flow_attr *attr)
+ const struct rte_flow_attr *attr)
{
int ret = 0;
@@ -3910,18 +2981,18 @@ dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
}
for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
- !actions[j].conf)
+ !actions[j].conf)
ret = -EINVAL;
}
return ret;
}
-static
-int dpaa2_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *flow_attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+static int
+dpaa2_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *flow_attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct dpni_attr dpni_attr;
@@ -3975,127 +3046,128 @@ int dpaa2_flow_validate(struct rte_eth_dev *dev,
return ret;
}
-static
-struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+static struct rte_flow *
+dpaa2_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
- struct rte_flow *flow = NULL;
- size_t key_iova = 0, mask_iova = 0;
+ struct dpaa2_dev_flow *flow = NULL;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
int ret;
dpaa2_flow_control_log =
getenv("DPAA2_FLOW_CONTROL_LOG");
if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
- struct dpaa2_dev_priv *priv = dev->data->dev_private;
-
dpaa2_flow_miss_flow_id =
(uint16_t)atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
- DPAA2_PMD_ERR(
- "The missed flow ID %d exceeds the max flow ID %d",
- dpaa2_flow_miss_flow_id,
- priv->dist_queues - 1);
+ DPAA2_PMD_ERR("Missed flow ID %d >= dist size(%d)",
+ dpaa2_flow_miss_flow_id,
+ priv->dist_queues);
return NULL;
}
}
- flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
+ flow = rte_zmalloc(NULL, sizeof(struct dpaa2_dev_flow),
+ RTE_CACHE_LINE_SIZE);
if (!flow) {
DPAA2_PMD_ERR("Failure to allocate memory for flow");
goto mem_failure;
}
- /* Allocate DMA'ble memory to write the rules */
- key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
- if (!key_iova) {
- DPAA2_PMD_ERR(
- "Memory allocation failure for rule configuration\n");
+
+ /* Allocate DMA'ble memory to write the qos rules */
+ flow->qos_key_addr = rte_zmalloc(NULL, 256, 64);
+ if (!flow->qos_key_addr) {
+ DPAA2_PMD_ERR("Memory allocation failed");
goto mem_failure;
}
- mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
- if (!mask_iova) {
- DPAA2_PMD_ERR(
- "Memory allocation failure for rule configuration\n");
+ flow->qos_rule.key_iova = DPAA2_VADDR_TO_IOVA(flow->qos_key_addr);
+
+ flow->qos_mask_addr = rte_zmalloc(NULL, 256, 64);
+ if (!flow->qos_mask_addr) {
+ DPAA2_PMD_ERR("Memory allocation failed");
goto mem_failure;
}
+ flow->qos_rule.mask_iova = DPAA2_VADDR_TO_IOVA(flow->qos_mask_addr);
- flow->qos_rule.key_iova = key_iova;
- flow->qos_rule.mask_iova = mask_iova;
-
- /* Allocate DMA'ble memory to write the rules */
- key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
- if (!key_iova) {
- DPAA2_PMD_ERR(
- "Memory allocation failure for rule configuration\n");
+ /* Allocate DMA'ble memory to write the FS rules */
+ flow->fs_key_addr = rte_zmalloc(NULL, 256, 64);
+ if (!flow->fs_key_addr) {
+ DPAA2_PMD_ERR("Memory allocation failed");
goto mem_failure;
}
- mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
- if (!mask_iova) {
- DPAA2_PMD_ERR(
- "Memory allocation failure for rule configuration\n");
+ flow->fs_rule.key_iova = DPAA2_VADDR_TO_IOVA(flow->fs_key_addr);
+
+ flow->fs_mask_addr = rte_zmalloc(NULL, 256, 64);
+ if (!flow->fs_mask_addr) {
+ DPAA2_PMD_ERR("Memory allocation failed");
goto mem_failure;
}
+ flow->fs_rule.mask_iova = DPAA2_VADDR_TO_IOVA(flow->fs_mask_addr);
- flow->fs_rule.key_iova = key_iova;
- flow->fs_rule.mask_iova = mask_iova;
-
- flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
- flow->ipaddr_rule.qos_ipsrc_offset =
- IP_ADDRESS_OFFSET_INVALID;
- flow->ipaddr_rule.qos_ipdst_offset =
- IP_ADDRESS_OFFSET_INVALID;
- flow->ipaddr_rule.fs_ipsrc_offset =
- IP_ADDRESS_OFFSET_INVALID;
- flow->ipaddr_rule.fs_ipdst_offset =
- IP_ADDRESS_OFFSET_INVALID;
+ priv->curr = flow;
- ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
- actions, error);
+ ret = dpaa2_generic_flow_set(flow, dev, attr, pattern, actions, error);
if (ret < 0) {
if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
rte_flow_error_set(error, EPERM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- attr, "unknown");
- DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret);
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ attr, "unknown");
+ DPAA2_PMD_ERR("Create flow failed (%d)", ret);
goto creation_error;
}
- return flow;
+ priv->curr = NULL;
+ return (struct rte_flow *)flow;
+
mem_failure:
- rte_flow_error_set(error, EPERM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "memory alloc");
+ rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "memory alloc");
+
creation_error:
- rte_free((void *)flow);
- rte_free((void *)key_iova);
- rte_free((void *)mask_iova);
+ if (flow) {
+ if (flow->qos_key_addr)
+ rte_free(flow->qos_key_addr);
+ if (flow->qos_mask_addr)
+ rte_free(flow->qos_mask_addr);
+ if (flow->fs_key_addr)
+ rte_free(flow->fs_key_addr);
+ if (flow->fs_mask_addr)
+ rte_free(flow->fs_mask_addr);
+ rte_free(flow);
+ }
+ priv->curr = NULL;
return NULL;
}
-static
-int dpaa2_flow_destroy(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+static int
+dpaa2_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *_flow,
+ struct rte_flow_error *error)
{
int ret = 0;
+ struct dpaa2_dev_flow *flow;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct fsl_mc_io *dpni = priv->hw;
- switch (flow->action) {
+ flow = (struct dpaa2_dev_flow *)_flow;
+
+ switch (flow->action_type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
case RTE_FLOW_ACTION_TYPE_PORT_ID:
if (priv->num_rx_tc > 1) {
/* Remove entry from QoS table first */
- ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
- &flow->qos_rule);
+ ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
+ priv->token,
+ &flow->qos_rule);
if (ret < 0) {
- DPAA2_PMD_ERR(
- "Error in removing entry from QoS table(%d)", ret);
+ DPAA2_PMD_ERR("Remove FS QoS entry failed");
+ dpaa2_flow_qos_entry_log("Delete failed", flow,
+ -1);
+ abort();
goto error;
}
}
@@ -4104,34 +3176,37 @@ int dpaa2_flow_destroy(struct rte_eth_dev *dev,
ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
flow->tc_id, &flow->fs_rule);
if (ret < 0) {
- DPAA2_PMD_ERR(
- "Error in removing entry from FS table(%d)", ret);
+ DPAA2_PMD_ERR("Remove entry from FS[%d] failed",
+ flow->tc_id);
goto error;
}
break;
case RTE_FLOW_ACTION_TYPE_RSS:
if (priv->num_rx_tc > 1) {
- ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
- &flow->qos_rule);
+ ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
+ priv->token,
+ &flow->qos_rule);
if (ret < 0) {
- DPAA2_PMD_ERR(
- "Error in entry addition in QoS table(%d)", ret);
+ DPAA2_PMD_ERR("Remove RSS QoS entry failed");
goto error;
}
}
break;
default:
- DPAA2_PMD_ERR(
- "Action type (%d) is not supported", flow->action);
+ DPAA2_PMD_ERR("Action(%d) not supported", flow->action_type);
ret = -ENOTSUP;
break;
}
LIST_REMOVE(flow, next);
- rte_free((void *)(size_t)flow->qos_rule.key_iova);
- rte_free((void *)(size_t)flow->qos_rule.mask_iova);
- rte_free((void *)(size_t)flow->fs_rule.key_iova);
- rte_free((void *)(size_t)flow->fs_rule.mask_iova);
+ if (flow->qos_key_addr)
+ rte_free(flow->qos_key_addr);
+ if (flow->qos_mask_addr)
+ rte_free(flow->qos_mask_addr);
+ if (flow->fs_key_addr)
+ rte_free(flow->fs_key_addr);
+ if (flow->fs_mask_addr)
+ rte_free(flow->fs_mask_addr);
/* Now free the flow */
rte_free(flow);
@@ -4156,12 +3231,12 @@ dpaa2_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct dpaa2_dev_priv *priv = dev->data->dev_private;
- struct rte_flow *flow = LIST_FIRST(&priv->flows);
+ struct dpaa2_dev_flow *flow = LIST_FIRST(&priv->flows);
while (flow) {
- struct rte_flow *next = LIST_NEXT(flow, next);
+ struct dpaa2_dev_flow *next = LIST_NEXT(flow, next);
- dpaa2_flow_destroy(dev, flow, error);
+ dpaa2_flow_destroy(dev, (struct rte_flow *)flow, error);
flow = next;
}
return 0;
@@ -4169,10 +3244,10 @@ dpaa2_flow_flush(struct rte_eth_dev *dev,
static int
dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
- struct rte_flow *flow __rte_unused,
- const struct rte_flow_action *actions __rte_unused,
- void *data __rte_unused,
- struct rte_flow_error *error __rte_unused)
+ struct rte_flow *_flow __rte_unused,
+ const struct rte_flow_action *actions __rte_unused,
+ void *data __rte_unused,
+ struct rte_flow_error *error __rte_unused)
{
return 0;
}
@@ -4189,11 +3264,11 @@ dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
void
dpaa2_flow_clean(struct rte_eth_dev *dev)
{
- struct rte_flow *flow;
+ struct dpaa2_dev_flow *flow;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
while ((flow = LIST_FIRST(&priv->flows)))
- dpaa2_flow_destroy(dev, flow, NULL);
+ dpaa2_flow_destroy(dev, (struct rte_flow *)flow, NULL);
}
const struct rte_flow_ops dpaa2_flow_ops = {
--
2.25.1
next prev parent reply other threads:[~2024-09-18 7:54 UTC|newest]
Thread overview: 229+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-09-13 5:59 [v1 00/43] DPAA2 specific patches vanshika.shukla
2024-09-13 5:59 ` [v1 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-09-13 5:59 ` [v1 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-13 5:59 ` [v1 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-13 5:59 ` [v1 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-13 5:59 ` [v1 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-13 5:59 ` [v1 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-13 5:59 ` [v1 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-13 5:59 ` [v1 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-13 5:59 ` [v1 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-13 5:59 ` [v1 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-13 5:59 ` [v1 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-13 5:59 ` [v1 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-13 5:59 ` [v1 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-13 5:59 ` [v1 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-13 5:59 ` [v1 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-13 5:59 ` [v1 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-13 5:59 ` [v1 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-13 5:59 ` [v1 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-13 5:59 ` [v1 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-13 5:59 ` [v1 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-13 5:59 ` [v1 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-13 5:59 ` [v1 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-13 5:59 ` [v1 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-13 5:59 ` [v1 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-09-13 5:59 ` [v1 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-13 5:59 ` [v1 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-13 5:59 ` [v1 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-13 5:59 ` [v1 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-13 5:59 ` [v1 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-13 5:59 ` [v1 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-13 5:59 ` [v1 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-13 5:59 ` [v1 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-13 5:59 ` [v1 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-13 5:59 ` [v1 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-13 5:59 ` [v1 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-13 5:59 ` [v1 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-13 5:59 ` [v1 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-13 5:59 ` [v1 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-13 5:59 ` [v1 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-13 5:59 ` [v1 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-13 5:59 ` [v1 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-13 5:59 ` [v1 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-13 5:59 ` [v1 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-09-18 7:50 ` [v2 00/43] DPAA2 specific patches vanshika.shukla
2024-09-18 7:50 ` [v2 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-14 12:00 ` [v3 00/43] DPAA2 specific patches vanshika.shukla
2024-10-14 12:00 ` [v3 01/43] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-14 12:00 ` [v3 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-10-14 12:00 ` [v3 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-10-14 12:00 ` [v3 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-10-14 12:00 ` [v3 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-10-14 12:00 ` [v3 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-10-14 12:00 ` [v3 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-10-14 12:00 ` [v3 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-10-14 12:00 ` [v3 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-10-14 12:00 ` [v3 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-10-14 12:00 ` [v3 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-10-14 12:00 ` [v3 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-10-14 12:00 ` [v3 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-10-15 2:27 ` Stephen Hemminger
2024-10-14 12:00 ` [v3 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-10-15 2:29 ` Stephen Hemminger
2024-10-14 12:00 ` [v3 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-10-14 12:00 ` [v3 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-10-15 2:31 ` Stephen Hemminger
2024-10-14 12:01 ` [v3 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-10-14 12:01 ` [v3 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-10-14 12:01 ` [v3 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-10-14 12:01 ` [v3 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-10-14 12:01 ` [v3 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-10-14 12:01 ` [v3 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-10-14 12:01 ` [v3 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-10-14 12:01 ` [v3 24/43] net/dpaa2: flow API refactor vanshika.shukla
2024-10-14 12:01 ` [v3 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-10-14 12:01 ` [v3 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-10-14 12:01 ` [v3 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-10-14 12:01 ` [v3 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-10-14 12:01 ` [v3 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-10-14 12:01 ` [v3 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-10-14 12:01 ` [v3 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-10-14 12:01 ` [v3 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-10-14 12:01 ` [v3 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-10-14 12:01 ` [v3 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-10-14 12:01 ` [v3 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-10-14 12:01 ` [v3 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-10-14 12:01 ` [v3 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-10-14 12:01 ` [v3 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-10-14 12:01 ` [v3 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-10-14 12:01 ` [v3 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-10-14 12:01 ` [v3 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-10-14 12:01 ` [v3 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-10-14 12:01 ` [v3 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-10-15 2:32 ` Stephen Hemminger
2024-10-22 19:12 ` [v4 00/42] DPAA2 specific patches vanshika.shukla
2024-10-22 19:12 ` [v4 01/42] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-22 19:12 ` [v4 02/42] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-10-22 19:12 ` [v4 03/42] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-10-22 19:12 ` [v4 04/42] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-10-22 19:12 ` [v4 05/42] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-10-22 19:12 ` [v4 06/42] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-10-22 19:12 ` [v4 07/42] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-10-22 19:12 ` [v4 08/42] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-10-22 19:12 ` [v4 09/42] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-10-22 19:12 ` [v4 10/42] net/dpaa2: update DPNI link status method vanshika.shukla
2024-10-22 19:12 ` [v4 11/42] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-10-22 19:12 ` [v4 12/42] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-10-22 19:12 ` [v4 13/42] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-10-22 19:12 ` [v4 14/42] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-10-22 19:12 ` [v4 15/42] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-10-22 19:12 ` [v4 16/42] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-10-23 1:02 ` Stephen Hemminger
2024-10-22 19:12 ` [v4 17/42] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-10-22 19:12 ` [v4 18/42] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-10-22 19:12 ` [v4 19/42] bus/fslmc: fix coverity issue vanshika.shukla
2024-10-22 19:12 ` [v4 20/42] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-10-22 19:12 ` [v4 21/42] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-10-22 19:12 ` [v4 22/42] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-10-22 19:12 ` [v4 23/42] net/dpaa2: flow API refactor vanshika.shukla
2024-10-23 0:52 ` Stephen Hemminger
2024-10-23 12:04 ` [EXT] " Vanshika Shukla
2024-10-22 19:12 ` [v4 24/42] net/dpaa2: dump Rx parser result vanshika.shukla
2024-10-22 19:12 ` [v4 25/42] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-10-22 19:12 ` [v4 26/42] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-10-22 19:12 ` [v4 27/42] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-10-22 19:12 ` [v4 28/42] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-10-22 19:12 ` [v4 29/42] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-10-22 19:12 ` [v4 30/42] net/dpaa2: add GTP flow support vanshika.shukla
2024-10-22 19:12 ` [v4 31/42] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-10-22 19:12 ` [v4 32/42] net/dpaa2: soft parser flow verification vanshika.shukla
2024-10-22 19:12 ` [v4 33/42] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-10-22 19:12 ` [v4 34/42] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-10-22 19:12 ` [v4 35/42] net/dpaa2: support software taildrop vanshika.shukla
2024-10-22 19:12 ` [v4 36/42] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-10-22 19:12 ` [v4 37/42] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-10-22 19:12 ` [v4 38/42] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-10-22 19:12 ` [v4 39/42] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-10-22 19:12 ` [v4 40/42] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-10-22 19:12 ` [v4 41/42] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-10-22 19:12 ` [v4 42/42] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-10-23 11:59 ` [v5 00/42] DPAA2 specific patches vanshika.shukla
2024-10-23 11:59 ` [v5 01/42] net/dpaa2: enhance Tx scatter-gather mempool vanshika.shukla
2024-10-23 11:59 ` [v5 02/42] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-10-23 11:59 ` [v5 03/42] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-10-23 11:59 ` [v5 04/42] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-10-23 11:59 ` [v5 05/42] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-10-23 11:59 ` [v5 06/42] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-10-23 11:59 ` [v5 07/42] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-10-23 11:59 ` [v5 08/42] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-10-23 11:59 ` [v5 09/42] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-10-23 11:59 ` [v5 10/42] net/dpaa2: update DPNI link status method vanshika.shukla
2024-10-23 11:59 ` [v5 11/42] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-10-23 11:59 ` [v5 12/42] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-10-23 11:59 ` [v5 13/42] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-10-23 11:59 ` [v5 14/42] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-11-09 17:07 ` Thomas Monjalon
2024-10-23 11:59 ` [v5 15/42] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-10-23 11:59 ` [v5 16/42] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-10-23 11:59 ` [v5 17/42] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-10-23 11:59 ` [v5 18/42] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-10-23 11:59 ` [v5 19/42] bus/fslmc: fix coverity issue vanshika.shukla
2024-10-23 11:59 ` [v5 20/42] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-10-23 11:59 ` [v5 21/42] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-10-23 11:59 ` [v5 22/42] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-10-23 11:59 ` [v5 23/42] net/dpaa2: flow API refactor vanshika.shukla
2024-11-09 19:01 ` Thomas Monjalon
2024-10-23 11:59 ` [v5 24/42] net/dpaa2: dump Rx parser result vanshika.shukla
2024-10-23 11:59 ` [v5 25/42] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-10-23 11:59 ` [v5 26/42] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-10-23 11:59 ` [v5 27/42] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-10-23 11:59 ` [v5 28/42] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-10-23 11:59 ` [v5 29/42] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-10-23 11:59 ` [v5 30/42] net/dpaa2: add GTP flow support vanshika.shukla
2024-10-23 11:59 ` [v5 31/42] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-10-23 11:59 ` [v5 32/42] net/dpaa2: soft parser flow verification vanshika.shukla
2024-10-23 11:59 ` [v5 33/42] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-10-23 11:59 ` [v5 34/42] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-10-23 11:59 ` [v5 35/42] net/dpaa2: support software taildrop vanshika.shukla
2024-10-23 11:59 ` [v5 36/42] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-10-23 11:59 ` [v5 37/42] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-10-23 11:59 ` [v5 38/42] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-10-23 11:59 ` [v5 39/42] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-10-23 11:59 ` [v5 40/42] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-10-23 11:59 ` [v5 41/42] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-10-23 11:59 ` [v5 42/42] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-11-07 11:24 ` [v5 00/42] DPAA2 specific patches Hemant Agrawal
2024-09-18 7:50 ` [v2 02/43] net/dpaa2: support PTP packet one-step timestamp vanshika.shukla
2024-09-18 7:50 ` [v2 03/43] net/dpaa2: add proper MTU debugging print vanshika.shukla
2024-09-18 7:50 ` [v2 04/43] net/dpaa2: add support to dump dpdmux counters vanshika.shukla
2024-09-18 7:50 ` [v2 05/43] bus/fslmc: change dpcon close as internal symbol vanshika.shukla
2024-09-18 7:50 ` [v2 06/43] bus/fslmc: add close API to close DPAA2 device vanshika.shukla
2024-09-18 7:50 ` [v2 07/43] net/dpaa2: dpdmux: add support for CVLAN vanshika.shukla
2024-09-18 7:50 ` [v2 08/43] bus/fslmc: upgrade with MC version 10.37 vanshika.shukla
2024-09-18 7:50 ` [v2 09/43] net/dpaa2: support link state for eth interfaces vanshika.shukla
2024-09-18 7:50 ` [v2 10/43] net/dpaa2: update DPNI link status method vanshika.shukla
2024-09-18 7:50 ` [v2 11/43] net/dpaa2: add new PMD API to check dpaa platform version vanshika.shukla
2024-09-18 7:50 ` [v2 12/43] bus/fslmc: improve BMAN buffer acquire vanshika.shukla
2024-09-18 7:50 ` [v2 13/43] bus/fslmc: get MC VFIO group FD directly vanshika.shukla
2024-09-18 7:50 ` [v2 14/43] bus/fslmc: enhance MC VFIO multiprocess support vanshika.shukla
2024-09-18 7:50 ` [v2 15/43] bus/fslmc: free VFIO group FD in case of add group failure vanshika.shukla
2024-09-18 7:50 ` [v2 16/43] bus/fslmc: dynamic IOVA mode configuration vanshika.shukla
2024-09-18 7:50 ` [v2 17/43] bus/fslmc: remove VFIO IRQ mapping vanshika.shukla
2024-09-18 7:50 ` [v2 18/43] bus/fslmc: create dpaa2 device with it's object vanshika.shukla
2024-09-18 7:50 ` [v2 19/43] bus/fslmc: fix coverity issue vanshika.shukla
2024-09-18 7:50 ` [v2 20/43] bus/fslmc: fix invalid error FD code vanshika.shukla
2024-09-18 7:50 ` [v2 21/43] bus/fslmc: change qbman eq desc from d to desc vanshika.shukla
2024-09-18 7:50 ` [v2 22/43] bus/fslmc: introduce VFIO DMA mapping API for fslmc vanshika.shukla
2024-09-18 7:50 ` [v2 23/43] net/dpaa2: change miss flow ID macro name vanshika.shukla
2024-09-18 7:50 ` vanshika.shukla [this message]
2024-09-18 7:50 ` [v2 25/43] net/dpaa2: dump Rx parser result vanshika.shukla
2024-09-18 7:50 ` [v2 26/43] net/dpaa2: enhancement of raw flow extract vanshika.shukla
2024-09-18 7:50 ` [v2 27/43] net/dpaa2: frame attribute flags parser vanshika.shukla
2024-09-18 7:50 ` [v2 28/43] net/dpaa2: add VXLAN distribution support vanshika.shukla
2024-09-18 7:50 ` [v2 29/43] net/dpaa2: protocol inside tunnel distribution vanshika.shukla
2024-09-18 7:50 ` [v2 30/43] net/dpaa2: eCPRI support by parser result vanshika.shukla
2024-09-18 7:50 ` [v2 31/43] net/dpaa2: add GTP flow support vanshika.shukla
2024-09-18 7:50 ` [v2 32/43] net/dpaa2: check if Soft parser is loaded vanshika.shukla
2024-09-18 7:50 ` [v2 33/43] net/dpaa2: soft parser flow verification vanshika.shukla
2024-09-18 7:50 ` [v2 34/43] net/dpaa2: add flow support for IPsec AH and ESP vanshika.shukla
2024-09-18 7:50 ` [v2 35/43] net/dpaa2: fix memory corruption in TM vanshika.shukla
2024-09-18 7:50 ` [v2 36/43] net/dpaa2: support software taildrop vanshika.shukla
2024-09-18 7:50 ` [v2 37/43] net/dpaa2: check IOVA before sending MC command vanshika.shukla
2024-09-18 7:50 ` [v2 38/43] net/dpaa2: improve DPDMUX error behavior settings vanshika.shukla
2024-09-18 7:50 ` [v2 39/43] net/dpaa2: store drop priority in mbuf vanshika.shukla
2024-09-18 7:50 ` [v2 40/43] net/dpaa2: add API to get endpoint name vanshika.shukla
2024-09-18 7:50 ` [v2 41/43] net/dpaa2: support VLAN traffic splitting vanshika.shukla
2024-09-18 7:50 ` [v2 42/43] net/dpaa2: add support for C-VLAN and MAC vanshika.shukla
2024-09-18 7:50 ` [v2 43/43] net/dpaa2: dpdmux single flow/multiple rules support vanshika.shukla
2024-10-10 2:54 ` [v2 00/43] DPAA2 specific patches Stephen Hemminger
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240918075056.1838654-25-vanshika.shukla@nxp.com \
--to=vanshika.shukla@nxp.com \
--cc=dev@dpdk.org \
--cc=hemant.agrawal@nxp.com \
--cc=jun.yang@nxp.com \
--cc=sachin.saxena@nxp.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).