From: Hemant Agrawal <hemant.agrawal@nxp.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com, Jun Yang <jun.yang@nxp.com>
Subject: [dpdk-dev] [PATCH v2 20/29] net/dpaa2: define the size of table entry
Date: Tue, 7 Jul 2020 14:52:35 +0530
Message-ID: <20200707092244.12791-21-hemant.agrawal@nxp.com>
In-Reply-To: <20200707092244.12791-1-hemant.agrawal@nxp.com>
From: Jun Yang <jun.yang@nxp.com>
If the entry size is not larger than 27 bytes, the MC allocates one
TCAM entry; otherwise it allocates two TCAM entries.

The size of the extracts performed by hardware must not exceed the
TCAM entry size (27 or 54 bytes), so define the flow entry size as 54.
Signed-off-by: Jun Yang <jun.yang@nxp.com>
---
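For reference, a minimal sketch of the sizing rule applied here. Only
FIXED_ENTRY_SIZE, the 27-byte TCAM entry size, and the split between the
real key size and the programmed rule size come from the patch; the
struct and helper names below are illustrative, not part of the driver:

#include <stdint.h>

#define SINGLE_TCAM_ENTRY_SIZE 27  /* one TCAM entry (illustrative name) */
#define FIXED_ENTRY_SIZE       54  /* two TCAM entries, as in the patch */

/* Illustrative stand-in for the real/programmed key-size split. */
struct example_rule {
	uint8_t real_key_size;   /* bytes actually extracted by HW */
	uint8_t rule_key_size;   /* size programmed into the MC rule */
};

/* MC allocates one TCAM entry for keys up to 27 bytes, two otherwise. */
static inline int
example_tcam_entries(uint8_t real_key_size)
{
	return (real_key_size <= SINGLE_TCAM_ENTRY_SIZE) ? 1 : 2;
}

/* Keep the real extract size for building key/mask, but always program
 * the fixed 54-byte size so the HW extract size never exceeds the
 * allocated TCAM entry size.
 */
static inline void
example_set_key_size(struct example_rule *r, uint8_t extracted_bytes)
{
	r->real_key_size = extracted_bytes;
	r->rule_key_size = FIXED_ENTRY_SIZE;
}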
drivers/net/dpaa2/dpaa2_flow.c | 90 ++++++++++++++++++++++------------
1 file changed, 60 insertions(+), 30 deletions(-)
diff --git a/drivers/net/dpaa2/dpaa2_flow.c b/drivers/net/dpaa2/dpaa2_flow.c
index 760a8a793..bcbd5977a 100644
--- a/drivers/net/dpaa2/dpaa2_flow.c
+++ b/drivers/net/dpaa2/dpaa2_flow.c
@@ -29,6 +29,8 @@
*/
int mc_l4_port_identification;
+#define FIXED_ENTRY_SIZE 54
+
enum flow_rule_ipaddr_type {
FLOW_NONE_IPADDR,
FLOW_IPV4_ADDR,
@@ -47,7 +49,8 @@ struct rte_flow {
LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
struct dpni_rule_cfg qos_rule;
struct dpni_rule_cfg fs_rule;
- uint8_t key_size;
+ uint8_t qos_real_key_size;
+ uint8_t fs_real_key_size;
uint8_t tc_id; /** Traffic Class ID. */
uint8_t tc_index; /** index within this Traffic Class. */
enum rte_flow_action_type action;
@@ -478,6 +481,7 @@ dpaa2_flow_rule_data_set(
prot, field);
return -1;
}
+
memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
@@ -523,9 +527,11 @@ _dpaa2_flow_rule_move_ipaddr_tail(
len = NH_FLD_IPV6_ADDR_SIZE;
memcpy(tmp, (char *)key_src, len);
+ memset((char *)key_src, 0, len);
memcpy((char *)key_dst, tmp, len);
memcpy(tmp, (char *)mask_src, len);
+ memset((char *)mask_src, 0, len);
memcpy((char *)mask_dst, tmp, len);
return 0;
@@ -1251,8 +1257,7 @@ dpaa2_configure_flow_generic_ip(
return -1;
}
- local_cfg |= (DPAA2_QOS_TABLE_RECONFIGURE |
- DPAA2_QOS_TABLE_IPADDR_EXTRACT);
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
}
index = dpaa2_flow_extract_search(
@@ -1269,8 +1274,7 @@ dpaa2_configure_flow_generic_ip(
return -1;
}
- local_cfg |= (DPAA2_FS_TABLE_RECONFIGURE |
- DPAA2_FS_TABLE_IPADDR_EXTRACT);
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
}
if (spec_ipv4)
@@ -1339,8 +1343,7 @@ dpaa2_configure_flow_generic_ip(
return -1;
}
- local_cfg |= (DPAA2_QOS_TABLE_RECONFIGURE |
- DPAA2_QOS_TABLE_IPADDR_EXTRACT);
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
}
index = dpaa2_flow_extract_search(
@@ -1361,8 +1364,7 @@ dpaa2_configure_flow_generic_ip(
return -1;
}
- local_cfg |= (DPAA2_FS_TABLE_RECONFIGURE |
- DPAA2_FS_TABLE_IPADDR_EXTRACT);
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
}
if (spec_ipv4)
@@ -2641,7 +2643,7 @@ dpaa2_flow_entry_update(
char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
- int extend = -1, extend1, size;
+ int extend = -1, extend1, size = -1;
uint16_t qos_index;
while (curr) {
@@ -2696,6 +2698,9 @@ dpaa2_flow_entry_update(
else
extend = extend1;
+ RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
+ (size == NH_FLD_IPV6_ADDR_SIZE));
+
memcpy(ipsrc_key,
(char *)(size_t)curr->qos_rule.key_iova +
curr->ipaddr_rule.qos_ipsrc_offset,
@@ -2725,6 +2730,9 @@ dpaa2_flow_entry_update(
else
extend = extend1;
+ RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
+ (size == NH_FLD_IPV6_ADDR_SIZE));
+
memcpy(ipdst_key,
(char *)(size_t)curr->qos_rule.key_iova +
curr->ipaddr_rule.qos_ipdst_offset,
@@ -2745,6 +2753,8 @@ dpaa2_flow_entry_update(
}
if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
+ RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
+ (size == NH_FLD_IPV6_ADDR_SIZE));
memcpy((char *)(size_t)curr->qos_rule.key_iova +
curr->ipaddr_rule.qos_ipsrc_offset,
ipsrc_key,
@@ -2755,6 +2765,8 @@ dpaa2_flow_entry_update(
size);
}
if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
+ RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
+ (size == NH_FLD_IPV6_ADDR_SIZE));
memcpy((char *)(size_t)curr->qos_rule.key_iova +
curr->ipaddr_rule.qos_ipdst_offset,
ipdst_key,
@@ -2766,7 +2778,9 @@ dpaa2_flow_entry_update(
}
if (extend >= 0)
- curr->qos_rule.key_size += extend;
+ curr->qos_real_key_size += extend;
+
+ curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
priv->token, &curr->qos_rule,
@@ -2873,7 +2887,8 @@ dpaa2_flow_entry_update(
}
if (extend >= 0)
- curr->fs_rule.key_size += extend;
+ curr->fs_real_key_size += extend;
+ curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
priv->token, curr->tc_id, curr->tc_index,
@@ -3093,31 +3108,34 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
priv->qos_entries);
return -1;
}
- flow->qos_rule.key_size = priv->extract
- .qos_key_extract.key_info.key_total_size;
+ flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
if (flow->ipaddr_rule.qos_ipdst_offset >=
flow->ipaddr_rule.qos_ipsrc_offset) {
- flow->qos_rule.key_size =
+ flow->qos_real_key_size =
flow->ipaddr_rule.qos_ipdst_offset +
NH_FLD_IPV4_ADDR_SIZE;
} else {
- flow->qos_rule.key_size =
+ flow->qos_real_key_size =
flow->ipaddr_rule.qos_ipsrc_offset +
NH_FLD_IPV4_ADDR_SIZE;
}
- } else if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV6_ADDR) {
+ } else if (flow->ipaddr_rule.ipaddr_type ==
+ FLOW_IPV6_ADDR) {
if (flow->ipaddr_rule.qos_ipdst_offset >=
flow->ipaddr_rule.qos_ipsrc_offset) {
- flow->qos_rule.key_size =
+ flow->qos_real_key_size =
flow->ipaddr_rule.qos_ipdst_offset +
NH_FLD_IPV6_ADDR_SIZE;
} else {
- flow->qos_rule.key_size =
+ flow->qos_real_key_size =
flow->ipaddr_rule.qos_ipsrc_offset +
NH_FLD_IPV6_ADDR_SIZE;
}
}
+
+ flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
+
ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
priv->token, &flow->qos_rule,
flow->tc_id, qos_index,
@@ -3134,17 +3152,20 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
priv->fs_entries);
return -1;
}
- flow->fs_rule.key_size = priv->extract
- .tc_key_extract[attr->group].key_info.key_total_size;
+
+ flow->fs_real_key_size =
+ priv->extract.tc_key_extract[flow->tc_id]
+ .key_info.key_total_size;
+
if (flow->ipaddr_rule.ipaddr_type ==
FLOW_IPV4_ADDR) {
if (flow->ipaddr_rule.fs_ipdst_offset >=
flow->ipaddr_rule.fs_ipsrc_offset) {
- flow->fs_rule.key_size =
+ flow->fs_real_key_size =
flow->ipaddr_rule.fs_ipdst_offset +
NH_FLD_IPV4_ADDR_SIZE;
} else {
- flow->fs_rule.key_size =
+ flow->fs_real_key_size =
flow->ipaddr_rule.fs_ipsrc_offset +
NH_FLD_IPV4_ADDR_SIZE;
}
@@ -3152,15 +3173,18 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
FLOW_IPV6_ADDR) {
if (flow->ipaddr_rule.fs_ipdst_offset >=
flow->ipaddr_rule.fs_ipsrc_offset) {
- flow->fs_rule.key_size =
+ flow->fs_real_key_size =
flow->ipaddr_rule.fs_ipdst_offset +
NH_FLD_IPV6_ADDR_SIZE;
} else {
- flow->fs_rule.key_size =
+ flow->fs_real_key_size =
flow->ipaddr_rule.fs_ipsrc_offset +
NH_FLD_IPV6_ADDR_SIZE;
}
}
+
+ flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
+
ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
flow->tc_id, flow->tc_index,
&flow->fs_rule, &action);
@@ -3259,8 +3283,10 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
priv->qos_entries);
return -1;
}
- flow->qos_rule.key_size =
+
+ flow->qos_real_key_size =
priv->extract.qos_key_extract.key_info.key_total_size;
+ flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
&flow->qos_rule, flow->tc_id,
qos_index, 0, 0);
@@ -3283,11 +3309,15 @@ dpaa2_generic_flow_set(struct rte_flow *flow,
}
if (!ret) {
- ret = dpaa2_flow_entry_update(priv, flow->tc_id);
- if (ret) {
- DPAA2_PMD_ERR("Flow entry update failed.");
+ if (is_keycfg_configured &
+ (DPAA2_QOS_TABLE_RECONFIGURE |
+ DPAA2_FS_TABLE_RECONFIGURE)) {
+ ret = dpaa2_flow_entry_update(priv, flow->tc_id);
+ if (ret) {
+ DPAA2_PMD_ERR("Flow entry update failed.");
- return -1;
+ return -1;
+ }
}
/* New rules are inserted. */
if (!curr) {
--
2.17.1