* [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
@ 2017-11-24 8:43 Wei Zhao
2017-12-21 3:12 ` Zhang, Helin
` (2 more replies)
0 siblings, 3 replies; 17+ messages in thread
From: Wei Zhao @ 2017-11-24 8:43 UTC (permalink / raw)
To: dev; +Cc: Wei Zhao
rte_flow is actually defined to include RSS, but until now,
RSS has been outside of rte_flow.
This patch moves the existing i40e RSS support into rte_flow.
This patch also enables queue region configuration
using the flow API for i40e.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 91 +++++++++++
drivers/net/i40e/i40e_ethdev.h | 11 ++
drivers/net/i40e/i40e_flow.c | 336 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 438 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 811cc9f..75b3bf3 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1349,6 +1349,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize queue region configuration */
i40e_init_queue_region_conf(dev);
+ /* initialize rss configuration from rte_flow */
+ memset(&pf->rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+
return 0;
err_init_fdir_filter_list:
@@ -10943,12 +10947,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
}
}
+/* Restore rss filter */
+static inline void
+i40e_rss_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_rte_flow_rss_conf *conf =
+ &pf->rss_info;
+ if (conf->num)
+ i40e_config_rss_filter(pf, conf, TRUE);
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
i40e_fdir_filter_restore(pf);
+ i40e_rss_filter_restore(pf);
}
static bool
@@ -11366,6 +11381,82 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
return ret;
}
+int
+i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t i, lut = 0;
+ uint16_t j, num;
+ struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+
+ if (!add) {
+ if (memcmp(conf, rss_info,
+ sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
+ i40e_pf_disable_rss(pf);
+ memset(rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (rss_info->num)
+ return -EINVAL;
+
+ /* If both VMDQ and RSS enabled, not all of PF queues are configured.
+ * It's necessary to calculate the actual PF queues that are configured.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ num = i40e_pf_calc_configured_queues_num(pf);
+ else
+ num = pf->dev_data->nb_rx_queues;
+
+ num = RTE_MIN(num, conf->num);
+ PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
+ return -ENOTSUP;
+ }
+
+ /* Fill in redirection table */
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
+ i40e_pf_disable_rss(pf);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+ (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Random default keys */
+ static uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+ rss_conf.rss_key = (uint8_t *)rss_key_default;
+ rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
+
+ return i40e_hw_rss_hash_set(pf, &rss_conf);
+
+ rte_memcpy(rss_info,
+ conf, sizeof(struct i40e_rte_flow_rss_conf));
+
+ return 0;
+}
+
RTE_INIT(i40e_init_log);
static void
i40e_init_log(void)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index cd67453..0a59e39 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -891,6 +891,13 @@ struct i40e_customized_pctype {
bool valid; /* Check if it's valid */
};
+struct i40e_rte_flow_rss_conf {
+ struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
+ uint16_t queue_region_conf; /**< Queue region config flag */
+ uint16_t num; /**< Number of entries in queue[]. */
+ uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
+};
+
/*
* Structure to store private data specific for PF instance.
*/
@@ -945,6 +952,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
+ struct i40e_rte_flow_rss_conf rss_info; /* rss info */
struct i40e_queue_regions queue_region; /* queue region info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
@@ -1071,6 +1079,7 @@ union i40e_filter_t {
struct i40e_fdir_filter_conf fdir_filter;
struct rte_eth_tunnel_filter_conf tunnel_filter;
struct i40e_tunnel_filter_conf consistent_tunnel_filter;
+ struct i40e_rte_flow_rss_conf rss_conf;
};
typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
@@ -1198,6 +1207,8 @@ int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
+int i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 7e4936e..e127f4c 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -138,6 +138,8 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
+static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -4095,6 +4097,297 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
}
static int
+i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ uint8_t *action_flag,
+ struct i40e_queue_regions *info)
+{
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ return 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ *action_flag = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) {
+ info->region[0].user_priority[0] =
+ (vlan_spec->tci >> 13) & 0x7;
+ info->region[0].user_priority_num = 1;
+ info->queue_region_number = 1;
+ *action_flag = 0;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ uint8_t *action_flag,
+ struct i40e_queue_regions *conf_info,
+ union i40e_filter_t *filter)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_queue_regions *info = &pf->queue_region;
+ struct i40e_rte_flow_rss_conf *rss_config =
+ &filter->rss_conf;
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ uint16_t i, j, n;
+ uint32_t index = 0;
+
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ rss = (const struct rte_flow_action_rss *)act->conf;
+
+ if (action_flag) {
+ for (n = 0; n < 64; n++) {
+ if (rss->rss_conf->rss_hf & (1 << n)) {
+ conf_info->region[0].user_priority[0] = n;
+ conf_info->region[0].user_priority_num = 1;
+ conf_info->queue_region_number = 1;
+ break;
+ }
+ }
+ }
+
+ /**
+ * rss only supports forwarding,
+ * check if the first not void action is RSS.
+ */
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < conf_info->queue_region_number; n++) {
+ if (conf_info->region[n].user_priority_num ||
+ conf_info->region[n].flowtype_num) {
+ if (!((rte_is_power_of_2(rss->num)) &&
+ rss->num <= 64)) {
+ PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].user_priority[n] >=
+ I40E_MAX_USER_PRIORITY) {
+ PMD_DRV_LOG(ERR, "the user priority max index is 7");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].hw_flowtype[n] >=
+ I40E_FILTER_PCTYPE_MAX) {
+ PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
+ return -rte_errno;
+ }
+
+ if (rss_info->num < rss->num ||
+ rss_info->queue[0] < rss->queue[0] ||
+ (rss->queue[0] + rss->num >
+ rss_info->num + rss_info->queue[0])) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ if (info->region[i].queue_num == rss->num &&
+ info->region[i].queue_start_index ==
+ rss->queue[0])
+ break;
+ }
+
+ if (i == info->queue_region_number) {
+ if (i > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return -rte_errno;
+ }
+
+ info->region[i].queue_num =
+ rss->num;
+ info->region[i].queue_start_index =
+ rss->queue[0];
+ info->region[i].region_id =
+ info->queue_region_number;
+
+ j = info->region[i].user_priority_num;
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] =
+ conf_info->
+ region[n].user_priority[0];
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] =
+ conf_info->
+ region[n].hw_flowtype[0];
+ info->region[i].flowtype_num++;
+ }
+ info->queue_region_number++;
+ } else {
+ j = info->region[i].user_priority_num;
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] =
+ conf_info->
+ region[n].user_priority[0];
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] =
+ conf_info->
+ region[n].hw_flowtype[0];
+ info->region[i].flowtype_num++;
+ }
+ }
+ }
+
+ rss_config->queue_region_conf = TRUE;
+ return 0;
+ }
+
+ if (!rss || !rss->num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+ if (rss->rss_conf)
+ rss_config->rss_conf = *rss->rss_conf;
+ else
+ rss_config->rss_conf.rss_hf =
+ pf->adapter->flow_types_mask;
+
+ for (n = 0; n < rss->num; ++n)
+ rss_config->queue[n] = rss->queue[n];
+ rss_config->num = rss->num;
+ index++;
+
+ /* check if the next not void action is END */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+ rss_config->queue_region_conf = FALSE;
+
+ return 0;
+}
+
+static int
+i40e_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ union i40e_filter_t *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct i40e_queue_regions info;
+ uint8_t action_flag = 0;
+
+ memset(&info, 0, sizeof(struct i40e_queue_regions));
+
+ ret = i40e_flow_parse_rss_pattern(dev, pattern,
+ error, &action_flag, &info);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_rss_action(dev, actions, error,
+ &action_flag, &info, filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_HASH;
+
+ return 0;
+}
+
+static int
+i40e_config_rss_filter_set(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf, bool add)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (conf->queue_region_conf) {
+ i40e_flush_queue_region_all_conf(dev, hw, pf, add);
+ conf->queue_region_conf = 0;
+ } else {
+ i40e_config_rss_filter(pf, conf, add);
+ }
+ return 0;
+}
+
+static int
+i40e_config_rss_filter_del(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf, bool add)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ i40e_flush_queue_region_all_conf(dev, hw, pf, add);
+
+ i40e_config_rss_filter(pf, conf, add);
+ return 0;
+}
+
+static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -4130,6 +4423,17 @@ i40e_flow_validate(struct rte_eth_dev *dev,
memset(&cons_filter, 0, sizeof(cons_filter));
+ /* Get the non-void item of action */
+ while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ i++;
+
+ if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ ret = i40e_parse_rss_filter(dev, attr, pattern,
+ actions, &cons_filter, error);
+ return ret;
+ }
+
+ i = 0;
/* Get the non-void item number of pattern */
while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
@@ -4217,6 +4521,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
i40e_tunnel_filter_list);
break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_set(dev,
+ &cons_filter.rss_conf, 1);
+ flow->rule = &pf->rss_info;
+ break;
default:
goto free_flow;
}
@@ -4255,6 +4564,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_flow_add_del_fdir_filter(dev,
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_del(dev,
+ (struct i40e_rte_flow_rss_conf *)flow->rule, 0);
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -4397,6 +4709,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return -rte_errno;
}
+ ret = i40e_flow_flush_rss_filter(dev);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush rss flows.");
+ return -rte_errno;
+ }
+
return ret;
}
@@ -4487,3 +4807,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
return ret;
}
+
+/* remove the rss filter */
+static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int32_t ret = -EINVAL;
+
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ if (rss_info->num)
+ ret = i40e_config_rss_filter(pf, rss_info, FALSE);
+ return ret;
+}
--
2.9.3
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
2017-11-24 8:43 [dpdk-dev] [PATCH] net/i40e: move RSS to flow API Wei Zhao
@ 2017-12-21 3:12 ` Zhang, Helin
2017-12-22 4:36 ` Zhang, Qi Z
2018-01-08 8:35 ` [dpdk-dev] [PATCH v2] " Wei Zhao
2 siblings, 0 replies; 17+ messages in thread
From: Zhang, Helin @ 2017-12-21 3:12 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Zhao1, Wei
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wei Zhao
> Sent: Friday, November 24, 2017 4:43 PM
> To: dev@dpdk.org
> Cc: Zhao1, Wei
> Subject: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
>
> Rte_flow actually defined to include RSS, but till now, RSS is out of rte_flow.
> This patch is to move i40e existing RSS to rte_flow.
> This patch also enable queue region configuration using flow API for i40e.
>
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
I saw some checkpatch warnings reported; please help to fix them.
Thanks,
Helin
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
2017-11-24 8:43 [dpdk-dev] [PATCH] net/i40e: move RSS to flow API Wei Zhao
2017-12-21 3:12 ` Zhang, Helin
@ 2017-12-22 4:36 ` Zhang, Qi Z
2018-01-07 15:43 ` Zhang, Helin
` (2 more replies)
2018-01-08 8:35 ` [dpdk-dev] [PATCH v2] " Wei Zhao
2 siblings, 3 replies; 17+ messages in thread
From: Zhang, Qi Z @ 2017-12-22 4:36 UTC (permalink / raw)
To: Zhao1, Wei, dev; +Cc: Zhao1, Wei
Hi Wei:
Please check my comments below.
Besides, there are some lines exceeding 80 characters that need to be fixed.
Regards
Qi
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wei Zhao
> Sent: Friday, November 24, 2017 4:43 PM
> To: dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>
> Subject: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
>
> Rte_flow actually defined to include RSS, but till now, RSS is out of rte_flow.
> This patch is to move i40e existing RSS to rte_flow.
> This patch also enable queue region configuration using flow API for i40e.
>
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---
> drivers/net/i40e/i40e_ethdev.c | 91 +++++++++++
> drivers/net/i40e/i40e_ethdev.h | 11 ++
> drivers/net/i40e/i40e_flow.c | 336
> +++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 438 insertions(+)
>
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 811cc9f..75b3bf3 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -1349,6 +1349,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
> /* initialize queue region configuration */
> i40e_init_queue_region_conf(dev);
>
> + /* initialize rss configuration from rte_flow */
> + memset(&pf->rss_info, 0,
> + sizeof(struct i40e_rte_flow_rss_conf));
> +
> return 0;
>
> err_init_fdir_filter_list:
> @@ -10943,12 +10947,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
> }
> }
>
> +/* Restore rss filter */
> +static inline void
> +i40e_rss_filter_restore(struct i40e_pf *pf) {
> + struct i40e_rte_flow_rss_conf *conf =
> + &pf->rss_info;
> + if (conf->num)
> + i40e_config_rss_filter(pf, conf, TRUE); }
> +
> static void
> i40e_filter_restore(struct i40e_pf *pf) {
> i40e_ethertype_filter_restore(pf);
> i40e_tunnel_filter_restore(pf);
> i40e_fdir_filter_restore(pf);
> + i40e_rss_filter_restore(pf);
> }
>
> static bool
> @@ -11366,6 +11381,82 @@ i40e_cloud_filter_qinq_create(struct i40e_pf
> *pf)
> return ret;
> }
>
> +int
> +i40e_config_rss_filter(struct i40e_pf *pf,
> + struct i40e_rte_flow_rss_conf *conf, bool add) {
> + struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> + uint32_t i, lut = 0;
> + uint16_t j, num;
> + struct rte_eth_rss_conf rss_conf = conf->rss_conf;
> + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> +
> + if (!add) {
> + if (memcmp(conf, rss_info,
> + sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
> + i40e_pf_disable_rss(pf);
> + memset(rss_info, 0,
> + sizeof(struct i40e_rte_flow_rss_conf));
> + return 0;
> + }
> + return -EINVAL;
> + }
> +
> + if (rss_info->num)
> + return -EINVAL;
> +
> + /* If both VMDQ and RSS enabled, not all of PF queues are configured.
> + * It's necessary to calculate the actual PF queues that are configured.
> + */
> + if (pf->dev_data->dev_conf.rxmode.mq_mode &
> ETH_MQ_RX_VMDQ_FLAG)
> + num = i40e_pf_calc_configured_queues_num(pf);
> + else
> + num = pf->dev_data->nb_rx_queues;
> +
> + num = RTE_MIN(num, conf->num);
> + PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are
> configured",
> + num);
> +
> + if (num == 0) {
> + PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
> + return -ENOTSUP;
> + }
> +
> + /* Fill in redirection table */
> + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
> + if (j == num)
> + j = 0;
> + lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
> + hw->func_caps.rss_table_entry_width) - 1));
> + if ((i & 3) == 3)
> + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
> + }
> +
> + if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
> + i40e_pf_disable_rss(pf);
> + return 0;
> + }
> + if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
> + (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
> + /* Random default keys */
> + static uint32_t rss_key_default[] = {0x6b793944,
> + 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
> + 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
> + 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
> +
> + rss_conf.rss_key = (uint8_t *)rss_key_default;
> + rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
> + sizeof(uint32_t);
> + }
> +
> + return i40e_hw_rss_hash_set(pf, &rss_conf);
> +
> + rte_memcpy(rss_info,
> + conf, sizeof(struct i40e_rte_flow_rss_conf));
> +
> + return 0;
> +}
> +
> RTE_INIT(i40e_init_log);
> static void
> i40e_init_log(void)
> diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
> index cd67453..0a59e39 100644
> --- a/drivers/net/i40e/i40e_ethdev.h
> +++ b/drivers/net/i40e/i40e_ethdev.h
> @@ -891,6 +891,13 @@ struct i40e_customized_pctype {
> bool valid; /* Check if it's valid */
> };
>
> +struct i40e_rte_flow_rss_conf {
> + struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
> + uint16_t queue_region_conf; /**< Queue region config flag */
> + uint16_t num; /**< Number of entries in queue[]. */
> + uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */ };
> +
> /*
> * Structure to store private data specific for PF instance.
> */
> @@ -945,6 +952,7 @@ struct i40e_pf {
> struct i40e_fdir_info fdir; /* flow director info */
> struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
> struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
> + struct i40e_rte_flow_rss_conf rss_info; /* rss info */
> struct i40e_queue_regions queue_region; /* queue region info */
> struct i40e_fc_conf fc_conf; /* Flow control conf */
> struct i40e_mirror_rule_list mirror_list; @@ -1071,6 +1079,7 @@ union
> i40e_filter_t {
> struct i40e_fdir_filter_conf fdir_filter;
> struct rte_eth_tunnel_filter_conf tunnel_filter;
> struct i40e_tunnel_filter_conf consistent_tunnel_filter;
> + struct i40e_rte_flow_rss_conf rss_conf;
> };
>
> typedef int (*parse_filter_t)(struct rte_eth_dev *dev, @@ -1198,6 +1207,8
> @@ int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb); int
> i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
> struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on); void
> i40e_init_queue_region_conf(struct rte_eth_dev *dev);
> +int i40e_config_rss_filter(struct i40e_pf *pf,
> + struct i40e_rte_flow_rss_conf *conf, bool add);
>
> #define I40E_DEV_TO_PCI(eth_dev) \
> RTE_DEV_TO_PCI((eth_dev)->device)
> diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index
> 7e4936e..e127f4c 100644
> --- a/drivers/net/i40e/i40e_flow.c
> +++ b/drivers/net/i40e/i40e_flow.c
> @@ -138,6 +138,8 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf
> *pf); static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf); static
> int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf); static int
> +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev); static int
> i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
> const struct rte_flow_attr *attr,
> const struct rte_flow_item pattern[], @@ -4095,6
> +4097,297 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, }
>
> static int
> +i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
> + const struct rte_flow_item *pattern,
> + struct rte_flow_error *error,
> + uint8_t *action_flag,
> + struct i40e_queue_regions *info) {
> + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
> + const struct rte_flow_item *item = pattern;
> + enum rte_flow_item_type item_type;
> +
> + if (item->type == RTE_FLOW_ITEM_TYPE_END)
> + return 0;
> +
> + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> + if (item->last) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Not support range");
> + return -rte_errno;
> + }
> + item_type = item->type;
> + switch (item_type) {
> + case RTE_FLOW_ITEM_TYPE_ETH:
> + *action_flag = 1;
> + break;
> + case RTE_FLOW_ITEM_TYPE_VLAN:
> + vlan_spec =
> + (const struct rte_flow_item_vlan *)item->spec;
> + vlan_mask =
> + (const struct rte_flow_item_vlan *)item->mask;
> + if (vlan_spec && vlan_mask) {
> + if (vlan_mask->tci ==
> + rte_cpu_to_be_16(I40E_TCI_MASK)) {
> + info->region[0].user_priority[0] =
> + (vlan_spec->tci >> 13) & 0x7;
> + info->region[0].user_priority_num = 1;
> + info->queue_region_number = 1;
> + *action_flag = 0;
> + }
> + }
> + break;
> + default:
> + break;
> + }
> + }
> +
> + return 0;
[Qi:] The function only checks item->last; besides, it seems all kinds of pattern sequences will be accepted, which may not match the device's capability.
I suggest adding stricter pattern checks and more comments to explain the acceptable patterns.
> +}
> +
> +static int
> +i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
> + const struct rte_flow_action *actions,
> + struct rte_flow_error *error,
> + uint8_t *action_flag,
> + struct i40e_queue_regions *conf_info,
> + union i40e_filter_t *filter)
> +{
> + const struct rte_flow_action *act;
> + const struct rte_flow_action_rss *rss;
> + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> + struct i40e_queue_regions *info = &pf->queue_region;
> + struct i40e_rte_flow_rss_conf *rss_config =
> + &filter->rss_conf;
> + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> + uint16_t i, j, n;
> + uint32_t index = 0;
> +
> + NEXT_ITEM_OF_ACTION(act, actions, index);
> + rss = (const struct rte_flow_action_rss *)act->conf;
> +
> + if (action_flag) {
> + for (n = 0; n < 64; n++) {
> + if (rss->rss_conf->rss_hf & (1 << n)) {
> + conf_info->region[0].user_priority[0] = n;
> + conf_info->region[0].user_priority_num = 1;
> + conf_info->queue_region_number = 1;
> + break;
> + }
> + }
> + }
[Qi:] Converting act->conf to struct rte_flow_action_rss and accessing its data should only happen after you have checked that act->type is RTE_FLOW_ACTION_TYPE_RSS.
So, it is better to swap this with the following type-check code.
> +
> + /**
> + * rss only supports forwarding,
> + * check if the first not void action is RSS.
> + */
> + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
> + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION,
> + act, "Not supported action.");
> + return -rte_errno;
> + }
> +
> + for (n = 0; n < conf_info->queue_region_number; n++) {
> + if (conf_info->region[n].user_priority_num ||
> + conf_info->region[n].flowtype_num) {
> + if (!((rte_is_power_of_2(rss->num)) &&
> + rss->num <= 64)) {
> + PMD_DRV_LOG(ERR, "The region sizes should be any of
> the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
> + "total number of queues do not exceed the VSI
> allocation");
> + return -rte_errno;
> + }
> +
> + if (conf_info->region[n].user_priority[n] >=
> + I40E_MAX_USER_PRIORITY) {
> + PMD_DRV_LOG(ERR, "the user priority max index is 7");
> + return -rte_errno;
> + }
> +
> + if (conf_info->region[n].hw_flowtype[n] >=
> + I40E_FILTER_PCTYPE_MAX) {
> + PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max
> index is 63");
> + return -rte_errno;
> + }
> +
> + if (rss_info->num < rss->num ||
> + rss_info->queue[0] < rss->queue[0] ||
> + (rss->queue[0] + rss->num >
> + rss_info->num + rss_info->queue[0])) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION,
> + act,
> + "no valid queues");
> + return -rte_errno;
> + }
> +
> + for (i = 0; i < info->queue_region_number; i++) {
> + if (info->region[i].queue_num == rss->num &&
> + info->region[i].queue_start_index ==
> + rss->queue[0])
> + break;
> + }
> +
> + if (i == info->queue_region_number) {
> + if (i > I40E_REGION_MAX_INDEX) {
> + PMD_DRV_LOG(ERR, "the queue region max index is
> 7");
> + return -rte_errno;
> + }
> +
> + info->region[i].queue_num =
> + rss->num;
> + info->region[i].queue_start_index =
> + rss->queue[0];
> + info->region[i].region_id =
> + info->queue_region_number;
> +
> + j = info->region[i].user_priority_num;
> + if (conf_info->region[n].user_priority_num) {
> + info->region[i].user_priority[j] =
> + conf_info->
> + region[n].user_priority[0];
> + info->region[i].user_priority_num++;
> + }
> +
> + j = info->region[i].flowtype_num;
> + if (conf_info->region[n].flowtype_num) {
> + info->region[i].hw_flowtype[j] =
> + conf_info->
> + region[n].hw_flowtype[0];
> + info->region[i].flowtype_num++;
> + }
> + info->queue_region_number++;
> + } else {
> + j = info->region[i].user_priority_num;
> + if (conf_info->region[n].user_priority_num) {
> + info->region[i].user_priority[j] =
> + conf_info->
> + region[n].user_priority[0];
> + info->region[i].user_priority_num++;
> + }
> +
> + j = info->region[i].flowtype_num;
> + if (conf_info->region[n].flowtype_num) {
> + info->region[i].hw_flowtype[j] =
> + conf_info->
> + region[n].hw_flowtype[0];
> + info->region[i].flowtype_num++;
> + }
> + }
> + }
> +
> + rss_config->queue_region_conf = TRUE;
> + return 0;
> + }
> +
> + if (!rss || !rss->num) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION,
> + act,
> + "no valid queues");
> + return -rte_errno;
> + }
> +
> + for (n = 0; n < rss->num; n++) {
> + if (rss->queue[n] >= dev->data->nb_rx_queues) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION,
> + act,
> + "queue id > max number of queues");
> + return -rte_errno;
> + }
> + }
> + if (rss->rss_conf)
> + rss_config->rss_conf = *rss->rss_conf;
> + else
> + rss_config->rss_conf.rss_hf =
> + pf->adapter->flow_types_mask;
> +
> + for (n = 0; n < rss->num; ++n)
> + rss_config->queue[n] = rss->queue[n];
> + rss_config->num = rss->num;
> + index++;
> +
> + /* check if the next not void action is END */
> + NEXT_ITEM_OF_ACTION(act, actions, index);
> + if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION,
> + act, "Not supported action.");
> + return -rte_errno;
> + }
> + rss_config->queue_region_conf = FALSE;
> +
> + return 0;
> +}
> +
> +static int
> +i40e_parse_rss_filter(struct rte_eth_dev *dev,
> + const struct rte_flow_attr *attr,
> + const struct rte_flow_item pattern[],
> + const struct rte_flow_action actions[],
> + union i40e_filter_t *filter,
> + struct rte_flow_error *error)
> +{
> + int ret;
> + struct i40e_queue_regions info;
> + uint8_t action_flag = 0;
> +
> + memset(&info, 0, sizeof(struct i40e_queue_regions));
> +
> + ret = i40e_flow_parse_rss_pattern(dev, pattern,
> + error, &action_flag, &info);
> + if (ret)
> + return ret;
> +
> + ret = i40e_flow_parse_rss_action(dev, actions, error,
> + &action_flag, &info, filter);
> + if (ret)
> + return ret;
> +
> + ret = i40e_flow_parse_attr(attr, error);
> + if (ret)
> + return ret;
> +
> + cons_filter_type = RTE_ETH_FILTER_HASH;
> +
> + return 0;
> +}
> +
> +static int
> +i40e_config_rss_filter_set(struct rte_eth_dev *dev,
> + struct i40e_rte_flow_rss_conf *conf, bool add) {
[Qi:] Why the parameter "add"? It is always set to 1, and we have i40e_config_rss_filter_del with add = 0.
> + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> + struct i40e_hw *hw =
> I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> + if (conf->queue_region_conf) {
> + i40e_flush_queue_region_all_conf(dev, hw, pf, add);
> + conf->queue_region_conf = 0;
> + } else {
> + i40e_config_rss_filter(pf, conf, add);
> + }
> + return 0;
> +}
> +
> +static int
> +i40e_config_rss_filter_del(struct rte_eth_dev *dev,
> + struct i40e_rte_flow_rss_conf *conf, bool add) {
[Qi:] The same question applies for "add".
> + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> + struct i40e_hw *hw =
> I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> + i40e_flush_queue_region_all_conf(dev, hw, pf, add);
> +
> + i40e_config_rss_filter(pf, conf, add);
> + return 0;
> +}
> +
> +static int
> i40e_flow_validate(struct rte_eth_dev *dev,
> const struct rte_flow_attr *attr,
> const struct rte_flow_item pattern[], @@ -4130,6 +4423,17 @@
> i40e_flow_validate(struct rte_eth_dev *dev,
>
> memset(&cons_filter, 0, sizeof(cons_filter));
>
> + /* Get the non-void item of action */
> + while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
> + i++;
> +
> + if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
> + ret = i40e_parse_rss_filter(dev, attr, pattern,
> + actions, &cons_filter, error);
> + return ret;
> + }
> +
> + i = 0;
> /* Get the non-void item number of pattern */
> while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
> if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID) @@ -4217,6
> +4521,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
> flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
> i40e_tunnel_filter_list);
> break;
> + case RTE_ETH_FILTER_HASH:
> + ret = i40e_config_rss_filter_set(dev,
> + &cons_filter.rss_conf, 1);
> + flow->rule = &pf->rss_info;
> + break;
> default:
> goto free_flow;
> }
> @@ -4255,6 +4564,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
> ret = i40e_flow_add_del_fdir_filter(dev,
> &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
> break;
> + case RTE_ETH_FILTER_HASH:
> + ret = i40e_config_rss_filter_del(dev,
> + (struct i40e_rte_flow_rss_conf *)flow->rule, 0);
> default:
> PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> filter_type);
> @@ -4397,6 +4709,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct
> rte_flow_error *error)
> return -rte_errno;
> }
>
> + ret = i40e_flow_flush_rss_filter(dev);
> + if (ret) {
> + rte_flow_error_set(error, -ret,
> + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> + "Failed to flush rss flows.");
> + return -rte_errno;
> + }
> +
> return ret;
> }
>
> @@ -4487,3 +4807,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
>
> return ret;
> }
> +
> +/* remove the rss filter */
> +static int
> +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev) {
> + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> + struct i40e_hw *hw =
> I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + int32_t ret = -EINVAL;
> +
> + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
> +
> + if (rss_info->num)
> + ret = i40e_config_rss_filter(pf, rss_info, FALSE);
> + return ret;
> +}
> --
> 2.9.3
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
2017-12-22 4:36 ` Zhang, Qi Z
@ 2018-01-07 15:43 ` Zhang, Helin
2018-01-08 1:53 ` Zhao1, Wei
2018-01-08 8:30 ` Zhao1, Wei
2018-01-08 8:33 ` Zhao1, Wei
2 siblings, 1 reply; 17+ messages in thread
From: Zhang, Helin @ 2018-01-07 15:43 UTC (permalink / raw)
To: Zhang, Qi Z, Zhao1, Wei, dev; +Cc: Zhao1, Wei
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Zhang, Qi Z
> Sent: Friday, December 22, 2017 12:36 PM
> To: Zhao1, Wei; dev@dpdk.org
> Cc: Zhao1, Wei
> Subject: Re: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
>
> Hi Wei:
>
> Please check my comment below.
> Besides, there some line over 80 character warning need to fix
>
> Regards
> Qi
>
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wei Zhao
> > Sent: Friday, November 24, 2017 4:43 PM
> > To: dev@dpdk.org
> > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > Subject: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
> >
> > Rte_flow actually defined to include RSS, but till now, RSS is out of rte_flow.
> > This patch is to move i40e existing RSS to rte_flow.
> > This patch also enable queue region configuration using flow API for i40e.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > ---
> > drivers/net/i40e/i40e_ethdev.c | 91 +++++++++++
> > drivers/net/i40e/i40e_ethdev.h | 11 ++
> > drivers/net/i40e/i40e_flow.c | 336
> > +++++++++++++++++++++++++++++++++++++++++
> > 3 files changed, 438 insertions(+)
Have you addressed all the comments? Did I miss anything?
In addition, I think doc update is needed. e.g. doc/nics/i40e.rst or similar?
Thanks,
Helin
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
2018-01-07 15:43 ` Zhang, Helin
@ 2018-01-08 1:53 ` Zhao1, Wei
0 siblings, 0 replies; 17+ messages in thread
From: Zhao1, Wei @ 2018-01-08 1:53 UTC (permalink / raw)
To: Zhang, Helin, Zhang, Qi Z, dev
Yes, I will commit a new version today.
> -----Original Message-----
> From: Zhang, Helin
> Sent: Sunday, January 7, 2018 11:43 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Zhao1, Wei <wei.zhao1@intel.com>;
> dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>
> Subject: RE: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
>
>
>
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Zhang, Qi Z
> > Sent: Friday, December 22, 2017 12:36 PM
> > To: Zhao1, Wei; dev@dpdk.org
> > Cc: Zhao1, Wei
> > Subject: Re: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
> >
> > Hi Wei:
> >
> > Please check my comment below.
> > Besides, there some line over 80 character warning need to fix
> >
> > Regards
> > Qi
> >
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wei Zhao
> > > Sent: Friday, November 24, 2017 4:43 PM
> > > To: dev@dpdk.org
> > > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > > Subject: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
> > >
> > > Rte_flow actually defined to include RSS, but till now, RSS is out of
> rte_flow.
> > > This patch is to move i40e existing RSS to rte_flow.
> > > This patch also enable queue region configuration using flow API for i40e.
> > >
> > > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > > ---
> > > drivers/net/i40e/i40e_ethdev.c | 91 +++++++++++
> > > drivers/net/i40e/i40e_ethdev.h | 11 ++
> > > drivers/net/i40e/i40e_flow.c | 336
> > > +++++++++++++++++++++++++++++++++++++++++
> > > 3 files changed, 438 insertions(+)
>
> Have you addressed all the comments? Did I miss anything?
> In addition, I think doc update is needed. e.g. doc/nics/i40e.rst or similar?
>
> Thanks,
> Helin
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
2017-12-22 4:36 ` Zhang, Qi Z
2018-01-07 15:43 ` Zhang, Helin
@ 2018-01-08 8:30 ` Zhao1, Wei
2018-01-08 8:33 ` Zhao1, Wei
2 siblings, 0 replies; 17+ messages in thread
From: Zhao1, Wei @ 2018-01-08 8:30 UTC (permalink / raw)
To: Zhang, Qi Z, dev
Hi, zhangqi
> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Friday, December 22, 2017 12:36 PM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>
> Subject: RE: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
>
> Hi Wei:
>
> Please check my comment below.
> Besides, there some line over 80 character warning need to fix
>
> Regards
> Qi
>
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wei Zhao
> > Sent: Friday, November 24, 2017 4:43 PM
> > To: dev@dpdk.org
> > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > Subject: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
> >
> > Rte_flow actually defined to include RSS, but till now, RSS is out of rte_flow.
> > This patch is to move i40e existing RSS to rte_flow.
> > This patch also enable queue region configuration using flow API for i40e.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > ---
> > drivers/net/i40e/i40e_ethdev.c | 91 +++++++++++
> > drivers/net/i40e/i40e_ethdev.h | 11 ++
> > drivers/net/i40e/i40e_flow.c | 336
> > +++++++++++++++++++++++++++++++++++++++++
> > 3 files changed, 438 insertions(+)
> >
> > diff --git a/drivers/net/i40e/i40e_ethdev.c
> > b/drivers/net/i40e/i40e_ethdev.c index 811cc9f..75b3bf3 100644
> > --- a/drivers/net/i40e/i40e_ethdev.c
> > +++ b/drivers/net/i40e/i40e_ethdev.c
> > @@ -1349,6 +1349,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
> > /* initialize queue region configuration */
> > i40e_init_queue_region_conf(dev);
> >
> > + /* initialize rss configuration from rte_flow */
> > + memset(&pf->rss_info, 0,
> > + sizeof(struct i40e_rte_flow_rss_conf));
> > +
> > return 0;
> >
> > err_init_fdir_filter_list:
> > @@ -10943,12 +10947,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
> > }
> > }
> >
> > +/* Restore rss filter */
> > +static inline void
> > +i40e_rss_filter_restore(struct i40e_pf *pf) {
> > + struct i40e_rte_flow_rss_conf *conf =
> > + &pf->rss_info;
> > + if (conf->num)
> > + i40e_config_rss_filter(pf, conf, TRUE); }
> > +
> > static void
> > i40e_filter_restore(struct i40e_pf *pf) {
> > i40e_ethertype_filter_restore(pf);
> > i40e_tunnel_filter_restore(pf);
> > i40e_fdir_filter_restore(pf);
> > + i40e_rss_filter_restore(pf);
> > }
> >
> > static bool
> > @@ -11366,6 +11381,82 @@ i40e_cloud_filter_qinq_create(struct i40e_pf
> > *pf)
> > return ret;
> > }
> >
> > +int
> > +i40e_config_rss_filter(struct i40e_pf *pf,
> > + struct i40e_rte_flow_rss_conf *conf, bool add) {
> > + struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> > + uint32_t i, lut = 0;
> > + uint16_t j, num;
> > + struct rte_eth_rss_conf rss_conf = conf->rss_conf;
> > + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> > +
> > + if (!add) {
> > + if (memcmp(conf, rss_info,
> > + sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
> > + i40e_pf_disable_rss(pf);
> > + memset(rss_info, 0,
> > + sizeof(struct i40e_rte_flow_rss_conf));
> > + return 0;
> > + }
> > + return -EINVAL;
> > + }
> > +
> > + if (rss_info->num)
> > + return -EINVAL;
> > +
> > + /* If both VMDQ and RSS enabled, not all of PF queues are
> configured.
> > + * It's necessary to calculate the actual PF queues that are configured.
> > + */
> > + if (pf->dev_data->dev_conf.rxmode.mq_mode &
> > ETH_MQ_RX_VMDQ_FLAG)
> > + num = i40e_pf_calc_configured_queues_num(pf);
> > + else
> > + num = pf->dev_data->nb_rx_queues;
> > +
> > + num = RTE_MIN(num, conf->num);
> > + PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are
> > configured",
> > + num);
> > +
> > + if (num == 0) {
> > + PMD_DRV_LOG(ERR, "No PF queues are configured to
> enable RSS");
> > + return -ENOTSUP;
> > + }
> > +
> > + /* Fill in redirection table */
> > + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
> > + if (j == num)
> > + j = 0;
> > + lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
> > + hw->func_caps.rss_table_entry_width) - 1));
> > + if ((i & 3) == 3)
> > + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
> > + }
> > +
> > + if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
> > + i40e_pf_disable_rss(pf);
> > + return 0;
> > + }
> > + if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
> > + (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
> > + /* Random default keys */
> > + static uint32_t rss_key_default[] = {0x6b793944,
> > + 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
> > + 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
> > + 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
> > +
> > + rss_conf.rss_key = (uint8_t *)rss_key_default;
> > + rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
> > + sizeof(uint32_t);
> > + }
> > +
> > + return i40e_hw_rss_hash_set(pf, &rss_conf);
> > +
> > + rte_memcpy(rss_info,
> > + conf, sizeof(struct i40e_rte_flow_rss_conf));
> > +
> > + return 0;
> > +}
> > +
> > RTE_INIT(i40e_init_log);
> > static void
> > i40e_init_log(void)
> > diff --git a/drivers/net/i40e/i40e_ethdev.h
> > b/drivers/net/i40e/i40e_ethdev.h index cd67453..0a59e39 100644
> > --- a/drivers/net/i40e/i40e_ethdev.h
> > +++ b/drivers/net/i40e/i40e_ethdev.h
> > @@ -891,6 +891,13 @@ struct i40e_customized_pctype {
> > bool valid; /* Check if it's valid */
> > };
> >
> > +struct i40e_rte_flow_rss_conf {
> > + struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
> > + uint16_t queue_region_conf; /**< Queue region config flag */
> > + uint16_t num; /**< Number of entries in queue[]. */
> > + uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use.
> */ };
> > +
> > /*
> > * Structure to store private data specific for PF instance.
> > */
> > @@ -945,6 +952,7 @@ struct i40e_pf {
> > struct i40e_fdir_info fdir; /* flow director info */
> > struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
> > struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
> > + struct i40e_rte_flow_rss_conf rss_info; /* rss info */
> > struct i40e_queue_regions queue_region; /* queue region info */
> > struct i40e_fc_conf fc_conf; /* Flow control conf */
> > struct i40e_mirror_rule_list mirror_list; @@ -1071,6 +1079,7 @@
> > union i40e_filter_t {
> > struct i40e_fdir_filter_conf fdir_filter;
> > struct rte_eth_tunnel_filter_conf tunnel_filter;
> > struct i40e_tunnel_filter_conf consistent_tunnel_filter;
> > + struct i40e_rte_flow_rss_conf rss_conf;
> > };
> >
> > typedef int (*parse_filter_t)(struct rte_eth_dev *dev, @@ -1198,6
> > +1207,8 @@ int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool
> > sw_dcb); int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
> > struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on); void
> > i40e_init_queue_region_conf(struct rte_eth_dev *dev);
> > +int i40e_config_rss_filter(struct i40e_pf *pf,
> > + struct i40e_rte_flow_rss_conf *conf, bool add);
> >
> > #define I40E_DEV_TO_PCI(eth_dev) \
> > RTE_DEV_TO_PCI((eth_dev)->device)
> > diff --git a/drivers/net/i40e/i40e_flow.c
> > b/drivers/net/i40e/i40e_flow.c index 7e4936e..e127f4c 100644
> > --- a/drivers/net/i40e/i40e_flow.c
> > +++ b/drivers/net/i40e/i40e_flow.c
> > @@ -138,6 +138,8 @@ static int i40e_flow_flush_fdir_filter(struct
> > i40e_pf *pf); static int i40e_flow_flush_ethertype_filter(struct
> > i40e_pf *pf); static int i40e_flow_flush_tunnel_filter(struct i40e_pf
> > *pf); static int
> > +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev); static int
> > i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
> > const struct rte_flow_attr *attr,
> > const struct rte_flow_item pattern[], @@ -4095,6
> > +4097,297 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, }
> >
> > static int
> > +i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
> > + const struct rte_flow_item *pattern,
> > + struct rte_flow_error *error,
> > + uint8_t *action_flag,
> > + struct i40e_queue_regions *info) {
> > + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
> > + const struct rte_flow_item *item = pattern;
> > + enum rte_flow_item_type item_type;
> > +
> > + if (item->type == RTE_FLOW_ITEM_TYPE_END)
> > + return 0;
> > +
> > + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > + if (item->last) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Not support range");
> > + return -rte_errno;
> > + }
> > + item_type = item->type;
> > + switch (item_type) {
> > + case RTE_FLOW_ITEM_TYPE_ETH:
> > + *action_flag = 1;
> > + break;
> > + case RTE_FLOW_ITEM_TYPE_VLAN:
> > + vlan_spec =
> > + (const struct rte_flow_item_vlan *)item-
> >spec;
> > + vlan_mask =
> > + (const struct rte_flow_item_vlan *)item-
> >mask;
> > + if (vlan_spec && vlan_mask) {
> > + if (vlan_mask->tci ==
> > + rte_cpu_to_be_16(I40E_TCI_MASK))
> {
> > + info->region[0].user_priority[0] =
> > + (vlan_spec->tci >> 13) & 0x7;
> > + info->region[0].user_priority_num =
> 1;
> > + info->queue_region_number = 1;
> > + *action_flag = 0;
> > + }
> > + }
> > + break;
> > + default:
> > + break;
> > + }
> > + }
> > +
> > + return 0;
>
> [Qi:] The function only check item->last, besides seems all kinds of pattern
> sequence will be accept, this may not match device's capability.
> I suggest to add more strict pattern check and more comment to explain the
> acceptable pattern.
Maybe, I should add
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Not support range");
return -rte_errno;
after "default:", because the RSS configuration does not need any other pattern types.
VLAN is only used for queue region configuration.
> > +
> > +static int
> > +i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
> > + const struct rte_flow_action *actions,
> > + struct rte_flow_error *error,
> > + uint8_t *action_flag,
> > + struct i40e_queue_regions *conf_info,
> > + union i40e_filter_t *filter)
> > +{
> > + const struct rte_flow_action *act;
> > + const struct rte_flow_action_rss *rss;
> > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> > + struct i40e_queue_regions *info = &pf->queue_region;
> > + struct i40e_rte_flow_rss_conf *rss_config =
> > + &filter->rss_conf;
> > + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> > + uint16_t i, j, n;
> > + uint32_t index = 0;
> > +
> > + NEXT_ITEM_OF_ACTION(act, actions, index);
> > + rss = (const struct rte_flow_action_rss *)act->conf;
> > +
> > + if (action_flag) {
> > + for (n = 0; n < 64; n++) {
> > + if (rss->rss_conf->rss_hf & (1 << n)) {
> > + conf_info->region[0].user_priority[0] = n;
> > + conf_info->region[0].user_priority_num = 1;
> > + conf_info->queue_region_number = 1;
> > + break;
> > + }
> > + }
> > + }
>
> [Qi:] Convert act->conf to struct rte_flow_action_rss and access its data
> should after you checked the act->type is RTE_FLOW_ACTION_TYPE_RSS, So,
> it's better to switch place with following type check code.
OK, I will change it in v2.
> > +
> > + /**
> > + * rss only supports forwarding,
> > + * check if the first not void action is RSS.
> > + */
> > + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
> > + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ACTION,
> > + act, "Not supported action.");
> > + return -rte_errno;
> > + }
> > +
> > + for (n = 0; n < conf_info->queue_region_number; n++) {
> > + if (conf_info->region[n].user_priority_num ||
> > + conf_info->region[n].flowtype_num) {
> > + if (!((rte_is_power_of_2(rss->num)) &&
> > + rss->num <= 64)) {
> > + PMD_DRV_LOG(ERR, "The region sizes
> should be any of
> > the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
> > + "total number of queues do not exceed the
> VSI
> > allocation");
> > + return -rte_errno;
> > + }
> > +
> > + if (conf_info->region[n].user_priority[n] >=
> > + I40E_MAX_USER_PRIORITY) {
> > + PMD_DRV_LOG(ERR, "the user priority max
> index is 7");
> > + return -rte_errno;
> > + }
> > +
> > + if (conf_info->region[n].hw_flowtype[n] >=
> > + I40E_FILTER_PCTYPE_MAX) {
> > + PMD_DRV_LOG(ERR, "the hw_flowtype or
> PCTYPE max
> > index is 63");
> > + return -rte_errno;
> > + }
> > +
> > + if (rss_info->num < rss->num ||
> > + rss_info->queue[0] < rss->queue[0] ||
> > + (rss->queue[0] + rss->num >
> > + rss_info->num + rss_info->queue[0]))
> {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ACTION,
> > + act,
> > + "no valid queues");
> > + return -rte_errno;
> > + }
> > +
> > + for (i = 0; i < info->queue_region_number; i++) {
> > + if (info->region[i].queue_num == rss->num
> &&
> > + info->region[i].queue_start_index ==
> > + rss->queue[0])
> > + break;
> > + }
> > +
> > + if (i == info->queue_region_number) {
> > + if (i > I40E_REGION_MAX_INDEX) {
> > + PMD_DRV_LOG(ERR, "the queue
> region max index is
> > 7");
> > + return -rte_errno;
> > + }
> > +
> > + info->region[i].queue_num =
> > + rss->num;
> > + info->region[i].queue_start_index =
> > + rss->queue[0];
> > + info->region[i].region_id =
> > + info->queue_region_number;
> > +
> > + j = info->region[i].user_priority_num;
> > + if (conf_info->region[n].user_priority_num) {
> > + info->region[i].user_priority[j] =
> > + conf_info->
> > + region[n].user_priority[0];
> > + info->region[i].user_priority_num++;
> > + }
> > +
> > + j = info->region[i].flowtype_num;
> > + if (conf_info->region[n].flowtype_num) {
> > + info->region[i].hw_flowtype[j] =
> > + conf_info->
> > + region[n].hw_flowtype[0];
> > + info->region[i].flowtype_num++;
> > + }
> > + info->queue_region_number++;
> > + } else {
> > + j = info->region[i].user_priority_num;
> > + if (conf_info->region[n].user_priority_num) {
> > + info->region[i].user_priority[j] =
> > + conf_info->
> > + region[n].user_priority[0];
> > + info->region[i].user_priority_num++;
> > + }
> > +
> > + j = info->region[i].flowtype_num;
> > + if (conf_info->region[n].flowtype_num) {
> > + info->region[i].hw_flowtype[j] =
> > + conf_info->
> > + region[n].hw_flowtype[0];
> > + info->region[i].flowtype_num++;
> > + }
> > + }
> > + }
> > +
> > + rss_config->queue_region_conf = TRUE;
> > + return 0;
> > + }
> > +
> > + if (!rss || !rss->num) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ACTION,
> > + act,
> > + "no valid queues");
> > + return -rte_errno;
> > + }
> > +
> > + for (n = 0; n < rss->num; n++) {
> > + if (rss->queue[n] >= dev->data->nb_rx_queues) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ACTION,
> > + act,
> > + "queue id > max number of queues");
> > + return -rte_errno;
> > + }
> > + }
> > + if (rss->rss_conf)
> > + rss_config->rss_conf = *rss->rss_conf;
> > + else
> > + rss_config->rss_conf.rss_hf =
> > + pf->adapter->flow_types_mask;
> > +
> > + for (n = 0; n < rss->num; ++n)
> > + rss_config->queue[n] = rss->queue[n];
> > + rss_config->num = rss->num;
> > + index++;
> > +
> > + /* check if the next not void action is END */
> > + NEXT_ITEM_OF_ACTION(act, actions, index);
> > + if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> > + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ACTION,
> > + act, "Not supported action.");
> > + return -rte_errno;
> > + }
> > + rss_config->queue_region_conf = FALSE;
> > +
> > + return 0;
> > +}
> > +
> > +static int
> > +i40e_parse_rss_filter(struct rte_eth_dev *dev,
> > + const struct rte_flow_attr *attr,
> > + const struct rte_flow_item pattern[],
> > + const struct rte_flow_action actions[],
> > + union i40e_filter_t *filter,
> > + struct rte_flow_error *error)
> > +{
> > + int ret;
> > + struct i40e_queue_regions info;
> > + uint8_t action_flag = 0;
> > +
> > + memset(&info, 0, sizeof(struct i40e_queue_regions));
> > +
> > + ret = i40e_flow_parse_rss_pattern(dev, pattern,
> > + error, &action_flag, &info);
> > + if (ret)
> > + return ret;
> > +
> > + ret = i40e_flow_parse_rss_action(dev, actions, error,
> > + &action_flag, &info, filter);
> > + if (ret)
> > + return ret;
> > +
> > + ret = i40e_flow_parse_attr(attr, error);
> > + if (ret)
> > + return ret;
> > +
> > + cons_filter_type = RTE_ETH_FILTER_HASH;
> > +
> > + return 0;
> > +}
> > +
> > +static int
> > +i40e_config_rss_filter_set(struct rte_eth_dev *dev,
> > + struct i40e_rte_flow_rss_conf *conf, bool add) {
>
> [Qi:] why parameter "add", it is always set with 1 and we have
> i40e_config_rss_filter_del with add = 0.
Good idea, I will delete that parameter in v2; it seems to be useless.
>
> > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> > + struct i40e_hw *hw =
> > I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > +
> > + if (conf->queue_region_conf) {
> > + i40e_flush_queue_region_all_conf(dev, hw, pf, add);
> > + conf->queue_region_conf = 0;
> > + } else {
> > + i40e_config_rss_filter(pf, conf, add);
> > + }
> > + return 0;
> > +}
> > +
> > +static int
> > +i40e_config_rss_filter_del(struct rte_eth_dev *dev,
> > + struct i40e_rte_flow_rss_conf *conf, bool add) {
>
> [Qi:] same question for "add".
Good idea, I will delete that parameter in v2; it seems to be useless.
>
> > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> > + struct i40e_hw *hw =
> > I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > +
> > + i40e_flush_queue_region_all_conf(dev, hw, pf, add);
> > +
> > + i40e_config_rss_filter(pf, conf, add);
> > + return 0;
> > +}
> > +
> > +static int
> > i40e_flow_validate(struct rte_eth_dev *dev,
> > const struct rte_flow_attr *attr,
> > const struct rte_flow_item pattern[], @@ -4130,6 +4423,17
> @@
> > i40e_flow_validate(struct rte_eth_dev *dev,
> >
> > memset(&cons_filter, 0, sizeof(cons_filter));
> >
> > + /* Get the non-void item of action */
> > + while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
> > + i++;
> > +
> > + if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
> > + ret = i40e_parse_rss_filter(dev, attr, pattern,
> > + actions, &cons_filter, error);
> > + return ret;
> > + }
> > +
> > + i = 0;
> > /* Get the non-void item number of pattern */
> > while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
> > if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID) @@ -
> 4217,6
> > +4521,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
> > flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
> > i40e_tunnel_filter_list);
> > break;
> > + case RTE_ETH_FILTER_HASH:
> > + ret = i40e_config_rss_filter_set(dev,
> > + &cons_filter.rss_conf, 1);
> > + flow->rule = &pf->rss_info;
> > + break;
> > default:
> > goto free_flow;
> > }
> > @@ -4255,6 +4564,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
> > ret = i40e_flow_add_del_fdir_filter(dev,
> > &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
> > break;
> > + case RTE_ETH_FILTER_HASH:
> > + ret = i40e_config_rss_filter_del(dev,
> > + (struct i40e_rte_flow_rss_conf *)flow->rule, 0);
> > default:
> > PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> > filter_type);
> > @@ -4397,6 +4709,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct
> > rte_flow_error *error)
> > return -rte_errno;
> > }
> >
> > + ret = i40e_flow_flush_rss_filter(dev);
> > + if (ret) {
> > + rte_flow_error_set(error, -ret,
> > + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > + "Failed to flush rss flows.");
> > + return -rte_errno;
> > + }
> > +
> > return ret;
> > }
> >
> > @@ -4487,3 +4807,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf
> > *pf)
> >
> > return ret;
> > }
> > +
> > +/* remove the rss filter */
> > +static int
> > +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev) {
> > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> > + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> > + struct i40e_hw *hw =
> > I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > + int32_t ret = -EINVAL;
> > +
> > + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
> > +
> > + if (rss_info->num)
> > + ret = i40e_config_rss_filter(pf, rss_info, FALSE);
> > + return ret;
> > +}
> > --
> > 2.9.3
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
2017-12-22 4:36 ` Zhang, Qi Z
2018-01-07 15:43 ` Zhang, Helin
2018-01-08 8:30 ` Zhao1, Wei
@ 2018-01-08 8:33 ` Zhao1, Wei
2 siblings, 0 replies; 17+ messages in thread
From: Zhao1, Wei @ 2018-01-08 8:33 UTC (permalink / raw)
To: Zhang, Qi Z, dev
Hi, zhangqi
> -----Original Message-----
> From: Zhao1, Wei
> Sent: Monday, January 8, 2018 4:30 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
>
> Hi, zhangqi
>
> > -----Original Message-----
> > From: Zhang, Qi Z
> > Sent: Friday, December 22, 2017 12:36 PM
> > To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > Subject: RE: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
> >
> > Hi Wei:
> >
> > Please check my comment below.
> > Besides, there some line over 80 character warning need to fix
Log messages enclosed in double quotation marks will not be broken into 2 lines, because Yigit has added support allowing this type of message to exceed 80 characters.
> >
> > Regards
> > Qi
> >
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Wei Zhao
> > > Sent: Friday, November 24, 2017 4:43 PM
> > > To: dev@dpdk.org
> > > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > > Subject: [dpdk-dev] [PATCH] net/i40e: move RSS to flow API
> > >
> > > Rte_flow actually defined to include RSS, but till now, RSS is out of
> rte_flow.
> > > This patch is to move i40e existing RSS to rte_flow.
> > > This patch also enable queue region configuration using flow API for i40e.
> > >
> > > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > > ---
> > > drivers/net/i40e/i40e_ethdev.c | 91 +++++++++++
> > > drivers/net/i40e/i40e_ethdev.h | 11 ++
> > > drivers/net/i40e/i40e_flow.c | 336
> > > +++++++++++++++++++++++++++++++++++++++++
> > > 3 files changed, 438 insertions(+)
> > >
> > > diff --git a/drivers/net/i40e/i40e_ethdev.c
> > > b/drivers/net/i40e/i40e_ethdev.c index 811cc9f..75b3bf3 100644
> > > --- a/drivers/net/i40e/i40e_ethdev.c
> > > +++ b/drivers/net/i40e/i40e_ethdev.c
> > > @@ -1349,6 +1349,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
> > > /* initialize queue region configuration */
> > > i40e_init_queue_region_conf(dev);
> > >
> > > + /* initialize rss configuration from rte_flow */
> > > + memset(&pf->rss_info, 0,
> > > + sizeof(struct i40e_rte_flow_rss_conf));
> > > +
> > > return 0;
> > >
> > > err_init_fdir_filter_list:
> > > @@ -10943,12 +10947,23 @@ i40e_tunnel_filter_restore(struct i40e_pf
> *pf)
> > > }
> > > }
> > >
> > > +/* Restore rss filter */
> > > +static inline void
> > > +i40e_rss_filter_restore(struct i40e_pf *pf) {
> > > + struct i40e_rte_flow_rss_conf *conf =
> > > + &pf->rss_info;
> > > + if (conf->num)
> > > + i40e_config_rss_filter(pf, conf, TRUE); }
> > > +
> > > static void
> > > i40e_filter_restore(struct i40e_pf *pf) {
> > > i40e_ethertype_filter_restore(pf);
> > > i40e_tunnel_filter_restore(pf);
> > > i40e_fdir_filter_restore(pf);
> > > + i40e_rss_filter_restore(pf);
> > > }
> > >
> > > static bool
> > > @@ -11366,6 +11381,82 @@ i40e_cloud_filter_qinq_create(struct
> > > i40e_pf
> > > *pf)
> > > return ret;
> > > }
> > >
> > > +int
> > > +i40e_config_rss_filter(struct i40e_pf *pf,
> > > + struct i40e_rte_flow_rss_conf *conf, bool add) {
> > > + struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> > > + uint32_t i, lut = 0;
> > > + uint16_t j, num;
> > > + struct rte_eth_rss_conf rss_conf = conf->rss_conf;
> > > + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> > > +
> > > + if (!add) {
> > > + if (memcmp(conf, rss_info,
> > > + sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
> > > + i40e_pf_disable_rss(pf);
> > > + memset(rss_info, 0,
> > > + sizeof(struct i40e_rte_flow_rss_conf));
> > > + return 0;
> > > + }
> > > + return -EINVAL;
> > > + }
> > > +
> > > + if (rss_info->num)
> > > + return -EINVAL;
> > > +
> > > + /* If both VMDQ and RSS enabled, not all of PF queues are
> > configured.
> > > + * It's necessary to calculate the actual PF queues that are configured.
> > > + */
> > > + if (pf->dev_data->dev_conf.rxmode.mq_mode &
> > > ETH_MQ_RX_VMDQ_FLAG)
> > > + num = i40e_pf_calc_configured_queues_num(pf);
> > > + else
> > > + num = pf->dev_data->nb_rx_queues;
> > > +
> > > + num = RTE_MIN(num, conf->num);
> > > + PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are
> > > configured",
> > > + num);
> > > +
> > > + if (num == 0) {
> > > + PMD_DRV_LOG(ERR, "No PF queues are configured to
> > enable RSS");
> > > + return -ENOTSUP;
> > > + }
> > > +
> > > + /* Fill in redirection table */
> > > + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
> > > + if (j == num)
> > > + j = 0;
> > > + lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
> > > + hw->func_caps.rss_table_entry_width) - 1));
> > > + if ((i & 3) == 3)
> > > + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
> > > + }
> > > +
> > > + if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
> > > + i40e_pf_disable_rss(pf);
> > > + return 0;
> > > + }
> > > + if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
> > > + (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
> > > + /* Random default keys */
> > > + static uint32_t rss_key_default[] = {0x6b793944,
> > > + 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
> > > + 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
> > > + 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
> > > +
> > > + rss_conf.rss_key = (uint8_t *)rss_key_default;
> > > + rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
> > > + sizeof(uint32_t);
> > > + }
> > > +
> > > + return i40e_hw_rss_hash_set(pf, &rss_conf);
> > > +
> > > + rte_memcpy(rss_info,
> > > + conf, sizeof(struct i40e_rte_flow_rss_conf));
> > > +
> > > + return 0;
> > > +}
> > > +
> > > RTE_INIT(i40e_init_log);
> > > static void
> > > i40e_init_log(void)
> > > diff --git a/drivers/net/i40e/i40e_ethdev.h
> > > b/drivers/net/i40e/i40e_ethdev.h index cd67453..0a59e39 100644
> > > --- a/drivers/net/i40e/i40e_ethdev.h
> > > +++ b/drivers/net/i40e/i40e_ethdev.h
> > > @@ -891,6 +891,13 @@ struct i40e_customized_pctype {
> > > bool valid; /* Check if it's valid */
> > > };
> > >
> > > +struct i40e_rte_flow_rss_conf {
> > > + struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
> > > + uint16_t queue_region_conf; /**< Queue region config flag */
> > > + uint16_t num; /**< Number of entries in queue[]. */
> > > + uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use.
> > */ };
> > > +
> > > /*
> > > * Structure to store private data specific for PF instance.
> > > */
> > > @@ -945,6 +952,7 @@ struct i40e_pf {
> > > struct i40e_fdir_info fdir; /* flow director info */
> > > struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
> > > struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
> > > + struct i40e_rte_flow_rss_conf rss_info; /* rss info */
> > > struct i40e_queue_regions queue_region; /* queue region info */
> > > struct i40e_fc_conf fc_conf; /* Flow control conf */
> > > struct i40e_mirror_rule_list mirror_list; @@ -1071,6 +1079,7 @@
> > > union i40e_filter_t {
> > > struct i40e_fdir_filter_conf fdir_filter;
> > > struct rte_eth_tunnel_filter_conf tunnel_filter;
> > > struct i40e_tunnel_filter_conf consistent_tunnel_filter;
> > > + struct i40e_rte_flow_rss_conf rss_conf;
> > > };
> > >
> > > typedef int (*parse_filter_t)(struct rte_eth_dev *dev, @@ -1198,6
> > > +1207,8 @@ int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool
> > > sw_dcb); int i40e_flush_queue_region_all_conf(struct rte_eth_dev
> *dev,
> > > struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on); void
> > > i40e_init_queue_region_conf(struct rte_eth_dev *dev);
> > > +int i40e_config_rss_filter(struct i40e_pf *pf,
> > > + struct i40e_rte_flow_rss_conf *conf, bool add);
> > >
> > > #define I40E_DEV_TO_PCI(eth_dev) \
> > > RTE_DEV_TO_PCI((eth_dev)->device)
> > > diff --git a/drivers/net/i40e/i40e_flow.c
> > > b/drivers/net/i40e/i40e_flow.c index 7e4936e..e127f4c 100644
> > > --- a/drivers/net/i40e/i40e_flow.c
> > > +++ b/drivers/net/i40e/i40e_flow.c
> > > @@ -138,6 +138,8 @@ static int i40e_flow_flush_fdir_filter(struct
> > > i40e_pf *pf); static int i40e_flow_flush_ethertype_filter(struct
> > > i40e_pf *pf); static int i40e_flow_flush_tunnel_filter(struct
> > > i40e_pf *pf); static int
> > > +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev); static int
> > > i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
> > > const struct rte_flow_attr *attr,
> > > const struct rte_flow_item pattern[], @@ -4095,6
> > > +4097,297 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, }
> > >
> > > static int
> > > +i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
> > > + const struct rte_flow_item *pattern,
> > > + struct rte_flow_error *error,
> > > + uint8_t *action_flag,
> > > + struct i40e_queue_regions *info) {
> > > + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
> > > + const struct rte_flow_item *item = pattern;
> > > + enum rte_flow_item_type item_type;
> > > +
> > > + if (item->type == RTE_FLOW_ITEM_TYPE_END)
> > > + return 0;
> > > +
> > > + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > > + if (item->last) {
> > > + rte_flow_error_set(error, EINVAL,
> > > + RTE_FLOW_ERROR_TYPE_ITEM,
> > > + item,
> > > + "Not support range");
> > > + return -rte_errno;
> > > + }
> > > + item_type = item->type;
> > > + switch (item_type) {
> > > + case RTE_FLOW_ITEM_TYPE_ETH:
> > > + *action_flag = 1;
> > > + break;
> > > + case RTE_FLOW_ITEM_TYPE_VLAN:
> > > + vlan_spec =
> > > + (const struct rte_flow_item_vlan *)item-
> > >spec;
> > > + vlan_mask =
> > > + (const struct rte_flow_item_vlan *)item-
> > >mask;
> > > + if (vlan_spec && vlan_mask) {
> > > + if (vlan_mask->tci ==
> > > + rte_cpu_to_be_16(I40E_TCI_MASK))
> > {
> > > + info->region[0].user_priority[0] =
> > > + (vlan_spec->tci >> 13) & 0x7;
> > > + info->region[0].user_priority_num =
> > 1;
> > > + info->queue_region_number = 1;
> > > + *action_flag = 0;
> > > + }
> > > + }
> > > + break;
> > > + default:
> > > + break;
> > > + }
> > > + }
> > > +
> > > + return 0;
> >
> > [Qi:] The function only checks item->last; besides, it seems all kinds of
> > pattern sequences will be accepted, which may not match the device's capability.
> > I suggest adding a stricter pattern check and more comments to explain
> > the acceptable patterns.
>
> Maybe, I should add
>
> rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM,
> item,
> "Not support range");
> return -rte_errno;
>
> after "default:", because the rss config does not need any other patterns.
> VLAN is only for queue region configuration.
>
> > > +
> > > +static int
> > > +i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
> > > + const struct rte_flow_action *actions,
> > > + struct rte_flow_error *error,
> > > + uint8_t *action_flag,
> > > + struct i40e_queue_regions *conf_info,
> > > + union i40e_filter_t *filter) {
> > > + const struct rte_flow_action *act;
> > > + const struct rte_flow_action_rss *rss;
> > > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> > >dev_private);
> > > + struct i40e_queue_regions *info = &pf->queue_region;
> > > + struct i40e_rte_flow_rss_conf *rss_config =
> > > + &filter->rss_conf;
> > > + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> > > + uint16_t i, j, n;
> > > + uint32_t index = 0;
> > > +
> > > + NEXT_ITEM_OF_ACTION(act, actions, index);
> > > + rss = (const struct rte_flow_action_rss *)act->conf;
> > > +
> > > + if (action_flag) {
> > > + for (n = 0; n < 64; n++) {
> > > + if (rss->rss_conf->rss_hf & (1 << n)) {
> > > + conf_info->region[0].user_priority[0] = n;
> > > + conf_info->region[0].user_priority_num = 1;
> > > + conf_info->queue_region_number = 1;
> > > + break;
> > > + }
> > > + }
> > > + }
> >
> > [Qi:] Convert act->conf to struct rte_flow_action_rss and access its
> > data should after you checked the act->type is
> > RTE_FLOW_ACTION_TYPE_RSS, So, it's better to switch place with following
> type check code.
>
> Ok, change in v2
>
> > > +
> > > + /**
> > > + * rss only supports forwarding,
> > > + * check if the first not void action is RSS.
> > > + */
> > > + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
> > > + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
> > > + rte_flow_error_set(error, EINVAL,
> > > + RTE_FLOW_ERROR_TYPE_ACTION,
> > > + act, "Not supported action.");
> > > + return -rte_errno;
> > > + }
> > > +
> > > + for (n = 0; n < conf_info->queue_region_number; n++) {
> > > + if (conf_info->region[n].user_priority_num ||
> > > + conf_info->region[n].flowtype_num) {
> > > + if (!((rte_is_power_of_2(rss->num)) &&
> > > + rss->num <= 64)) {
> > > + PMD_DRV_LOG(ERR, "The region sizes
> > should be any of
> > > the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
> > > + "total number of queues do not exceed the
> > VSI
> > > allocation");
> > > + return -rte_errno;
> > > + }
> > > +
> > > + if (conf_info->region[n].user_priority[n] >=
> > > + I40E_MAX_USER_PRIORITY) {
> > > + PMD_DRV_LOG(ERR, "the user priority max
> > index is 7");
> > > + return -rte_errno;
> > > + }
> > > +
> > > + if (conf_info->region[n].hw_flowtype[n] >=
> > > + I40E_FILTER_PCTYPE_MAX) {
> > > + PMD_DRV_LOG(ERR, "the hw_flowtype or
> > PCTYPE max
> > > index is 63");
> > > + return -rte_errno;
> > > + }
> > > +
> > > + if (rss_info->num < rss->num ||
> > > + rss_info->queue[0] < rss->queue[0] ||
> > > + (rss->queue[0] + rss->num >
> > > + rss_info->num + rss_info->queue[0]))
> > {
> > > + rte_flow_error_set(error, EINVAL,
> > > + RTE_FLOW_ERROR_TYPE_ACTION,
> > > + act,
> > > + "no valid queues");
> > > + return -rte_errno;
> > > + }
> > > +
> > > + for (i = 0; i < info->queue_region_number; i++) {
> > > + if (info->region[i].queue_num == rss->num
> > &&
> > > + info->region[i].queue_start_index ==
> > > + rss->queue[0])
> > > + break;
> > > + }
> > > +
> > > + if (i == info->queue_region_number) {
> > > + if (i > I40E_REGION_MAX_INDEX) {
> > > + PMD_DRV_LOG(ERR, "the queue
> > region max index is
> > > 7");
> > > + return -rte_errno;
> > > + }
> > > +
> > > + info->region[i].queue_num =
> > > + rss->num;
> > > + info->region[i].queue_start_index =
> > > + rss->queue[0];
> > > + info->region[i].region_id =
> > > + info->queue_region_number;
> > > +
> > > + j = info->region[i].user_priority_num;
> > > + if (conf_info->region[n].user_priority_num) {
> > > + info->region[i].user_priority[j] =
> > > + conf_info->
> > > + region[n].user_priority[0];
> > > + info->region[i].user_priority_num++;
> > > + }
> > > +
> > > + j = info->region[i].flowtype_num;
> > > + if (conf_info->region[n].flowtype_num) {
> > > + info->region[i].hw_flowtype[j] =
> > > + conf_info->
> > > + region[n].hw_flowtype[0];
> > > + info->region[i].flowtype_num++;
> > > + }
> > > + info->queue_region_number++;
> > > + } else {
> > > + j = info->region[i].user_priority_num;
> > > + if (conf_info->region[n].user_priority_num) {
> > > + info->region[i].user_priority[j] =
> > > + conf_info->
> > > + region[n].user_priority[0];
> > > + info->region[i].user_priority_num++;
> > > + }
> > > +
> > > + j = info->region[i].flowtype_num;
> > > + if (conf_info->region[n].flowtype_num) {
> > > + info->region[i].hw_flowtype[j] =
> > > + conf_info->
> > > + region[n].hw_flowtype[0];
> > > + info->region[i].flowtype_num++;
> > > + }
> > > + }
> > > + }
> > > +
> > > + rss_config->queue_region_conf = TRUE;
> > > + return 0;
> > > + }
> > > +
> > > + if (!rss || !rss->num) {
> > > + rte_flow_error_set(error, EINVAL,
> > > + RTE_FLOW_ERROR_TYPE_ACTION,
> > > + act,
> > > + "no valid queues");
> > > + return -rte_errno;
> > > + }
> > > +
> > > + for (n = 0; n < rss->num; n++) {
> > > + if (rss->queue[n] >= dev->data->nb_rx_queues) {
> > > + rte_flow_error_set(error, EINVAL,
> > > + RTE_FLOW_ERROR_TYPE_ACTION,
> > > + act,
> > > + "queue id > max number of queues");
> > > + return -rte_errno;
> > > + }
> > > + }
> > > + if (rss->rss_conf)
> > > + rss_config->rss_conf = *rss->rss_conf;
> > > + else
> > > + rss_config->rss_conf.rss_hf =
> > > + pf->adapter->flow_types_mask;
> > > +
> > > + for (n = 0; n < rss->num; ++n)
> > > + rss_config->queue[n] = rss->queue[n];
> > > + rss_config->num = rss->num;
> > > + index++;
> > > +
> > > + /* check if the next not void action is END */
> > > + NEXT_ITEM_OF_ACTION(act, actions, index);
> > > + if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> > > + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
> > > + rte_flow_error_set(error, EINVAL,
> > > + RTE_FLOW_ERROR_TYPE_ACTION,
> > > + act, "Not supported action.");
> > > + return -rte_errno;
> > > + }
> > > + rss_config->queue_region_conf = FALSE;
> > > +
> > > + return 0;
> > > +}
> > > +
> > > +static int
> > > +i40e_parse_rss_filter(struct rte_eth_dev *dev,
> > > + const struct rte_flow_attr *attr,
> > > + const struct rte_flow_item pattern[],
> > > + const struct rte_flow_action actions[],
> > > + union i40e_filter_t *filter,
> > > + struct rte_flow_error *error)
> > > +{
> > > + int ret;
> > > + struct i40e_queue_regions info;
> > > + uint8_t action_flag = 0;
> > > +
> > > + memset(&info, 0, sizeof(struct i40e_queue_regions));
> > > +
> > > + ret = i40e_flow_parse_rss_pattern(dev, pattern,
> > > + error, &action_flag, &info);
> > > + if (ret)
> > > + return ret;
> > > +
> > > + ret = i40e_flow_parse_rss_action(dev, actions, error,
> > > + &action_flag, &info, filter);
> > > + if (ret)
> > > + return ret;
> > > +
> > > + ret = i40e_flow_parse_attr(attr, error);
> > > + if (ret)
> > > + return ret;
> > > +
> > > + cons_filter_type = RTE_ETH_FILTER_HASH;
> > > +
> > > + return 0;
> > > +}
> > > +
> > > +static int
> > > +i40e_config_rss_filter_set(struct rte_eth_dev *dev,
> > > + struct i40e_rte_flow_rss_conf *conf, bool add) {
> >
> > [Qi:] why parameter "add", it is always set with 1 and we have
> > i40e_config_rss_filter_del with add = 0.
>
> Good idea, I will delete that parameter in v2; it seems useless.
>
> >
> > > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> > >dev_private);
> > > + struct i40e_hw *hw =
> > > I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > > +
> > > + if (conf->queue_region_conf) {
> > > + i40e_flush_queue_region_all_conf(dev, hw, pf, add);
> > > + conf->queue_region_conf = 0;
> > > + } else {
> > > + i40e_config_rss_filter(pf, conf, add);
> > > + }
> > > + return 0;
> > > +}
> > > +
> > > +static int
> > > +i40e_config_rss_filter_del(struct rte_eth_dev *dev,
> > > + struct i40e_rte_flow_rss_conf *conf, bool add) {
> >
> > [Qi:] same question for "add".
>
> Good idea, I will delete that parameter in v2; it seems useless.
>
> >
> > > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> > >dev_private);
> > > + struct i40e_hw *hw =
> > > I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > > +
> > > + i40e_flush_queue_region_all_conf(dev, hw, pf, add);
> > > +
> > > + i40e_config_rss_filter(pf, conf, add);
> > > + return 0;
> > > +}
> > > +
> > > +static int
> > > i40e_flow_validate(struct rte_eth_dev *dev,
> > > const struct rte_flow_attr *attr,
> > > const struct rte_flow_item pattern[], @@ -4130,6 +4423,17
> > @@
> > > i40e_flow_validate(struct rte_eth_dev *dev,
> > >
> > > memset(&cons_filter, 0, sizeof(cons_filter));
> > >
> > > + /* Get the non-void item of action */
> > > + while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
> > > + i++;
> > > +
> > > + if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
> > > + ret = i40e_parse_rss_filter(dev, attr, pattern,
> > > + actions, &cons_filter, error);
> > > + return ret;
> > > + }
> > > +
> > > + i = 0;
> > > /* Get the non-void item number of pattern */
> > > while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
> > > if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID) @@ -
> > 4217,6
> > > +4521,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
> > > flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
> > > i40e_tunnel_filter_list);
> > > break;
> > > + case RTE_ETH_FILTER_HASH:
> > > + ret = i40e_config_rss_filter_set(dev,
> > > + &cons_filter.rss_conf, 1);
> > > + flow->rule = &pf->rss_info;
> > > + break;
> > > default:
> > > goto free_flow;
> > > }
> > > @@ -4255,6 +4564,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
> > > ret = i40e_flow_add_del_fdir_filter(dev,
> > > &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
> > > break;
> > > + case RTE_ETH_FILTER_HASH:
> > > + ret = i40e_config_rss_filter_del(dev,
> > > + (struct i40e_rte_flow_rss_conf *)flow->rule, 0);
> > > default:
> > > PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> > > filter_type);
> > > @@ -4397,6 +4709,14 @@ i40e_flow_flush(struct rte_eth_dev *dev,
> > > struct rte_flow_error *error)
> > > return -rte_errno;
> > > }
> > >
> > > + ret = i40e_flow_flush_rss_filter(dev);
> > > + if (ret) {
> > > + rte_flow_error_set(error, -ret,
> > > + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > > + "Failed to flush rss flows.");
> > > + return -rte_errno;
> > > + }
> > > +
> > > return ret;
> > > }
> > >
> > > @@ -4487,3 +4807,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf
> > > *pf)
> > >
> > > return ret;
> > > }
> > > +
> > > +/* remove the rss filter */
> > > +static int
> > > +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev) {
> > > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> > >dev_private);
> > > + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> > > + struct i40e_hw *hw =
> > > I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > > + int32_t ret = -EINVAL;
> > > +
> > > + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
> > > +
> > > + if (rss_info->num)
> > > + ret = i40e_config_rss_filter(pf, rss_info, FALSE);
> > > + return ret;
> > > +}
> > > --
> > > 2.9.3
^ permalink raw reply [flat|nested] 17+ messages in thread
* [dpdk-dev] [PATCH v2] net/i40e: move RSS to flow API
2017-11-24 8:43 [dpdk-dev] [PATCH] net/i40e: move RSS to flow API Wei Zhao
2017-12-21 3:12 ` Zhang, Helin
2017-12-22 4:36 ` Zhang, Qi Z
@ 2018-01-08 8:35 ` Wei Zhao
2018-01-09 2:33 ` Zhang, Qi Z
2018-01-09 9:18 ` [dpdk-dev] [PATCH v3] " Wei Zhao
2 siblings, 2 replies; 17+ messages in thread
From: Wei Zhao @ 2018-01-08 8:35 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, Wei Zhao
rte_flow is actually defined to include RSS,
but until now, RSS has been outside of rte_flow.
This patch moves the existing i40e RSS support to rte_flow.
This patch also enables queue region configuration
using the flow API for i40e.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
v2:
-change some code style.
---
drivers/net/i40e/i40e_ethdev.c | 91 +++++++++++
drivers/net/i40e/i40e_ethdev.h | 11 ++
drivers/net/i40e/i40e_flow.c | 340 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 442 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 811cc9f..75b3bf3 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1349,6 +1349,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize queue region configuration */
i40e_init_queue_region_conf(dev);
+ /* initialize rss configuration from rte_flow */
+ memset(&pf->rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+
return 0;
err_init_fdir_filter_list:
@@ -10943,12 +10947,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
}
}
+/* Restore rss filter */
+static inline void
+i40e_rss_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_rte_flow_rss_conf *conf =
+ &pf->rss_info;
+ if (conf->num)
+ i40e_config_rss_filter(pf, conf, TRUE);
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
i40e_fdir_filter_restore(pf);
+ i40e_rss_filter_restore(pf);
}
static bool
@@ -11366,6 +11381,82 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
return ret;
}
+int
+i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t i, lut = 0;
+ uint16_t j, num;
+ struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+
+ if (!add) {
+ if (memcmp(conf, rss_info,
+ sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
+ i40e_pf_disable_rss(pf);
+ memset(rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (rss_info->num)
+ return -EINVAL;
+
+ /* If both VMDQ and RSS enabled, not all of PF queues are configured.
+ * It's necessary to calculate the actual PF queues that are configured.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ num = i40e_pf_calc_configured_queues_num(pf);
+ else
+ num = pf->dev_data->nb_rx_queues;
+
+ num = RTE_MIN(num, conf->num);
+ PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
+ return -ENOTSUP;
+ }
+
+ /* Fill in redirection table */
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
+ i40e_pf_disable_rss(pf);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+ (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Random default keys */
+ static uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+ rss_conf.rss_key = (uint8_t *)rss_key_default;
+ rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
+
+ return i40e_hw_rss_hash_set(pf, &rss_conf);
+
+ rte_memcpy(rss_info,
+ conf, sizeof(struct i40e_rte_flow_rss_conf));
+
+ return 0;
+}
+
RTE_INIT(i40e_init_log);
static void
i40e_init_log(void)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index cd67453..0a59e39 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -891,6 +891,13 @@ struct i40e_customized_pctype {
bool valid; /* Check if it's valid */
};
+struct i40e_rte_flow_rss_conf {
+ struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
+ uint16_t queue_region_conf; /**< Queue region config flag */
+ uint16_t num; /**< Number of entries in queue[]. */
+ uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
+};
+
/*
* Structure to store private data specific for PF instance.
*/
@@ -945,6 +952,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
+ struct i40e_rte_flow_rss_conf rss_info; /* rss info */
struct i40e_queue_regions queue_region; /* queue region info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
@@ -1071,6 +1079,7 @@ union i40e_filter_t {
struct i40e_fdir_filter_conf fdir_filter;
struct rte_eth_tunnel_filter_conf tunnel_filter;
struct i40e_tunnel_filter_conf consistent_tunnel_filter;
+ struct i40e_rte_flow_rss_conf rss_conf;
};
typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
@@ -1198,6 +1207,8 @@ int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
+int i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 7e4936e..4d29818 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -138,6 +138,8 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
+static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -4095,6 +4097,301 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
}
static int
+i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ uint8_t *action_flag,
+ struct i40e_queue_regions *info)
+{
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ return 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ *action_flag = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) {
+ info->region[0].user_priority[0] =
+ (vlan_spec->tci >> 13) & 0x7;
+ info->region[0].user_priority_num = 1;
+ info->queue_region_number = 1;
+ *action_flag = 0;
+ }
+ }
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ uint8_t *action_flag,
+ struct i40e_queue_regions *conf_info,
+ union i40e_filter_t *filter)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_queue_regions *info = &pf->queue_region;
+ struct i40e_rte_flow_rss_conf *rss_config =
+ &filter->rss_conf;
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ uint16_t i, j, n;
+ uint32_t index = 0;
+
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ rss = (const struct rte_flow_action_rss *)act->conf;
+
+ /**
+ * rss only supports forwarding,
+ * check if the first not void action is RSS.
+ */
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (action_flag) {
+ for (n = 0; n < 64; n++) {
+ if (rss->rss_conf->rss_hf & (1 << n)) {
+ conf_info->region[0].user_priority[0] = n;
+ conf_info->region[0].user_priority_num = 1;
+ conf_info->queue_region_number = 1;
+ break;
+ }
+ }
+ }
+
+ for (n = 0; n < conf_info->queue_region_number; n++) {
+ if (conf_info->region[n].user_priority_num ||
+ conf_info->region[n].flowtype_num) {
+ if (!((rte_is_power_of_2(rss->num)) &&
+ rss->num <= 64)) {
+ PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].user_priority[n] >=
+ I40E_MAX_USER_PRIORITY) {
+ PMD_DRV_LOG(ERR, "the user priority max index is 7");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].hw_flowtype[n] >=
+ I40E_FILTER_PCTYPE_MAX) {
+ PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
+ return -rte_errno;
+ }
+
+ if (rss_info->num < rss->num ||
+ rss_info->queue[0] < rss->queue[0] ||
+ (rss->queue[0] + rss->num >
+ rss_info->num + rss_info->queue[0])) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ if (info->region[i].queue_num == rss->num &&
+ info->region[i].queue_start_index ==
+ rss->queue[0])
+ break;
+ }
+
+ if (i == info->queue_region_number) {
+ if (i > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return -rte_errno;
+ }
+
+ info->region[i].queue_num =
+ rss->num;
+ info->region[i].queue_start_index =
+ rss->queue[0];
+ info->region[i].region_id =
+ info->queue_region_number;
+
+ j = info->region[i].user_priority_num;
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] =
+ conf_info->
+ region[n].user_priority[0];
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] =
+ conf_info->
+ region[n].hw_flowtype[0];
+ info->region[i].flowtype_num++;
+ }
+ info->queue_region_number++;
+ } else {
+ j = info->region[i].user_priority_num;
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] =
+ conf_info->
+ region[n].user_priority[0];
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] =
+ conf_info->
+ region[n].hw_flowtype[0];
+ info->region[i].flowtype_num++;
+ }
+ }
+ }
+
+ rss_config->queue_region_conf = TRUE;
+ return 0;
+ }
+
+ if (!rss || !rss->num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+ if (rss->rss_conf)
+ rss_config->rss_conf = *rss->rss_conf;
+ else
+ rss_config->rss_conf.rss_hf =
+ pf->adapter->flow_types_mask;
+
+ for (n = 0; n < rss->num; ++n)
+ rss_config->queue[n] = rss->queue[n];
+ rss_config->num = rss->num;
+ index++;
+
+ /* check if the next not void action is END */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+ rss_config->queue_region_conf = FALSE;
+
+ return 0;
+}
+
+static int
+i40e_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ union i40e_filter_t *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct i40e_queue_regions info;
+ uint8_t action_flag = 0;
+
+ memset(&info, 0, sizeof(struct i40e_queue_regions));
+
+ ret = i40e_flow_parse_rss_pattern(dev, pattern,
+ error, &action_flag, &info);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_rss_action(dev, actions, error,
+ &action_flag, &info, filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_HASH;
+
+ return 0;
+}
+
+static int
+i40e_config_rss_filter_set(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (conf->queue_region_conf) {
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ conf->queue_region_conf = 0;
+ } else {
+ i40e_config_rss_filter(pf, conf, 1);
+ }
+ return 0;
+}
+
+static int
+i40e_config_rss_filter_del(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ i40e_config_rss_filter(pf, conf, 0);
+ return 0;
+}
+
+static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -4130,6 +4427,17 @@ i40e_flow_validate(struct rte_eth_dev *dev,
memset(&cons_filter, 0, sizeof(cons_filter));
+ /* Get the non-void item of action */
+ while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ i++;
+
+ if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ ret = i40e_parse_rss_filter(dev, attr, pattern,
+ actions, &cons_filter, error);
+ return ret;
+ }
+
+ i = 0;
/* Get the non-void item number of pattern */
while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
@@ -4217,6 +4525,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
i40e_tunnel_filter_list);
break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_set(dev,
+ &cons_filter.rss_conf);
+ flow->rule = &pf->rss_info;
+ break;
default:
goto free_flow;
}
@@ -4255,6 +4568,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_flow_add_del_fdir_filter(dev,
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_del(dev,
+ (struct i40e_rte_flow_rss_conf *)flow->rule);
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -4397,6 +4713,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return -rte_errno;
}
+ ret = i40e_flow_flush_rss_filter(dev);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush rss flows.");
+ return -rte_errno;
+ }
+
return ret;
}
@@ -4487,3 +4811,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
return ret;
}
+
+/* remove the rss filter */
+static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int32_t ret = -EINVAL;
+
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ if (rss_info->num)
+ ret = i40e_config_rss_filter(pf, rss_info, FALSE);
+ return ret;
+}
--
2.9.3
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/i40e: move RSS to flow API
2018-01-08 8:35 ` [dpdk-dev] [PATCH v2] " Wei Zhao
@ 2018-01-09 2:33 ` Zhang, Qi Z
2018-01-10 1:53 ` Zhao1, Wei
2018-01-09 9:18 ` [dpdk-dev] [PATCH v3] " Wei Zhao
1 sibling, 1 reply; 17+ messages in thread
From: Zhang, Qi Z @ 2018-01-09 2:33 UTC (permalink / raw)
To: Zhao1, Wei, dev
Checked with author offline.
Require more comments to explain the acceptable pattern for i40e_flow_parse_rss_pattern
and also need to correct the logic since current implementation will accept any combination of ETH and VLAN pattern which does not make sense.
Regards
Qi
> -----Original Message-----
> From: Zhao1, Wei
> Sent: Monday, January 8, 2018 4:36 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Zhao1, Wei <wei.zhao1@intel.com>
> Subject: [PATCH v2] net/i40e: move RSS to flow API
>
> Rte_flow actually defined to include RSS, but till now, RSS is out of rte_flow.
> This patch is to move i40e existing RSS to rte_flow.
> This patch also enable queue region configuration using flow API for i40e.
>
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
>
> ---
>
> v2:
> -change some code style.
> ---
> drivers/net/i40e/i40e_ethdev.c | 91 +++++++++++
> drivers/net/i40e/i40e_ethdev.h | 11 ++
> drivers/net/i40e/i40e_flow.c | 340
> +++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 442 insertions(+)
>
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 811cc9f..75b3bf3 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -1349,6 +1349,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
> /* initialize queue region configuration */
> i40e_init_queue_region_conf(dev);
>
> + /* initialize rss configuration from rte_flow */
> + memset(&pf->rss_info, 0,
> + sizeof(struct i40e_rte_flow_rss_conf));
> +
> return 0;
>
> err_init_fdir_filter_list:
> @@ -10943,12 +10947,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
> }
> }
>
> +/* Restore rss filter */
> +static inline void
> +i40e_rss_filter_restore(struct i40e_pf *pf) {
> + struct i40e_rte_flow_rss_conf *conf =
> + &pf->rss_info;
> + if (conf->num)
> + i40e_config_rss_filter(pf, conf, TRUE); }
> +
> static void
> i40e_filter_restore(struct i40e_pf *pf) {
> i40e_ethertype_filter_restore(pf);
> i40e_tunnel_filter_restore(pf);
> i40e_fdir_filter_restore(pf);
> + i40e_rss_filter_restore(pf);
> }
>
> static bool
> @@ -11366,6 +11381,82 @@ i40e_cloud_filter_qinq_create(struct i40e_pf
> *pf)
> return ret;
> }
>
> +int
> +i40e_config_rss_filter(struct i40e_pf *pf,
> + struct i40e_rte_flow_rss_conf *conf, bool add) {
> + struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> + uint32_t i, lut = 0;
> + uint16_t j, num;
> + struct rte_eth_rss_conf rss_conf = conf->rss_conf;
> + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> +
> + if (!add) {
> + if (memcmp(conf, rss_info,
> + sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
> + i40e_pf_disable_rss(pf);
> + memset(rss_info, 0,
> + sizeof(struct i40e_rte_flow_rss_conf));
> + return 0;
> + }
> + return -EINVAL;
> + }
> +
> + if (rss_info->num)
> + return -EINVAL;
> +
> + /* If both VMDQ and RSS enabled, not all of PF queues are configured.
> + * It's necessary to calculate the actual PF queues that are configured.
> + */
> + if (pf->dev_data->dev_conf.rxmode.mq_mode &
> ETH_MQ_RX_VMDQ_FLAG)
> + num = i40e_pf_calc_configured_queues_num(pf);
> + else
> + num = pf->dev_data->nb_rx_queues;
> +
> + num = RTE_MIN(num, conf->num);
> + PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
> + num);
> +
> + if (num == 0) {
> + PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
> + return -ENOTSUP;
> + }
> +
> + /* Fill in redirection table */
> + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
> + if (j == num)
> + j = 0;
> + lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
> + hw->func_caps.rss_table_entry_width) - 1));
> + if ((i & 3) == 3)
> + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
> + }
> +
> + if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
> + i40e_pf_disable_rss(pf);
> + return 0;
> + }
> + if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
> + (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
> + /* Random default keys */
> + static uint32_t rss_key_default[] = {0x6b793944,
> + 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
> + 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
> + 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
> +
> + rss_conf.rss_key = (uint8_t *)rss_key_default;
> + rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
> + sizeof(uint32_t);
> + }
> +
> + return i40e_hw_rss_hash_set(pf, &rss_conf);
> +
> + rte_memcpy(rss_info,
> + conf, sizeof(struct i40e_rte_flow_rss_conf));
> +
> + return 0;
> +}
> +
> RTE_INIT(i40e_init_log);
> static void
> i40e_init_log(void)
> diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
> index cd67453..0a59e39 100644
> --- a/drivers/net/i40e/i40e_ethdev.h
> +++ b/drivers/net/i40e/i40e_ethdev.h
> @@ -891,6 +891,13 @@ struct i40e_customized_pctype {
> bool valid; /* Check if it's valid */
> };
>
> +struct i40e_rte_flow_rss_conf {
> + struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
> + uint16_t queue_region_conf; /**< Queue region config flag */
> + uint16_t num; /**< Number of entries in queue[]. */
> + uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */ };
> +
> /*
> * Structure to store private data specific for PF instance.
> */
> @@ -945,6 +952,7 @@ struct i40e_pf {
> struct i40e_fdir_info fdir; /* flow director info */
> struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
> struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
> + struct i40e_rte_flow_rss_conf rss_info; /* rss info */
> struct i40e_queue_regions queue_region; /* queue region info */
> struct i40e_fc_conf fc_conf; /* Flow control conf */
> struct i40e_mirror_rule_list mirror_list; @@ -1071,6 +1079,7 @@ union
> i40e_filter_t {
> struct i40e_fdir_filter_conf fdir_filter;
> struct rte_eth_tunnel_filter_conf tunnel_filter;
> struct i40e_tunnel_filter_conf consistent_tunnel_filter;
> + struct i40e_rte_flow_rss_conf rss_conf;
> };
>
> typedef int (*parse_filter_t)(struct rte_eth_dev *dev, @@ -1198,6 +1207,8
> @@ int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb); int
> i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
> struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on); void
> i40e_init_queue_region_conf(struct rte_eth_dev *dev);
> +int i40e_config_rss_filter(struct i40e_pf *pf,
> + struct i40e_rte_flow_rss_conf *conf, bool add);
>
> #define I40E_DEV_TO_PCI(eth_dev) \
> RTE_DEV_TO_PCI((eth_dev)->device)
> diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index
> 7e4936e..4d29818 100644
> --- a/drivers/net/i40e/i40e_flow.c
> +++ b/drivers/net/i40e/i40e_flow.c
> @@ -138,6 +138,8 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf
> *pf); static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf); static
> int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf); static int
> +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev); static int
> i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
> const struct rte_flow_attr *attr,
> const struct rte_flow_item pattern[], @@ -4095,6
> +4097,301 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, }
>
> static int
> +i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
> + const struct rte_flow_item *pattern,
> + struct rte_flow_error *error,
> + uint8_t *action_flag,
> + struct i40e_queue_regions *info) {
> + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
> + const struct rte_flow_item *item = pattern;
> + enum rte_flow_item_type item_type;
> +
> + if (item->type == RTE_FLOW_ITEM_TYPE_END)
> + return 0;
> +
> + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> + if (item->last) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Not support range");
> + return -rte_errno;
> + }
> + item_type = item->type;
> + switch (item_type) {
> + case RTE_FLOW_ITEM_TYPE_ETH:
> + *action_flag = 1;
> + break;
> + case RTE_FLOW_ITEM_TYPE_VLAN:
> + vlan_spec =
> + (const struct rte_flow_item_vlan *)item->spec;
> + vlan_mask =
> + (const struct rte_flow_item_vlan *)item->mask;
> + if (vlan_spec && vlan_mask) {
> + if (vlan_mask->tci ==
> + rte_cpu_to_be_16(I40E_TCI_MASK)) {
> + info->region[0].user_priority[0] =
> + (vlan_spec->tci >> 13) & 0x7;
> + info->region[0].user_priority_num = 1;
> + info->queue_region_number = 1;
> + *action_flag = 0;
> + }
> + }
> + break;
> + default:
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Not support range");
> + return -rte_errno;
> + }
> + }
> +
> + return 0;
> +}
> +
> +static int
> +i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
> + const struct rte_flow_action *actions,
> + struct rte_flow_error *error,
> + uint8_t *action_flag,
> + struct i40e_queue_regions *conf_info,
> + union i40e_filter_t *filter)
> +{
> + const struct rte_flow_action *act;
> + const struct rte_flow_action_rss *rss;
> + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> + struct i40e_queue_regions *info = &pf->queue_region;
> + struct i40e_rte_flow_rss_conf *rss_config =
> + &filter->rss_conf;
> + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> + uint16_t i, j, n;
> + uint32_t index = 0;
> +
> + NEXT_ITEM_OF_ACTION(act, actions, index);
> + rss = (const struct rte_flow_action_rss *)act->conf;
> +
> + /**
> + * rss only supports forwarding,
> + * check if the first not void action is RSS.
> + */
> + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
> + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION,
> + act, "Not supported action.");
> + return -rte_errno;
> + }
> +
> + if (action_flag) {
> + for (n = 0; n < 64; n++) {
> + if (rss->rss_conf->rss_hf & (1 << n)) {
> + conf_info->region[0].user_priority[0] = n;
> + conf_info->region[0].user_priority_num = 1;
> + conf_info->queue_region_number = 1;
> + break;
> + }
> + }
> + }
> +
> + for (n = 0; n < conf_info->queue_region_number; n++) {
> + if (conf_info->region[n].user_priority_num ||
> + conf_info->region[n].flowtype_num) {
> + if (!((rte_is_power_of_2(rss->num)) &&
> + rss->num <= 64)) {
> + PMD_DRV_LOG(ERR, "The region sizes should be any of the
> following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
> + "total number of queues do not exceed the VSI allocation");
> + return -rte_errno;
> + }
> +
> + if (conf_info->region[n].user_priority[n] >=
> + I40E_MAX_USER_PRIORITY) {
> + PMD_DRV_LOG(ERR, "the user priority max index is 7");
> + return -rte_errno;
> + }
> +
> + if (conf_info->region[n].hw_flowtype[n] >=
> + I40E_FILTER_PCTYPE_MAX) {
> + PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max
> index is 63");
> + return -rte_errno;
> + }
> +
> + if (rss_info->num < rss->num ||
> + rss_info->queue[0] < rss->queue[0] ||
> + (rss->queue[0] + rss->num >
> + rss_info->num + rss_info->queue[0])) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION,
> + act,
> + "no valid queues");
> + return -rte_errno;
> + }
> +
> + for (i = 0; i < info->queue_region_number; i++) {
> + if (info->region[i].queue_num == rss->num &&
> + info->region[i].queue_start_index ==
> + rss->queue[0])
> + break;
> + }
> +
> + if (i == info->queue_region_number) {
> + if (i > I40E_REGION_MAX_INDEX) {
> + PMD_DRV_LOG(ERR, "the queue region max index is
> 7");
> + return -rte_errno;
> + }
> +
> + info->region[i].queue_num =
> + rss->num;
> + info->region[i].queue_start_index =
> + rss->queue[0];
> + info->region[i].region_id =
> + info->queue_region_number;
> +
> + j = info->region[i].user_priority_num;
> + if (conf_info->region[n].user_priority_num) {
> + info->region[i].user_priority[j] =
> + conf_info->
> + region[n].user_priority[0];
> + info->region[i].user_priority_num++;
> + }
> +
> + j = info->region[i].flowtype_num;
> + if (conf_info->region[n].flowtype_num) {
> + info->region[i].hw_flowtype[j] =
> + conf_info->
> + region[n].hw_flowtype[0];
> + info->region[i].flowtype_num++;
> + }
> + info->queue_region_number++;
> + } else {
> + j = info->region[i].user_priority_num;
> + if (conf_info->region[n].user_priority_num) {
> + info->region[i].user_priority[j] =
> + conf_info->
> + region[n].user_priority[0];
> + info->region[i].user_priority_num++;
> + }
> +
> + j = info->region[i].flowtype_num;
> + if (conf_info->region[n].flowtype_num) {
> + info->region[i].hw_flowtype[j] =
> + conf_info->
> + region[n].hw_flowtype[0];
> + info->region[i].flowtype_num++;
> + }
> + }
> + }
> +
> + rss_config->queue_region_conf = TRUE;
> + return 0;
> + }
> +
> + if (!rss || !rss->num) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION,
> + act,
> + "no valid queues");
> + return -rte_errno;
> + }
> +
> + for (n = 0; n < rss->num; n++) {
> + if (rss->queue[n] >= dev->data->nb_rx_queues) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION,
> + act,
> + "queue id > max number of queues");
> + return -rte_errno;
> + }
> + }
> + if (rss->rss_conf)
> + rss_config->rss_conf = *rss->rss_conf;
> + else
> + rss_config->rss_conf.rss_hf =
> + pf->adapter->flow_types_mask;
> +
> + for (n = 0; n < rss->num; ++n)
> + rss_config->queue[n] = rss->queue[n];
> + rss_config->num = rss->num;
> + index++;
> +
> + /* check if the next not void action is END */
> + NEXT_ITEM_OF_ACTION(act, actions, index);
> + if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION,
> + act, "Not supported action.");
> + return -rte_errno;
> + }
> + rss_config->queue_region_conf = FALSE;
> +
> + return 0;
> +}
> +
> +static int
> +i40e_parse_rss_filter(struct rte_eth_dev *dev,
> + const struct rte_flow_attr *attr,
> + const struct rte_flow_item pattern[],
> + const struct rte_flow_action actions[],
> + union i40e_filter_t *filter,
> + struct rte_flow_error *error)
> +{
> + int ret;
> + struct i40e_queue_regions info;
> + uint8_t action_flag = 0;
> +
> + memset(&info, 0, sizeof(struct i40e_queue_regions));
> +
> + ret = i40e_flow_parse_rss_pattern(dev, pattern,
> + error, &action_flag, &info);
> + if (ret)
> + return ret;
> +
> + ret = i40e_flow_parse_rss_action(dev, actions, error,
> + &action_flag, &info, filter);
> + if (ret)
> + return ret;
> +
> + ret = i40e_flow_parse_attr(attr, error);
> + if (ret)
> + return ret;
> +
> + cons_filter_type = RTE_ETH_FILTER_HASH;
> +
> + return 0;
> +}
> +
> +static int
> +i40e_config_rss_filter_set(struct rte_eth_dev *dev,
> + struct i40e_rte_flow_rss_conf *conf)
> +{
> + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> + struct i40e_hw *hw =
> I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> + if (conf->queue_region_conf) {
> + i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
> + conf->queue_region_conf = 0;
> + } else {
> + i40e_config_rss_filter(pf, conf, 1);
> + }
> + return 0;
> +}
> +
> +static int
> +i40e_config_rss_filter_del(struct rte_eth_dev *dev,
> + struct i40e_rte_flow_rss_conf *conf)
> +{
> + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> + struct i40e_hw *hw =
> I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +
> + i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
> +
> + i40e_config_rss_filter(pf, conf, 0);
> + return 0;
> +}
> +
> +static int
> i40e_flow_validate(struct rte_eth_dev *dev,
> const struct rte_flow_attr *attr,
> const struct rte_flow_item pattern[], @@ -4130,6 +4427,17 @@
> i40e_flow_validate(struct rte_eth_dev *dev,
>
> memset(&cons_filter, 0, sizeof(cons_filter));
>
> + /* Get the non-void item of action */
> + while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
> + i++;
> +
> + if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
> + ret = i40e_parse_rss_filter(dev, attr, pattern,
> + actions, &cons_filter, error);
> + return ret;
> + }
> +
> + i = 0;
> /* Get the non-void item number of pattern */
> while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
> if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID) @@ -4217,6
> +4525,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
> flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
> i40e_tunnel_filter_list);
> break;
> + case RTE_ETH_FILTER_HASH:
> + ret = i40e_config_rss_filter_set(dev,
> + &cons_filter.rss_conf);
> + flow->rule = &pf->rss_info;
> + break;
> default:
> goto free_flow;
> }
> @@ -4255,6 +4568,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
> ret = i40e_flow_add_del_fdir_filter(dev,
> &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
> break;
> + case RTE_ETH_FILTER_HASH:
> + ret = i40e_config_rss_filter_del(dev,
> + (struct i40e_rte_flow_rss_conf *)flow->rule);
> default:
> PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> filter_type);
> @@ -4397,6 +4713,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct
> rte_flow_error *error)
> return -rte_errno;
> }
>
> + ret = i40e_flow_flush_rss_filter(dev);
> + if (ret) {
> + rte_flow_error_set(error, -ret,
> + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> + "Failed to flush rss flows.");
> + return -rte_errno;
> + }
> +
> return ret;
> }
>
> @@ -4487,3 +4811,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
>
> return ret;
> }
> +
> +/* remove the rss filter */
> +static int
> +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev) {
> + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> + struct i40e_hw *hw =
> I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + int32_t ret = -EINVAL;
> +
> + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
> +
> + if (rss_info->num)
> + ret = i40e_config_rss_filter(pf, rss_info, FALSE);
> + return ret;
> +}
> --
> 2.9.3
^ permalink raw reply [flat|nested] 17+ messages in thread
* [dpdk-dev] [PATCH v3] net/i40e: move RSS to flow API
2018-01-08 8:35 ` [dpdk-dev] [PATCH v2] " Wei Zhao
2018-01-09 2:33 ` Zhang, Qi Z
@ 2018-01-09 9:18 ` Wei Zhao
2018-01-10 2:10 ` [dpdk-dev] [PATCH v4] " Wei Zhao
1 sibling, 1 reply; 17+ messages in thread
From: Wei Zhao @ 2018-01-09 9:18 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, Wei Zhao
Rte_flow actually defined to include RSS,
but till now, RSS is out of rte_flow.
This patch is to move i40e existing RSS to rte_flow.
This patch also enable queue region configuration
using flow API for i40e.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
v2:
-change some code style.
v3:
-rebase code and add more comments.
---
doc/guides/rel_notes/release_18_02.rst | 7 +
drivers/net/i40e/i40e_ethdev.c | 91 +++++++++
drivers/net/i40e/i40e_ethdev.h | 11 +
drivers/net/i40e/i40e_flow.c | 356 +++++++++++++++++++++++++++++++++
4 files changed, 465 insertions(+)
diff --git a/doc/guides/rel_notes/release_18_02.rst b/doc/guides/rel_notes/release_18_02.rst
index f6e8090..31bcf31 100644
--- a/doc/guides/rel_notes/release_18_02.rst
+++ b/doc/guides/rel_notes/release_18_02.rst
@@ -69,6 +69,13 @@ New Features
rte_flow. This patch is to support igb and ixgbe NIC with existing RSS
configuration using rte_flow API.
+* **Added the i40e ethernet driver to support RSS with flow API.**
+
+ Rte_flow actually defined to include RSS, but till now, RSS is out of
+ rte_flow. This patch is to support i40e NIC with existing RSS
+ configuration using rte_flow API. It also enables queue region configuration
+ using flow API for i40e.
+
API Changes
-----------
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 9ec0201..e7d91df 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1320,6 +1320,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize queue region configuration */
i40e_init_queue_region_conf(dev);
+ /* initialize rss configuration from rte_flow */
+ memset(&pf->rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+
return 0;
err_init_fdir_filter_list:
@@ -10924,12 +10928,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
}
}
+/* Restore rss filter */
+static inline void
+i40e_rss_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_rte_flow_rss_conf *conf =
+ &pf->rss_info;
+ if (conf->num)
+ i40e_config_rss_filter(pf, conf, TRUE);
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
i40e_fdir_filter_restore(pf);
+ i40e_rss_filter_restore(pf);
}
static bool
@@ -11384,6 +11399,82 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
return ret;
}
+int
+i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t i, lut = 0;
+ uint16_t j, num;
+ struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+
+ if (!add) {
+ if (memcmp(conf, rss_info,
+ sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
+ i40e_pf_disable_rss(pf);
+ memset(rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (rss_info->num)
+ return -EINVAL;
+
+ /* If both VMDQ and RSS enabled, not all of PF queues are configured.
+ * It's necessary to calculate the actual PF queues that are configured.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ num = i40e_pf_calc_configured_queues_num(pf);
+ else
+ num = pf->dev_data->nb_rx_queues;
+
+ num = RTE_MIN(num, conf->num);
+ PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
+ return -ENOTSUP;
+ }
+
+ /* Fill in redirection table */
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
+ i40e_pf_disable_rss(pf);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+ (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Random default keys */
+ static uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+ rss_conf.rss_key = (uint8_t *)rss_key_default;
+ rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
+
+ return i40e_hw_rss_hash_set(pf, &rss_conf);
+
+ rte_memcpy(rss_info,
+ conf, sizeof(struct i40e_rte_flow_rss_conf));
+
+ return 0;
+}
+
RTE_INIT(i40e_init_log);
static void
i40e_init_log(void)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 7d5aff2..29032e8 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -863,6 +863,13 @@ struct i40e_customized_pctype {
bool valid; /* Check if it's valid */
};
+struct i40e_rte_flow_rss_conf {
+ struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
+ uint16_t queue_region_conf; /**< Queue region config flag */
+ uint16_t num; /**< Number of entries in queue[]. */
+ uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
+};
+
/*
* Structure to store private data specific for PF instance.
*/
@@ -917,6 +924,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
+ struct i40e_rte_flow_rss_conf rss_info; /* rss info */
struct i40e_queue_regions queue_region; /* queue region info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
@@ -1043,6 +1051,7 @@ union i40e_filter_t {
struct i40e_fdir_filter_conf fdir_filter;
struct rte_eth_tunnel_filter_conf tunnel_filter;
struct i40e_tunnel_filter_conf consistent_tunnel_filter;
+ struct i40e_rte_flow_rss_conf rss_conf;
};
typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
@@ -1170,6 +1179,8 @@ int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
+int i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index e522962..2b364b7 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -110,6 +110,8 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
+static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -4102,6 +4104,317 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
}
static int
+i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ uint8_t *action_flag,
+ struct i40e_queue_regions *info)
+{
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ return 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ *action_flag = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) {
+ info->region[0].user_priority[0] =
+ (vlan_spec->tci >> 13) & 0x7;
+ info->region[0].user_priority_num = 1;
+ info->queue_region_number = 1;
+ *action_flag = 0;
+ }
+ }
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * This function is used to configure the existing i40e RSS with rte_flow.
+ * It also enables queue region configuration using the flow API for i40e.
+ * The pattern can be used to indicate which parameters will be included in
+ * the flow, like user_priority or flowtype for queue region, or the HASH
+ * function for RSS.
+ * The action is used to transmit parameters like queue index and HASH
+ * function for RSS, or flowtype for queue region configuration.
+ * For example:
+ * pattern:
+ * Case 1: only ETH, indicates the flowtype for queue region will be parsed.
+ * Case 2: only VLAN, indicates the user_priority for queue region will be
+ * parsed.
+ * Case 3: none, indicates RSS-related parameters will be parsed in action.
+ * Any pattern other than ETH or VLAN will be treated as invalid except END.
+ * So, the pattern choice depends on the purpose of the configuration of
+ * that flow.
+ * action:
+ * The RSS action will be used to transmit valid parameters with
+ * struct rte_flow_action_rss for all the 3 cases.
+ */
+
+static int
+i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ uint8_t *action_flag,
+ struct i40e_queue_regions *conf_info,
+ union i40e_filter_t *filter)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_queue_regions *info = &pf->queue_region;
+ struct i40e_rte_flow_rss_conf *rss_config =
+ &filter->rss_conf;
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ uint16_t i, j, n, tmp;
+ uint32_t index = 0;
+
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ rss = (const struct rte_flow_action_rss *)act->conf;
+
+ /**
+ * rss only supports forwarding,
+ * check if the first not void action is RSS.
+ */
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (action_flag) {
+ for (n = 0; n < 64; n++) {
+ if (rss->rss_conf->rss_hf & (1 << n)) {
+ conf_info->region[0].hw_flowtype[0] = n;
+ conf_info->region[0].flowtype_num = 1;
+ conf_info->queue_region_number = 1;
+ break;
+ }
+ }
+ }
+
+ for (n = 0; n < conf_info->queue_region_number; n++) {
+ if (conf_info->region[n].user_priority_num ||
+ conf_info->region[n].flowtype_num) {
+ if (!((rte_is_power_of_2(rss->num)) &&
+ rss->num <= 64)) {
+ PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].user_priority[n] >=
+ I40E_MAX_USER_PRIORITY) {
+ PMD_DRV_LOG(ERR, "the user priority max index is 7");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].hw_flowtype[n] >=
+ I40E_FILTER_PCTYPE_MAX) {
+ PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
+ return -rte_errno;
+ }
+
+ if (rss_info->num < rss->num ||
+ rss_info->queue[0] < rss->queue[0] ||
+ (rss->queue[0] + rss->num >
+ rss_info->num + rss_info->queue[0])) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ if (info->region[i].queue_num == rss->num &&
+ info->region[i].queue_start_index ==
+ rss->queue[0])
+ break;
+ }
+
+ if (i == info->queue_region_number) {
+ if (i > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return -rte_errno;
+ }
+
+ info->region[i].queue_num =
+ rss->num;
+ info->region[i].queue_start_index =
+ rss->queue[0];
+ info->region[i].region_id =
+ info->queue_region_number;
+
+ j = info->region[i].user_priority_num;
+ tmp = conf_info->region[n].user_priority[0];
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] = tmp;
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ tmp = conf_info->region[n].hw_flowtype[0];
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] = tmp;
+ info->region[i].flowtype_num++;
+ }
+ info->queue_region_number++;
+ } else {
+ j = info->region[i].user_priority_num;
+ tmp = conf_info->region[n].user_priority[0];
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] = tmp;
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ tmp = conf_info->region[n].hw_flowtype[0];
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] = tmp;
+ info->region[i].flowtype_num++;
+ }
+ }
+ }
+
+ rss_config->queue_region_conf = TRUE;
+ return 0;
+ }
+
+ if (!rss || !rss->num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+ if (rss->rss_conf)
+ rss_config->rss_conf = *rss->rss_conf;
+ else
+ rss_config->rss_conf.rss_hf =
+ pf->adapter->flow_types_mask;
+
+ for (n = 0; n < rss->num; ++n)
+ rss_config->queue[n] = rss->queue[n];
+ rss_config->num = rss->num;
+ index++;
+
+ /* check if the next not void action is END */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+ rss_config->queue_region_conf = FALSE;
+
+ return 0;
+}
+
+static int
+i40e_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ union i40e_filter_t *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct i40e_queue_regions info;
+ uint8_t action_flag = 0;
+
+ memset(&info, 0, sizeof(struct i40e_queue_regions));
+
+ ret = i40e_flow_parse_rss_pattern(dev, pattern,
+ error, &action_flag, &info);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_rss_action(dev, actions, error,
+ &action_flag, &info, filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_HASH;
+
+ return 0;
+}
+
+static int
+i40e_config_rss_filter_set(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (conf->queue_region_conf) {
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ conf->queue_region_conf = 0;
+ } else {
+ i40e_config_rss_filter(pf, conf, 1);
+ }
+ return 0;
+}
+
+static int
+i40e_config_rss_filter_del(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ i40e_config_rss_filter(pf, conf, 0);
+ return 0;
+}
+
+static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -4137,6 +4450,17 @@ i40e_flow_validate(struct rte_eth_dev *dev,
memset(&cons_filter, 0, sizeof(cons_filter));
+ /* Get the non-void item of action */
+ while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ i++;
+
+ if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ ret = i40e_parse_rss_filter(dev, attr, pattern,
+ actions, &cons_filter, error);
+ return ret;
+ }
+
+ i = 0;
/* Get the non-void item number of pattern */
while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
@@ -4224,6 +4548,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
i40e_tunnel_filter_list);
break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_set(dev,
+ &cons_filter.rss_conf);
+ flow->rule = &pf->rss_info;
+ break;
default:
goto free_flow;
}
@@ -4262,6 +4591,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_flow_add_del_fdir_filter(dev,
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_del(dev,
+ (struct i40e_rte_flow_rss_conf *)flow->rule);
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -4404,6 +4736,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return -rte_errno;
}
+ ret = i40e_flow_flush_rss_filter(dev);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush rss flows.");
+ return -rte_errno;
+ }
+
return ret;
}
@@ -4499,3 +4839,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
return ret;
}
+
+/* remove the rss filter */
+static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int32_t ret = -EINVAL;
+
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ if (rss_info->num)
+ ret = i40e_config_rss_filter(pf, rss_info, FALSE);
+ return ret;
+}
--
2.9.3
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/i40e: move RSS to flow API
2018-01-09 2:33 ` Zhang, Qi Z
@ 2018-01-10 1:53 ` Zhao1, Wei
0 siblings, 0 replies; 17+ messages in thread
From: Zhao1, Wei @ 2018-01-10 1:53 UTC (permalink / raw)
To: Zhang, Qi Z, dev
A new v3 has been committed to DPDK.org
https://dpdk.org/dev/patchwork/patch/33158/
> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Tuesday, January 9, 2018 10:34 AM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Subject: RE: [PATCH v2] net/i40e: move RSS to flow API
>
> Checked with author offline.
>
> Require more comments to explain the acceptable pattern for
> i40e_flow_parse_rss_pattern and also need to correct the logic since current
> implementation will accept any combination of ETH and VLAN pattern which
> does not make sense.
>
> Regards
> Qi
>
> > -----Original Message-----
> > From: Zhao1, Wei
> > Sent: Monday, January 8, 2018 4:36 PM
> > To: dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Zhao1, Wei
> > <wei.zhao1@intel.com>
> > Subject: [PATCH v2] net/i40e: move RSS to flow API
> >
> > Rte_flow actually defined to include RSS, but till now, RSS is out of rte_flow.
> > This patch is to move i40e existing RSS to rte_flow.
> > This patch also enable queue region configuration using flow API for i40e.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> >
> > ---
> >
> > v2:
> > -change some code style.
> > ---
> > drivers/net/i40e/i40e_ethdev.c | 91 +++++++++++
> > drivers/net/i40e/i40e_ethdev.h | 11 ++
> > drivers/net/i40e/i40e_flow.c | 340
> > +++++++++++++++++++++++++++++++++++++++++
> > 3 files changed, 442 insertions(+)
> >
> > diff --git a/drivers/net/i40e/i40e_ethdev.c
> > b/drivers/net/i40e/i40e_ethdev.c index 811cc9f..75b3bf3 100644
> > --- a/drivers/net/i40e/i40e_ethdev.c
> > +++ b/drivers/net/i40e/i40e_ethdev.c
> > @@ -1349,6 +1349,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
> > /* initialize queue region configuration */
> > i40e_init_queue_region_conf(dev);
> >
> > + /* initialize rss configuration from rte_flow */
> > + memset(&pf->rss_info, 0,
> > + sizeof(struct i40e_rte_flow_rss_conf));
> > +
> > return 0;
> >
> > err_init_fdir_filter_list:
> > @@ -10943,12 +10947,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
> > }
> > }
> >
> > +/* Restore rss filter */
> > +static inline void
> > +i40e_rss_filter_restore(struct i40e_pf *pf) {
> > + struct i40e_rte_flow_rss_conf *conf =
> > + &pf->rss_info;
> > + if (conf->num)
> > + i40e_config_rss_filter(pf, conf, TRUE); }
> > +
> > static void
> > i40e_filter_restore(struct i40e_pf *pf) {
> > i40e_ethertype_filter_restore(pf);
> > i40e_tunnel_filter_restore(pf);
> > i40e_fdir_filter_restore(pf);
> > + i40e_rss_filter_restore(pf);
> > }
> >
> > static bool
> > @@ -11366,6 +11381,82 @@ i40e_cloud_filter_qinq_create(struct i40e_pf
> > *pf)
> > return ret;
> > }
> >
> > +int
> > +i40e_config_rss_filter(struct i40e_pf *pf,
> > + struct i40e_rte_flow_rss_conf *conf, bool add) {
> > + struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> > + uint32_t i, lut = 0;
> > + uint16_t j, num;
> > + struct rte_eth_rss_conf rss_conf = conf->rss_conf;
> > + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> > +
> > + if (!add) {
> > + if (memcmp(conf, rss_info,
> > + sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
> > + i40e_pf_disable_rss(pf);
> > + memset(rss_info, 0,
> > + sizeof(struct i40e_rte_flow_rss_conf));
> > + return 0;
> > + }
> > + return -EINVAL;
> > + }
> > +
> > + if (rss_info->num)
> > + return -EINVAL;
> > +
> > + /* If both VMDQ and RSS enabled, not all of PF queues are
> configured.
> > + * It's necessary to calculate the actual PF queues that are configured.
> > + */
> > + if (pf->dev_data->dev_conf.rxmode.mq_mode &
> > ETH_MQ_RX_VMDQ_FLAG)
> > + num = i40e_pf_calc_configured_queues_num(pf);
> > + else
> > + num = pf->dev_data->nb_rx_queues;
> > +
> > + num = RTE_MIN(num, conf->num);
> > + PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are
> configured",
> > + num);
> > +
> > + if (num == 0) {
> > + PMD_DRV_LOG(ERR, "No PF queues are configured to
> enable RSS");
> > + return -ENOTSUP;
> > + }
> > +
> > + /* Fill in redirection table */
> > + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
> > + if (j == num)
> > + j = 0;
> > + lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
> > + hw->func_caps.rss_table_entry_width) - 1));
> > + if ((i & 3) == 3)
> > + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
> > + }
> > +
> > + if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
> > + i40e_pf_disable_rss(pf);
> > + return 0;
> > + }
> > + if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
> > + (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
> > + /* Random default keys */
> > + static uint32_t rss_key_default[] = {0x6b793944,
> > + 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
> > + 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
> > + 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
> > +
> > + rss_conf.rss_key = (uint8_t *)rss_key_default;
> > + rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
> > + sizeof(uint32_t);
> > + }
> > +
> > + return i40e_hw_rss_hash_set(pf, &rss_conf);
> > +
> > + rte_memcpy(rss_info,
> > + conf, sizeof(struct i40e_rte_flow_rss_conf));
> > +
> > + return 0;
> > +}
> > +
> > RTE_INIT(i40e_init_log);
> > static void
> > i40e_init_log(void)
> > diff --git a/drivers/net/i40e/i40e_ethdev.h
> > b/drivers/net/i40e/i40e_ethdev.h index cd67453..0a59e39 100644
> > --- a/drivers/net/i40e/i40e_ethdev.h
> > +++ b/drivers/net/i40e/i40e_ethdev.h
> > @@ -891,6 +891,13 @@ struct i40e_customized_pctype {
> > bool valid; /* Check if it's valid */
> > };
> >
> > +struct i40e_rte_flow_rss_conf {
> > + struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
> > + uint16_t queue_region_conf; /**< Queue region config flag */
> > + uint16_t num; /**< Number of entries in queue[]. */
> > + uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use.
> */ };
> > +
> > /*
> > * Structure to store private data specific for PF instance.
> > */
> > @@ -945,6 +952,7 @@ struct i40e_pf {
> > struct i40e_fdir_info fdir; /* flow director info */
> > struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
> > struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
> > + struct i40e_rte_flow_rss_conf rss_info; /* rss info */
> > struct i40e_queue_regions queue_region; /* queue region info */
> > struct i40e_fc_conf fc_conf; /* Flow control conf */
> > struct i40e_mirror_rule_list mirror_list; @@ -1071,6 +1079,7 @@
> > union i40e_filter_t {
> > struct i40e_fdir_filter_conf fdir_filter;
> > struct rte_eth_tunnel_filter_conf tunnel_filter;
> > struct i40e_tunnel_filter_conf consistent_tunnel_filter;
> > + struct i40e_rte_flow_rss_conf rss_conf;
> > };
> >
> > typedef int (*parse_filter_t)(struct rte_eth_dev *dev, @@ -1198,6
> > +1207,8 @@ int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool
> > sw_dcb); int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
> > struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on); void
> > i40e_init_queue_region_conf(struct rte_eth_dev *dev);
> > +int i40e_config_rss_filter(struct i40e_pf *pf,
> > + struct i40e_rte_flow_rss_conf *conf, bool add);
> >
> > #define I40E_DEV_TO_PCI(eth_dev) \
> > RTE_DEV_TO_PCI((eth_dev)->device)
> > diff --git a/drivers/net/i40e/i40e_flow.c
> > b/drivers/net/i40e/i40e_flow.c index
> > 7e4936e..4d29818 100644
> > --- a/drivers/net/i40e/i40e_flow.c
> > +++ b/drivers/net/i40e/i40e_flow.c
> > @@ -138,6 +138,8 @@ static int i40e_flow_flush_fdir_filter(struct
> > i40e_pf *pf); static int i40e_flow_flush_ethertype_filter(struct
> > i40e_pf *pf); static int i40e_flow_flush_tunnel_filter(struct i40e_pf
> > *pf); static int
> > +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev); static int
> > i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
> > const struct rte_flow_attr *attr,
> > const struct rte_flow_item pattern[], @@ -4095,6
> > +4097,301 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, }
> >
> > static int
> > +i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
> > + const struct rte_flow_item *pattern,
> > + struct rte_flow_error *error,
> > + uint8_t *action_flag,
> > + struct i40e_queue_regions *info) {
> > + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
> > + const struct rte_flow_item *item = pattern;
> > + enum rte_flow_item_type item_type;
> > +
> > + if (item->type == RTE_FLOW_ITEM_TYPE_END)
> > + return 0;
> > +
> > + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > + if (item->last) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Not support range");
> > + return -rte_errno;
> > + }
> > + item_type = item->type;
> > + switch (item_type) {
> > + case RTE_FLOW_ITEM_TYPE_ETH:
> > + *action_flag = 1;
> > + break;
> > + case RTE_FLOW_ITEM_TYPE_VLAN:
> > + vlan_spec =
> > + (const struct rte_flow_item_vlan *)item-
> >spec;
> > + vlan_mask =
> > + (const struct rte_flow_item_vlan *)item-
> >mask;
> > + if (vlan_spec && vlan_mask) {
> > + if (vlan_mask->tci ==
> > + rte_cpu_to_be_16(I40E_TCI_MASK))
> {
> > + info->region[0].user_priority[0] =
> > + (vlan_spec->tci >> 13) & 0x7;
> > + info->region[0].user_priority_num =
> 1;
> > + info->queue_region_number = 1;
> > + *action_flag = 0;
> > + }
> > + }
> > + break;
> > + default:
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Not support range");
> > + return -rte_errno;
> > + }
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static int
> > +i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
> > + const struct rte_flow_action *actions,
> > + struct rte_flow_error *error,
> > + uint8_t *action_flag,
> > + struct i40e_queue_regions *conf_info,
> > + union i40e_filter_t *filter)
> > +{
> > + const struct rte_flow_action *act;
> > + const struct rte_flow_action_rss *rss;
> > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> > + struct i40e_queue_regions *info = &pf->queue_region;
> > + struct i40e_rte_flow_rss_conf *rss_config =
> > + &filter->rss_conf;
> > + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> > + uint16_t i, j, n;
> > + uint32_t index = 0;
> > +
> > + NEXT_ITEM_OF_ACTION(act, actions, index);
> > + rss = (const struct rte_flow_action_rss *)act->conf;
> > +
> > + /**
> > + * rss only supports forwarding,
> > + * check if the first not void action is RSS.
> > + */
> > + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
> > + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ACTION,
> > + act, "Not supported action.");
> > + return -rte_errno;
> > + }
> > +
> > + if (action_flag) {
> > + for (n = 0; n < 64; n++) {
> > + if (rss->rss_conf->rss_hf & (1 << n)) {
> > + conf_info->region[0].user_priority[0] = n;
> > + conf_info->region[0].user_priority_num = 1;
> > + conf_info->queue_region_number = 1;
> > + break;
> > + }
> > + }
> > + }
> > +
> > + for (n = 0; n < conf_info->queue_region_number; n++) {
> > + if (conf_info->region[n].user_priority_num ||
> > + conf_info->region[n].flowtype_num) {
> > + if (!((rte_is_power_of_2(rss->num)) &&
> > + rss->num <= 64)) {
> > + PMD_DRV_LOG(ERR, "The region sizes
> should be any of the
> > following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
> > + "total number of queues do not exceed the
> VSI allocation");
> > + return -rte_errno;
> > + }
> > +
> > + if (conf_info->region[n].user_priority[n] >=
> > + I40E_MAX_USER_PRIORITY) {
> > + PMD_DRV_LOG(ERR, "the user priority max
> index is 7");
> > + return -rte_errno;
> > + }
> > +
> > + if (conf_info->region[n].hw_flowtype[n] >=
> > + I40E_FILTER_PCTYPE_MAX) {
> > + PMD_DRV_LOG(ERR, "the hw_flowtype or
> PCTYPE max
> > index is 63");
> > + return -rte_errno;
> > + }
> > +
> > + if (rss_info->num < rss->num ||
> > + rss_info->queue[0] < rss->queue[0] ||
> > + (rss->queue[0] + rss->num >
> > + rss_info->num + rss_info->queue[0]))
> {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ACTION,
> > + act,
> > + "no valid queues");
> > + return -rte_errno;
> > + }
> > +
> > + for (i = 0; i < info->queue_region_number; i++) {
> > + if (info->region[i].queue_num == rss->num
> &&
> > + info->region[i].queue_start_index ==
> > + rss->queue[0])
> > + break;
> > + }
> > +
> > + if (i == info->queue_region_number) {
> > + if (i > I40E_REGION_MAX_INDEX) {
> > + PMD_DRV_LOG(ERR, "the queue
> region max index is
> > 7");
> > + return -rte_errno;
> > + }
> > +
> > + info->region[i].queue_num =
> > + rss->num;
> > + info->region[i].queue_start_index =
> > + rss->queue[0];
> > + info->region[i].region_id =
> > + info->queue_region_number;
> > +
> > + j = info->region[i].user_priority_num;
> > + if (conf_info->region[n].user_priority_num) {
> > + info->region[i].user_priority[j] =
> > + conf_info->
> > + region[n].user_priority[0];
> > + info->region[i].user_priority_num++;
> > + }
> > +
> > + j = info->region[i].flowtype_num;
> > + if (conf_info->region[n].flowtype_num) {
> > + info->region[i].hw_flowtype[j] =
> > + conf_info->
> > + region[n].hw_flowtype[0];
> > + info->region[i].flowtype_num++;
> > + }
> > + info->queue_region_number++;
> > + } else {
> > + j = info->region[i].user_priority_num;
> > + if (conf_info->region[n].user_priority_num) {
> > + info->region[i].user_priority[j] =
> > + conf_info->
> > + region[n].user_priority[0];
> > + info->region[i].user_priority_num++;
> > + }
> > +
> > + j = info->region[i].flowtype_num;
> > + if (conf_info->region[n].flowtype_num) {
> > + info->region[i].hw_flowtype[j] =
> > + conf_info->
> > + region[n].hw_flowtype[0];
> > + info->region[i].flowtype_num++;
> > + }
> > + }
> > + }
> > +
> > + rss_config->queue_region_conf = TRUE;
> > + return 0;
> > + }
> > +
> > + if (!rss || !rss->num) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ACTION,
> > + act,
> > + "no valid queues");
> > + return -rte_errno;
> > + }
> > +
> > + for (n = 0; n < rss->num; n++) {
> > + if (rss->queue[n] >= dev->data->nb_rx_queues) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ACTION,
> > + act,
> > + "queue id > max number of queues");
> > + return -rte_errno;
> > + }
> > + }
> > + if (rss->rss_conf)
> > + rss_config->rss_conf = *rss->rss_conf;
> > + else
> > + rss_config->rss_conf.rss_hf =
> > + pf->adapter->flow_types_mask;
> > +
> > + for (n = 0; n < rss->num; ++n)
> > + rss_config->queue[n] = rss->queue[n];
> > + rss_config->num = rss->num;
> > + index++;
> > +
> > + /* check if the next not void action is END */
> > + NEXT_ITEM_OF_ACTION(act, actions, index);
> > + if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> > + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ACTION,
> > + act, "Not supported action.");
> > + return -rte_errno;
> > + }
> > + rss_config->queue_region_conf = FALSE;
> > +
> > + return 0;
> > +}
> > +
> > +static int
> > +i40e_parse_rss_filter(struct rte_eth_dev *dev,
> > + const struct rte_flow_attr *attr,
> > + const struct rte_flow_item pattern[],
> > + const struct rte_flow_action actions[],
> > + union i40e_filter_t *filter,
> > + struct rte_flow_error *error)
> > +{
> > + int ret;
> > + struct i40e_queue_regions info;
> > + uint8_t action_flag = 0;
> > +
> > + memset(&info, 0, sizeof(struct i40e_queue_regions));
> > +
> > + ret = i40e_flow_parse_rss_pattern(dev, pattern,
> > + error, &action_flag, &info);
> > + if (ret)
> > + return ret;
> > +
> > + ret = i40e_flow_parse_rss_action(dev, actions, error,
> > + &action_flag, &info, filter);
> > + if (ret)
> > + return ret;
> > +
> > + ret = i40e_flow_parse_attr(attr, error);
> > + if (ret)
> > + return ret;
> > +
> > + cons_filter_type = RTE_ETH_FILTER_HASH;
> > +
> > + return 0;
> > +}
> > +
> > +static int
> > +i40e_config_rss_filter_set(struct rte_eth_dev *dev,
> > + struct i40e_rte_flow_rss_conf *conf) {
> > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> > + struct i40e_hw *hw =
> > I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > +
> > + if (conf->queue_region_conf) {
> > + i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
> > + conf->queue_region_conf = 0;
> > + } else {
> > + i40e_config_rss_filter(pf, conf, 1);
> > + }
> > + return 0;
> > +}
> > +
> > +static int
> > +i40e_config_rss_filter_del(struct rte_eth_dev *dev,
> > + struct i40e_rte_flow_rss_conf *conf) {
> > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> > + struct i40e_hw *hw =
> > I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > +
> > + i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
> > +
> > + i40e_config_rss_filter(pf, conf, 0);
> > + return 0;
> > +}
> > +
> > +static int
> > i40e_flow_validate(struct rte_eth_dev *dev,
> > const struct rte_flow_attr *attr,
> > const struct rte_flow_item pattern[], @@ -4130,6 +4427,17
> @@
> > i40e_flow_validate(struct rte_eth_dev *dev,
> >
> > memset(&cons_filter, 0, sizeof(cons_filter));
> >
> > + /* Get the non-void item of action */
> > + while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
> > + i++;
> > +
> > + if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
> > + ret = i40e_parse_rss_filter(dev, attr, pattern,
> > + actions, &cons_filter, error);
> > + return ret;
> > + }
> > +
> > + i = 0;
> > /* Get the non-void item number of pattern */
> > while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
> > if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID) @@ -
> 4217,6
> > +4525,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
> > flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
> > i40e_tunnel_filter_list);
> > break;
> > + case RTE_ETH_FILTER_HASH:
> > + ret = i40e_config_rss_filter_set(dev,
> > + &cons_filter.rss_conf);
> > + flow->rule = &pf->rss_info;
> > + break;
> > default:
> > goto free_flow;
> > }
> > @@ -4255,6 +4568,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
> > ret = i40e_flow_add_del_fdir_filter(dev,
> > &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
> > break;
> > + case RTE_ETH_FILTER_HASH:
> > + ret = i40e_config_rss_filter_del(dev,
> > + (struct i40e_rte_flow_rss_conf *)flow->rule);
> > default:
> > PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> > filter_type);
> > @@ -4397,6 +4713,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct
> > rte_flow_error *error)
> > return -rte_errno;
> > }
> >
> > + ret = i40e_flow_flush_rss_filter(dev);
> > + if (ret) {
> > + rte_flow_error_set(error, -ret,
> > + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > + "Failed to flush rss flows.");
> > + return -rte_errno;
> > + }
> > +
> > return ret;
> > }
> >
> > @@ -4487,3 +4811,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf
> > *pf)
> >
> > return ret;
> > }
> > +
> > +/* remove the rss filter */
> > +static int
> > +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev) {
> > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> > + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
> > + struct i40e_hw *hw =
> > I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > + int32_t ret = -EINVAL;
> > +
> > + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
> > +
> > + if (rss_info->num)
> > + ret = i40e_config_rss_filter(pf, rss_info, FALSE);
> > + return ret;
> > +}
> > --
> > 2.9.3
^ permalink raw reply [flat|nested] 17+ messages in thread
* [dpdk-dev] [PATCH v4] net/i40e: move RSS to flow API
2018-01-09 9:18 ` [dpdk-dev] [PATCH v3] " Wei Zhao
@ 2018-01-10 2:10 ` Wei Zhao
2018-01-10 2:58 ` Zhang, Qi Z
2018-01-10 13:28 ` Ferruh Yigit
0 siblings, 2 replies; 17+ messages in thread
From: Wei Zhao @ 2018-01-10 2:10 UTC (permalink / raw)
To: dev; +Cc: qi.z.zhang, Wei Zhao
Rte_flow is actually defined to include RSS,
but till now, RSS has been out of rte_flow.
This patch moves the i40e existing RSS to rte_flow.
This patch also enables queue region configuration
using the flow API for i40e.
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
v2:
-change some code style.
v3:
-rebase code and add more comment.
v4:
-rebase code branch and change comment location.
---
doc/guides/rel_notes/release_18_02.rst | 6 +
drivers/net/i40e/i40e_ethdev.c | 91 +++++++++
drivers/net/i40e/i40e_ethdev.h | 11 +
drivers/net/i40e/i40e_flow.c | 355 +++++++++++++++++++++++++++++++++
4 files changed, 463 insertions(+)
diff --git a/doc/guides/rel_notes/release_18_02.rst b/doc/guides/rel_notes/release_18_02.rst
index 2e0e796..3381a75 100644
--- a/doc/guides/rel_notes/release_18_02.rst
+++ b/doc/guides/rel_notes/release_18_02.rst
@@ -77,6 +77,12 @@ New Features
1, 2, 4, 8 or 16. If no such parameter is configured, the number of queues
per VF is 4 by default.
+* **Added the i40e ethernet driver to support RSS with flow API.**
+
+ Rte_flow is actually defined to include RSS, but till now, RSS has been out
+ of rte_flow. This patch supports the i40e NIC with existing RSS
+ configuration using the rte_flow API. It also enables queue region
+ configuration using the flow API for i40e.
API Changes
-----------
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 97066fb..6901205 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1312,6 +1312,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize queue region configuration */
i40e_init_queue_region_conf(dev);
+ /* initialize rss configuration from rte_flow */
+ memset(&pf->rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+
return 0;
err_init_fdir_filter_list:
@@ -11047,12 +11051,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
}
}
+/* Restore rss filter */
+static inline void
+i40e_rss_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_rte_flow_rss_conf *conf =
+ &pf->rss_info;
+ if (conf->num)
+ i40e_config_rss_filter(pf, conf, TRUE);
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
i40e_fdir_filter_restore(pf);
+ i40e_rss_filter_restore(pf);
}
static bool
@@ -11507,6 +11522,82 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
return ret;
}
+int
+i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t i, lut = 0;
+ uint16_t j, num;
+ struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+
+ if (!add) {
+ if (memcmp(conf, rss_info,
+ sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
+ i40e_pf_disable_rss(pf);
+ memset(rss_info, 0,
+ sizeof(struct i40e_rte_flow_rss_conf));
+ return 0;
+ }
+ return -EINVAL;
+ }
+
+ if (rss_info->num)
+ return -EINVAL;
+
+ /* If both VMDQ and RSS enabled, not all of PF queues are configured.
+ * It's necessary to calculate the actual PF queues that are configured.
+ */
+ if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
+ num = i40e_pf_calc_configured_queues_num(pf);
+ else
+ num = pf->dev_data->nb_rx_queues;
+
+ num = RTE_MIN(num, conf->num);
+ PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
+ num);
+
+ if (num == 0) {
+ PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS");
+ return -ENOTSUP;
+ }
+
+ /* Fill in redirection table */
+ for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ if (j == num)
+ j = 0;
+ lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
+ hw->func_caps.rss_table_entry_width) - 1));
+ if ((i & 3) == 3)
+ I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+
+ if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) {
+ i40e_pf_disable_rss(pf);
+ return 0;
+ }
+ if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
+ (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
+ /* Random default keys */
+ static uint32_t rss_key_default[] = {0x6b793944,
+ 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8,
+ 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581};
+
+ rss_conf.rss_key = (uint8_t *)rss_key_default;
+ rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ }
+
+ return i40e_hw_rss_hash_set(pf, &rss_conf);
+
+ rte_memcpy(rss_info,
+ conf, sizeof(struct i40e_rte_flow_rss_conf));
+
+ return 0;
+}
+
RTE_INIT(i40e_init_log);
static void
i40e_init_log(void)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 70ca42f..e8bc3bd 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -864,6 +864,13 @@ struct i40e_customized_pctype {
bool valid; /* Check if it's valid */
};
+struct i40e_rte_flow_rss_conf {
+ struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
+ uint16_t queue_region_conf; /**< Queue region config flag */
+ uint16_t num; /**< Number of entries in queue[]. */
+ uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
+};
+
/*
* Structure to store private data specific for PF instance.
*/
@@ -918,6 +925,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
+ struct i40e_rte_flow_rss_conf rss_info; /* rss info */
struct i40e_queue_regions queue_region; /* queue region info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
@@ -1044,6 +1052,7 @@ union i40e_filter_t {
struct i40e_fdir_filter_conf fdir_filter;
struct rte_eth_tunnel_filter_conf tunnel_filter;
struct i40e_tunnel_filter_conf consistent_tunnel_filter;
+ struct i40e_rte_flow_rss_conf rss_conf;
};
typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
@@ -1172,6 +1181,8 @@ int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw);
+int i40e_config_rss_filter(struct i40e_pf *pf,
+ struct i40e_rte_flow_rss_conf *conf, bool add);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 6feb7aa..6106f78 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -110,6 +110,8 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
+static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -4108,6 +4110,316 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
return ret;
}
+/**
+ * This function is used to configure i40e's existing RSS with rte_flow.
+ * It also enables queue region configuration using the flow API for i40e.
+ * The pattern can be used to indicate which parameters will be included in
+ * the flow, such as user_priority or flowtype for queue region, or the
+ * hash function for RSS. The action is used to transmit parameters such as
+ * the queue index and hash function for RSS, or the flowtype for queue
+ * region configuration.
+ * For example:
+ * pattern:
+ * Case 1: only ETH, indicates the flowtype for queue region will be parsed.
+ * Case 2: only VLAN, indicates the user_priority for queue region will be parsed.
+ * Case 3: none, indicates RSS-related parameters will be parsed in action.
+ * Any pattern other than ETH or VLAN will be treated as invalid, except END.
+ * So, the pattern choice depends on the purpose of the configuration of
+ * that flow.
+ * action:
+ * action RSS will be used to transmit valid parameters with
+ * struct rte_flow_action_rss for all of the 3 cases above.
+ */
+static int
+i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ uint8_t *action_flag,
+ struct i40e_queue_regions *info)
+{
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ return 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ *action_flag = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) {
+ info->region[0].user_priority[0] =
+ (vlan_spec->tci >> 13) & 0x7;
+ info->region[0].user_priority_num = 1;
+ info->queue_region_number = 1;
+ *action_flag = 0;
+ }
+ }
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ uint8_t *action_flag,
+ struct i40e_queue_regions *conf_info,
+ union i40e_filter_t *filter)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_queue_regions *info = &pf->queue_region;
+ struct i40e_rte_flow_rss_conf *rss_config =
+ &filter->rss_conf;
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ uint16_t i, j, n, tmp;
+ uint32_t index = 0;
+
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ rss = (const struct rte_flow_action_rss *)act->conf;
+
+ /**
+ * rss only supports forwarding,
+ * check if the first not void action is RSS.
+ */
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (action_flag) {
+ for (n = 0; n < 64; n++) {
+ if (rss->rss_conf->rss_hf & (1 << n)) {
+ conf_info->region[0].hw_flowtype[0] = n;
+ conf_info->region[0].flowtype_num = 1;
+ conf_info->queue_region_number = 1;
+ break;
+ }
+ }
+ }
+
+ for (n = 0; n < conf_info->queue_region_number; n++) {
+ if (conf_info->region[n].user_priority_num ||
+ conf_info->region[n].flowtype_num) {
+ if (!((rte_is_power_of_2(rss->num)) &&
+ rss->num <= 64)) {
+ PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].user_priority[n] >=
+ I40E_MAX_USER_PRIORITY) {
+ PMD_DRV_LOG(ERR, "the user priority max index is 7");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].hw_flowtype[n] >=
+ I40E_FILTER_PCTYPE_MAX) {
+ PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
+ return -rte_errno;
+ }
+
+ if (rss_info->num < rss->num ||
+ rss_info->queue[0] < rss->queue[0] ||
+ (rss->queue[0] + rss->num >
+ rss_info->num + rss_info->queue[0])) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < info->queue_region_number; i++) {
+ if (info->region[i].queue_num == rss->num &&
+ info->region[i].queue_start_index ==
+ rss->queue[0])
+ break;
+ }
+
+ if (i == info->queue_region_number) {
+ if (i > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return -rte_errno;
+ }
+
+ info->region[i].queue_num =
+ rss->num;
+ info->region[i].queue_start_index =
+ rss->queue[0];
+ info->region[i].region_id =
+ info->queue_region_number;
+
+ j = info->region[i].user_priority_num;
+ tmp = conf_info->region[n].user_priority[0];
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] = tmp;
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ tmp = conf_info->region[n].hw_flowtype[0];
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] = tmp;
+ info->region[i].flowtype_num++;
+ }
+ info->queue_region_number++;
+ } else {
+ j = info->region[i].user_priority_num;
+ tmp = conf_info->region[n].user_priority[0];
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] = tmp;
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ tmp = conf_info->region[n].hw_flowtype[0];
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] = tmp;
+ info->region[i].flowtype_num++;
+ }
+ }
+ }
+
+ rss_config->queue_region_conf = TRUE;
+ return 0;
+ }
+
+ if (!rss || !rss->num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+ if (rss->rss_conf)
+ rss_config->rss_conf = *rss->rss_conf;
+ else
+ rss_config->rss_conf.rss_hf =
+ pf->adapter->flow_types_mask;
+
+ for (n = 0; n < rss->num; ++n)
+ rss_config->queue[n] = rss->queue[n];
+ rss_config->num = rss->num;
+ index++;
+
+ /* check if the next not void action is END */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+ rss_config->queue_region_conf = FALSE;
+
+ return 0;
+}
+
+static int
+i40e_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ union i40e_filter_t *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct i40e_queue_regions info;
+ uint8_t action_flag = 0;
+
+ memset(&info, 0, sizeof(struct i40e_queue_regions));
+
+ ret = i40e_flow_parse_rss_pattern(dev, pattern,
+ error, &action_flag, &info);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_rss_action(dev, actions, error,
+ &action_flag, &info, filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_HASH;
+
+ return 0;
+}
+
+static int
+i40e_config_rss_filter_set(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (conf->queue_region_conf) {
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ conf->queue_region_conf = 0;
+ } else {
+ i40e_config_rss_filter(pf, conf, 1);
+ }
+ return 0;
+}
+
+static int
+i40e_config_rss_filter_del(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ i40e_config_rss_filter(pf, conf, 0);
+ return 0;
+}
+
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -4144,6 +4456,17 @@ i40e_flow_validate(struct rte_eth_dev *dev,
memset(&cons_filter, 0, sizeof(cons_filter));
+ /* Get the non-void item of action */
+ while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ i++;
+
+ if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ ret = i40e_parse_rss_filter(dev, attr, pattern,
+ actions, &cons_filter, error);
+ return ret;
+ }
+
+ i = 0;
/* Get the non-void item number of pattern */
while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
@@ -4231,6 +4554,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
i40e_tunnel_filter_list);
break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_set(dev,
+ &cons_filter.rss_conf);
+ flow->rule = &pf->rss_info;
+ break;
default:
goto free_flow;
}
@@ -4269,6 +4597,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_flow_add_del_fdir_filter(dev,
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_del(dev,
+ (struct i40e_rte_flow_rss_conf *)flow->rule);
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -4411,6 +4742,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return -rte_errno;
}
+ ret = i40e_flow_flush_rss_filter(dev);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush rss flows.");
+ return -rte_errno;
+ }
+
return ret;
}
@@ -4506,3 +4845,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
return ret;
}
+
+/* remove the rss filter */
+static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int32_t ret = -EINVAL;
+
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ if (rss_info->num)
+ ret = i40e_config_rss_filter(pf, rss_info, FALSE);
+ return ret;
+}
--
2.9.3
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH v4] net/i40e: move RSS to flow API
2018-01-10 2:10 ` [dpdk-dev] [PATCH v4] " Wei Zhao
@ 2018-01-10 2:58 ` Zhang, Qi Z
2018-01-10 3:36 ` Zhang, Helin
2018-01-10 13:28 ` Ferruh Yigit
1 sibling, 1 reply; 17+ messages in thread
From: Zhang, Qi Z @ 2018-01-10 2:58 UTC (permalink / raw)
To: Zhao1, Wei, dev
> -----Original Message-----
> From: Zhao1, Wei
> Sent: Wednesday, January 10, 2018 10:10 AM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Zhao1, Wei <wei.zhao1@intel.com>
> Subject: [PATCH v4] net/i40e: move RSS to flow API
>
> Rte_flow actually defined to include RSS, but till now, RSS is out of rte_flow.
> This patch is to move i40e existing RSS to rte_flow.
> This patch also enable queue region configuration using flow API for i40e.
>
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH v4] net/i40e: move RSS to flow API
2018-01-10 2:58 ` Zhang, Qi Z
@ 2018-01-10 3:36 ` Zhang, Helin
0 siblings, 0 replies; 17+ messages in thread
From: Zhang, Helin @ 2018-01-10 3:36 UTC (permalink / raw)
To: Zhang, Qi Z, Zhao1, Wei, dev
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Zhang, Qi Z
> Sent: Wednesday, January 10, 2018 10:59 AM
> To: Zhao1, Wei; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v4] net/i40e: move RSS to flow API
>
>
>
> > -----Original Message-----
> > From: Zhao1, Wei
> > Sent: Wednesday, January 10, 2018 10:10 AM
> > To: dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Zhao1, Wei <wei.zhao1@intel.com>
> > Subject: [PATCH v4] net/i40e: move RSS to flow API
> >
> > Rte_flow actually defined to include RSS, but till now, RSS is out of rte_flow.
> > This patch is to move i40e existing RSS to rte_flow.
> > This patch also enable queue region configuration using flow API for i40e.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> >
> Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel, with minor commit log changes. Thanks!
/Helin
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH v4] net/i40e: move RSS to flow API
2018-01-10 2:10 ` [dpdk-dev] [PATCH v4] " Wei Zhao
2018-01-10 2:58 ` Zhang, Qi Z
@ 2018-01-10 13:28 ` Ferruh Yigit
2018-01-12 5:50 ` Zhao1, Wei
2018-01-12 6:23 ` Zhao1, Wei
1 sibling, 2 replies; 17+ messages in thread
From: Ferruh Yigit @ 2018-01-10 13:28 UTC (permalink / raw)
To: Wei Zhao, dev; +Cc: qi.z.zhang
On 1/10/2018 2:10 AM, Wei Zhao wrote:
> Rte_flow actually defined to include RSS,
> but till now, RSS is out of rte_flow.
> This patch is to move i40e existing RSS to rte_flow.
> This patch also enable queue region configuration
> using flow API for i40e.
>
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
<...>
> @@ -4269,6 +4597,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
> ret = i40e_flow_add_del_fdir_filter(dev,
> &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
> break;
> + case RTE_ETH_FILTER_HASH:
> + ret = i40e_config_rss_filter_del(dev,
> + (struct i40e_rte_flow_rss_conf *)flow->rule);
This is causing build error [1], if the fallback is intentional compiler expects
a /* Fallthrough */ comment.
[1]
.../drivers/net/i40e/i40e_flow.c:4601:7: error: this statement may fall through
[-Werror=implicit-fallthrough=]
ret = i40e_config_rss_filter_del(dev,
~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
(struct i40e_rte_flow_rss_conf *)flow->rule);
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.../drivers/net/i40e/i40e_flow.c:4603:2: note: here
default:
^~~~~~~
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH v4] net/i40e: move RSS to flow API
2018-01-10 13:28 ` Ferruh Yigit
@ 2018-01-12 5:50 ` Zhao1, Wei
2018-01-12 6:23 ` Zhao1, Wei
1 sibling, 0 replies; 17+ messages in thread
From: Zhao1, Wei @ 2018-01-12 5:50 UTC (permalink / raw)
To: Yigit, Ferruh, dev; +Cc: Zhang, Qi Z
Hi, Ferruh
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, January 10, 2018 9:29 PM
> To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v4] net/i40e: move RSS to flow API
>
> On 1/10/2018 2:10 AM, Wei Zhao wrote:
> > Rte_flow actually defined to include RSS, but till now, RSS is out of
> > rte_flow.
> > This patch is to move i40e existing RSS to rte_flow.
> > This patch also enable queue region configuration using flow API for
> > i40e.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
>
> <...>
>
> > @@ -4269,6 +4597,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
> > ret = i40e_flow_add_del_fdir_filter(dev,
> > &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
> > break;
> > + case RTE_ETH_FILTER_HASH:
> > + ret = i40e_config_rss_filter_del(dev,
> > + (struct i40e_rte_flow_rss_conf *)flow->rule);
>
> This is causing build error [1], if the fallback is intentional compiler expects a
> /* Fallthrough */ comment.
>
> [1]
> .../drivers/net/i40e/i40e_flow.c:4601:7: error: this statement may fall
> through [-Werror=implicit-fallthrough=]
> ret = i40e_config_rss_filter_del(dev,
>
> ~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>
> (struct i40e_rte_flow_rss_conf *)flow->rule);
> ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> .../drivers/net/i40e/i40e_flow.c:4603:2: note: here
>
> default:
> ^~~~~~~
It seems a "break" is missing after the "case:" and before the "default:";
I will commit a fix patch to the dpdk-next-net-intel branch today.
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [dpdk-dev] [PATCH v4] net/i40e: move RSS to flow API
2018-01-10 13:28 ` Ferruh Yigit
2018-01-12 5:50 ` Zhao1, Wei
@ 2018-01-12 6:23 ` Zhao1, Wei
1 sibling, 0 replies; 17+ messages in thread
From: Zhao1, Wei @ 2018-01-12 6:23 UTC (permalink / raw)
To: Yigit, Ferruh, dev; +Cc: Zhang, Qi Z
Hi, Ferruh
A fix patch has been committed.
https://dpdk.org/dev/patchwork/patch/33640/
> -----Original Message-----
> From: Zhao1, Wei
> Sent: Friday, January 12, 2018 1:50 PM
> To: Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: RE: [dpdk-dev] [PATCH v4] net/i40e: move RSS to flow API
>
> Hi, Ferruh
>
> > -----Original Message-----
> > From: Yigit, Ferruh
> > Sent: Wednesday, January 10, 2018 9:29 PM
> > To: Zhao1, Wei <wei.zhao1@intel.com>; dev@dpdk.org
> > Cc: Zhang, Qi Z <qi.z.zhang@intel.com>
> > Subject: Re: [dpdk-dev] [PATCH v4] net/i40e: move RSS to flow API
> >
> > On 1/10/2018 2:10 AM, Wei Zhao wrote:
> > > Rte_flow actually defined to include RSS, but till now, RSS is out
> > > of rte_flow.
> > > This patch is to move i40e existing RSS to rte_flow.
> > > This patch also enable queue region configuration using flow API for
> > > i40e.
> > >
> > > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> >
> > <...>
> >
> > > @@ -4269,6 +4597,9 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
> > > ret = i40e_flow_add_del_fdir_filter(dev,
> > > &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
> > > break;
> > > + case RTE_ETH_FILTER_HASH:
> > > + ret = i40e_config_rss_filter_del(dev,
> > > + (struct i40e_rte_flow_rss_conf *)flow->rule);
> >
> > This is causing build error [1], if the fallback is intentional
> > compiler expects a
> > /* Fallthrough */ comment.
> >
> > [1]
> > .../drivers/net/i40e/i40e_flow.c:4601:7: error: this statement may
> > fall through [-Werror=implicit-fallthrough=]
> > ret = i40e_config_rss_filter_del(dev,
> >
> > ~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> >
> > (struct i40e_rte_flow_rss_conf *)flow->rule);
> > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> > .../drivers/net/i40e/i40e_flow.c:4603:2: note: here
> >
> > default:
> > ^~~~~~~
>
> It seems there miss a "break" after "case:" and before " default:", I will
> commit a fix patch to dpdk-next-net-intel branch today.
^ permalink raw reply [flat|nested] 17+ messages in thread
end of thread, other threads:[~2018-01-12 6:23 UTC | newest]
Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-11-24 8:43 [dpdk-dev] [PATCH] net/i40e: move RSS to flow API Wei Zhao
2017-12-21 3:12 ` Zhang, Helin
2017-12-22 4:36 ` Zhang, Qi Z
2018-01-07 15:43 ` Zhang, Helin
2018-01-08 1:53 ` Zhao1, Wei
2018-01-08 8:30 ` Zhao1, Wei
2018-01-08 8:33 ` Zhao1, Wei
2018-01-08 8:35 ` [dpdk-dev] [PATCH v2] " Wei Zhao
2018-01-09 2:33 ` Zhang, Qi Z
2018-01-10 1:53 ` Zhao1, Wei
2018-01-09 9:18 ` [dpdk-dev] [PATCH v3] " Wei Zhao
2018-01-10 2:10 ` [dpdk-dev] [PATCH v4] " Wei Zhao
2018-01-10 2:58 ` Zhang, Qi Z
2018-01-10 3:36 ` Zhang, Helin
2018-01-10 13:28 ` Ferruh Yigit
2018-01-12 5:50 ` Zhao1, Wei
2018-01-12 6:23 ` Zhao1, Wei
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).